| prompt (string, length 19-1.03M) | completion (string, length 4-2.12k) | api (string, length 8-90) |
|---|---|---|
import copy
import importlib
import itertools
import os
import sys
import warnings
import numpy as np
import pandas as pd
try:
import ixmp
has_ix = True
except ImportError:
has_ix = False
from pyam import plotting
from pyam.logger import logger
from pyam.run_control import run_control
from pyam.utils import (
write_sheet,
read_ix,
read_files,
read_pandas,
format_data,
pattern_match,
years_match,
isstr,
islistable,
cast_years_to_int,
META_IDX,
YEAR_IDX,
REGION_IDX,
IAMC_IDX,
SORT_IDX,
LONG_IDX,
)
from pyam.timeseries import fill_series
class IamDataFrame(object):
"""This class is a wrapper for dataframes following the IAMC format.
It provides a number of diagnostic features (including validation of data,
completeness of variables provided) as well as a number of visualization
and plotting tools.
"""
def __init__(self, data, **kwargs):
"""Initialize an instance of an IamDataFrame
Parameters
----------
data: ixmp.TimeSeries, ixmp.Scenario, pd.DataFrame or data file
an instance of an TimeSeries or Scenario (requires `ixmp`),
or pd.DataFrame or data file with IAMC-format data columns.
A pd.DataFrame can have the required data as columns or index.
Special support is provided for data files downloaded directly from
IIASA SSP and RCP databases. If you run into any problems loading
data, please make an issue at:
https://github.com/IAMconsortium/pyam/issues
"""
# import data from pd.DataFrame or read from source
if isinstance(data, pd.DataFrame):
self.data = format_data(data.copy())
elif has_ix and isinstance(data, ixmp.TimeSeries):
self.data = read_ix(data, **kwargs)
else:
self.data = read_files(data, **kwargs)
# cast year column to `int` if necessary
if not self.data.year.dtype == 'int64':
self.data.year = cast_years_to_int(self.data.year)
# define a dataframe for categorization and other metadata indicators
self.meta = self.data[META_IDX].drop_duplicates().set_index(META_IDX)
self.reset_exclude()
# execute user-defined code
if 'exec' in run_control():
self._execute_run_control()
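# Illustrative construction (a hedged sketch, not part of the original source; 'my_data.csv'
# stands for any IAMC-formatted data file):
#     df = IamDataFrame('my_data.csv')
#     df = IamDataFrame(iamc_formatted_pd_dataframe)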
def __getitem__(self, key):
_key_check = [key] if isstr(key) else key
if set(_key_check).issubset(self.meta.columns):
return self.meta.__getitem__(key)
else:
return self.data.__getitem__(key)
def __setitem__(self, key, value):
_key_check = [key] if isstr(key) else key
if set(_key_check).issubset(self.meta.columns):
return self.meta.__setitem__(key, value)
else:
return self.data.__setitem__(key, value)
def __len__(self):
return self.data.__len__()
def _execute_run_control(self):
for module_block in run_control()['exec']:
fname = module_block['file']
functions = module_block['functions']
dirname = os.path.dirname(fname)
if dirname:
sys.path.append(dirname)
module = os.path.basename(fname).split('.')[0]
mod = importlib.import_module(module)
for func in functions:
f = getattr(mod, func)
f(self)
def head(self, *args, **kwargs):
"""Identical to pd.DataFrame.head() operating on data"""
return self.data.head(*args, **kwargs)
def tail(self, *args, **kwargs):
"""Identical to pd.DataFrame.tail() operating on data"""
return self.data.tail(*args, **kwargs)
def models(self):
"""Get a list of models"""
return pd.Series(self.meta.index.levels[0])
def scenarios(self):
"""Get a list of scenarios"""
return pd.Series(self.meta.index.levels[1])
def regions(self):
"""Get a list of regions"""
return pd.Series(self.data['region'].unique(), name='region')
def variables(self, include_units=False):
"""Get a list of variables
Parameters
----------
include_units: boolean, default False
include the units
"""
if include_units:
return self.data[['variable', 'unit']].drop_duplicates()\
.reset_index(drop=True).sort_values('variable')
else:
return pd.Series(self.data.variable.unique(), name='variable')
def append(self, other, ignore_meta_conflict=False, inplace=False,
**kwargs):
"""Append any castable object to this IamDataFrame.
Columns in `other.meta` that are not in `self.meta` are always merged;
duplicate region-variable-unit-year rows raise a ValueError.
Parameters
----------
other: pyam.IamDataFrame, ixmp.TimeSeries, ixmp.Scenario,
pd.DataFrame or data file
An IamDataFrame, TimeSeries or Scenario (requires `ixmp`),
pandas.DataFrame or data file with IAMC-format data columns
ignore_meta_conflict : bool, default False
If False and `other` is an IamDataFrame, raise an error if
any meta columns present in `self` and `other` are not identical.
inplace : bool, default False
If True, do operation inplace and return None
"""
ret = copy.deepcopy(self) if not inplace else self
if not isinstance(other, IamDataFrame):
other = IamDataFrame(other, **kwargs)
ignore_meta_conflict = True
diff = other.meta.index.difference(ret.meta.index)
intersect = other.meta.index.intersection(ret.meta.index)
# merge other.meta columns not in self.meta for existing scenarios
if not intersect.empty:
# if not ignored, check that overlapping meta dataframes are equal
if not ignore_meta_conflict:
cols = [i for i in other.meta.columns if i in ret.meta.columns]
if not ret.meta.loc[intersect, cols].equals(
other.meta.loc[intersect, cols]):
conflict_idx = (
pd.concat([ret.meta.loc[intersect, cols],
other.meta.loc[intersect, cols]]
).drop_duplicates()
.index.drop_duplicates()
)
msg = 'conflict in `meta` for scenarios {}'.format(
[i for i in pd.DataFrame(index=conflict_idx).index])
raise ValueError(msg)
cols = [i for i in other.meta.columns if i not in ret.meta.columns]
_meta = other.meta.loc[intersect, cols]
ret.meta = ret.meta.merge(_meta, how='outer',
left_index=True, right_index=True)
# join other.meta for new scenarios
if not diff.empty:
# sorting is not supported by `pd.DataFrame.append()` prior to pandas 0.23
sort_kwarg = {} if int(pd.__version__.split('.')[1]) < 23 \
else dict(sort=False)
ret.meta = ret.meta.append(other.meta.loc[diff, :], **sort_kwarg)
# append other.data (verify integrity for no duplicates)
ret.data.set_index(LONG_IDX, inplace=True)
other.data.set_index(LONG_IDX, inplace=True)
ret.data = ret.data.append(other.data, verify_integrity=True)\
.reset_index(drop=False)
if not inplace:
return ret
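# Hypothetical usage sketch (not from the original source; `other_df` is an assumed
# second IamDataFrame or castable object):
#     combined = df.append(other_df)             # returns a new IamDataFrame
#     df.append('more_data.xlsx', inplace=True)  # or modify this object in place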
def pivot_table(self, index, columns, values='value',
aggfunc='count', fill_value=None, style=None):
"""Returns a pivot table
Parameters
----------
index: str or list of strings
rows for Pivot table
columns: str or list of strings
columns for Pivot table
values: str, default 'value'
dataframe column to aggregate or count
aggfunc: str or function, default 'count'
function used for aggregation,
accepts 'count', 'mean', and 'sum'
fill_value: scalar, default None
value to replace missing values with
style: str, default None
output style for pivot table formatting
accepts 'highlight_not_max', 'heatmap'
"""
index = [index] if isstr(index) else index
columns = [columns] if isstr(columns) else columns
df = self.data
# allow 'aggfunc' to be passed as string for easier user interface
if isstr(aggfunc):
if aggfunc == 'count':
df = self.data.groupby(index + columns, as_index=False).count()
fill_value = 0
elif aggfunc == 'mean':
df = self.data.groupby(index + columns, as_index=False).mean()\
.round(2)
aggfunc = np.sum
fill_value = 0 if style == 'heatmap' else ""
elif aggfunc == 'sum':
aggfunc = np.sum
fill_value = 0 if style == 'heatmap' else ""
df = df.pivot_table(values=values, index=index, columns=columns,
aggfunc=aggfunc, fill_value=fill_value)
return df
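# Illustrative call (a sketch with assumed index/column choices, not from the original source):
#     table = df.pivot_table(index=['region'], columns=['scenario'],
#                            values='value', aggfunc='sum')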
def interpolate(self, year):
"""Interpolate missing values in timeseries (linear interpolation)
Parameters
----------
year: int
year to be interpolated
"""
df = self.pivot_table(index=IAMC_IDX, columns=['year'],
values='value', aggfunc=np.sum)
# drop year-rows where values are already defined
if year in df.columns:
df = df[np.isnan(df[year])]
fill_values = df.apply(fill_series,
raw=False, axis=1, year=year)
fill_values = fill_values.dropna().reset_index()
fill_values = fill_values.rename(columns={0: "value"})
fill_values['year'] = year
self.data = self.data.append(fill_values, ignore_index=True)
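# Illustrative call (assumed year, not from the original source):
#     df.interpolate(2025)  # fills 2025 by linear interpolation where it is missing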
def as_pandas(self, with_metadata=False):
"""Return this as a pd.DataFrame
Parameters
----------
with_metadata : bool, default False
if True, join data with existing metadata
"""
df = self.data
if with_metadata:
df = (df
.set_index(META_IDX)
.join(self.meta)
.reset_index()
)
return df
def timeseries(self):
"""Returns a dataframe in the standard IAMC format
"""
return (
self.data
.pivot_table(index=IAMC_IDX, columns='year')
.value # column name
.rename_axis(None, axis=1)
)
def reset_exclude(self):
"""Reset exclusion assignment for all scenarios to `exclude: False`"""
self.meta['exclude'] = False
def set_meta(self, meta, name=None, index=None):
"""Add metadata columns as pd.Series, list or value (int/float/str)
Parameters
----------
meta: pd.Series, list, int, float or str
column to be added to metadata
(by `['model', 'scenario']` index if possible)
name: str, optional
meta column name (defaults to meta pd.Series.name);
either a meta.name or the name kwarg must be defined
index: pyam.IamDataFrame, pd.DataFrame or pd.MultiIndex, optional
index to be used for setting meta column (`['model', 'scenario']`)
"""
if (name or (hasattr(meta, 'name') and meta.name)) in [None, False]:
raise ValueError('Must pass a name or use a named pd.Series')
# check if meta has a valid index and use it for further workflow
if hasattr(meta, 'index') and hasattr(meta.index, 'names') \
and set(META_IDX).issubset(meta.index.names):
index = meta.index
# if no valid index is provided, add meta as new column `name` and exit
if index is None:
self.meta[name] = list(meta) if islistable(meta) else meta
return # EXIT FUNCTION
# use meta.index if index arg is an IamDataFrame
if isinstance(index, IamDataFrame):
index = index.meta.index
# turn dataframe to index if index arg is a DataFrame
if isinstance(index, pd.DataFrame):
index = index.set_index(META_IDX).index
if not isinstance(index, pd.MultiIndex):
raise ValueError('index cannot be coerced to pd.MultiIndex')
# raise error if index is not unique
if index.duplicated().any():
raise ValueError("non-unique ['model', 'scenario'] index!")
# create pd.Series from meta, index and name if provided
meta = pd.Series(data=meta, index=index, name=name)
meta.name = name = name or meta.name
# reduce index dimensions to model-scenario only
meta = (
meta
.reset_index()
.reindex(columns=META_IDX + [name])
.set_index(META_IDX)
)
# check if trying to add model-scenario index not existing in self
diff = meta.index.difference(self.meta.index)
if not diff.empty:
error = "adding metadata for non-existing scenarios '{}'!"
raise ValueError(error.format(diff))
self._new_meta_column(name)
self.meta[name] = meta[name].combine_first(self.meta[name])
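# Hypothetical usage sketch (assumed values, not from the original source):
#     df.set_meta(meta='reference', name='scenario_family')  # constant for all scenarios
#     df.set_meta(meta=pd.Series([2.5, 3.1], index=df.meta.index), name='warming')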
def categorize(self, name, value, criteria,
color=None, marker=None, linestyle=None):
"""Assign scenarios to a category according to specific criteria
or display the category assignment
Parameters
----------
name: str
category column name
value: str
category identifier
criteria: dict
dictionary with variables mapped to applicable checks
('up' and 'lo' for respective bounds, 'year' for years - optional)
color: str
assign a color to this category for plotting
marker: str
assign a marker to this category for plotting
linestyle: str
assign a linestyle to this category for plotting
"""
# add plotting run control
for kind, arg in [('color', color), ('marker', marker),
('linestyle', linestyle)]:
if arg:
run_control().update({kind: {name: {value: arg}}})
# find all data that matches categorization
rows = _apply_criteria(self.data, criteria,
in_range=True, return_test='all')
idx = _meta_idx(rows)
if len(idx) == 0:
logger().info("No scenarios satisfy the criteria")
return # EXIT FUNCTION
# update metadata dataframe
self._new_meta_column(name)
self.meta.loc[idx, name] = value
msg = '{} scenario{} categorized as `{}: {}`'
logger().info(msg.format(len(idx), '' if len(idx) == 1 else 's',
name, value))
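# Hypothetical usage sketch (assumed variable name and bounds, not from the original source):
#     df.categorize(name='warming_category', value='below_2C',
#                   criteria={'Temperature': {'up': 2.0, 'year': 2100}},
#                   color='blue')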
def _new_meta_column(self, name):
"""Add a column to meta if it doesn't exist, set to value `np.nan`"""
if name is None:
raise ValueError('cannot add a meta column `{}`'.format(name))
if name not in self.meta:
self.meta[name] = np.nan
def require_variable(self, variable, unit=None, year=None,
exclude_on_fail=False):
"""Check whether all scenarios have a required variable
Parameters
----------
variable: str
required variable
unit: str, default None
name of unit (optional)
year: int or list, default None
years (optional)
exclude_on_fail: bool, default False
flag scenarios missing the required variable as `exclude: True`
"""
criteria = {'variable': variable}
if unit:
criteria.update({'unit': unit})
if year:
criteria.update({'year': year})
keep = _apply_filters(self.data, self.meta, criteria)
idx = self.meta.index.difference(_meta_idx(self.data[keep]))
n = len(idx)
if n == 0:
logger().info('All scenarios have the required variable `{}`'
.format(variable))
return
msg = '{} scenario does not include required variable `{}`' if n == 1 \
else '{} scenarios do not include required variable `{}`'
if exclude_on_fail:
self.meta.loc[idx, 'exclude'] = True
msg += ', marked as `exclude: True` in metadata'
logger().info(msg.format(n, variable))
return pd.DataFrame(index=idx).reset_index()
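# Illustrative call (assumed variable name, not from the original source):
#     missing = df.require_variable('Emissions|CO2', year=2030, exclude_on_fail=True)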
def validate(self, criteria={}, exclude_on_fail=False):
"""Validate scenarios using criteria on timeseries values
Parameters
----------
criteria: dict
dictionary with variable keys and check values
('up' and 'lo' for respective bounds, 'year' for years)
exclude_on_fail: bool, default False
flag scenarios failing validation as `exclude: True`
"""
df = _apply_criteria(self.data, criteria, in_range=False)
if not df.empty:
msg = '{} of {} data points do not satisfy the criteria'
logger().info(msg.format(len(df), len(self.data)))
if exclude_on_fail and len(df) > 0:
self._exclude_on_fail(df)
return df
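# Illustrative call (assumed criteria, not from the original source):
#     failed = df.validate(criteria={'Emissions|CO2': {'lo': 0, 'year': 2020}},
#                          exclude_on_fail=True)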
def rename(self, mapping, inplace=False):
"""Rename and aggregate column entries using `groupby.sum()` on values.
When renaming models or scenarios, the uniqueness of the index must be
maintained, and the function will raise an error otherwise.
Parameters
----------
mapping: dict
for each column where entries should be renamed, provide current
name and target name
{<column name>: {<current_name_1>: <target_name_1>,
<current_name_2>: <target_name_2>}}
inplace: bool, default False
if True, do operation inplace and return None
"""
ret = copy.deepcopy(self) if not inplace else self
for col, _mapping in mapping.items():
if col in ['model', 'scenario']:
index = pd.DataFrame(index=ret.meta.index).reset_index()
index.loc[:, col] = index.loc[:, col].replace(_mapping)
if index.duplicated().any():
raise ValueError('Renaming to non-unique {} index!'
.format(col))
ret.meta.index = index.set_index(META_IDX).index
elif col not in ['region', 'variable', 'unit']:
raise ValueError('Renaming by {} not supported!'.format(col))
ret.data.loc[:, col] = ret.data.loc[:, col].replace(_mapping)
ret.data = ret.data.groupby(LONG_IDX).sum().reset_index()
if not inplace:
return ret
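# Illustrative call (assumed mapping, not from the original source):
#     renamed = df.rename({'region': {'EU28': 'Europe'}})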
def convert_unit(self, conversion_mapping, inplace=False):
"""Converts units based on provided unit conversion factors
Parameters
----------
conversion_mapping: dict
for each unit for which a conversion should be carried out,
provide current unit and target unit and conversion factor
{<current unit>: [<target unit>, <conversion factor>]}
inplace: bool, default False
if True, do operation inplace and return None
"""
ret = copy.deepcopy(self) if not inplace else self
for current_unit, (new_unit, factor) in conversion_mapping.items():
factor = pd.to_numeric(factor)
where = ret.data['unit'] == current_unit
ret.data.loc[where, 'value'] *= factor
ret.data.loc[where, 'unit'] = new_unit
if not inplace:
return ret
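# Illustrative call (assumed units and factor, not from the original source):
#     df.convert_unit({'Mt CO2/yr': ['Gt CO2/yr', 0.001]}, inplace=True)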
def check_aggregate(self, variable, components=None, units=None,
exclude_on_fail=False, multiplier=1, **kwargs):
"""Check whether the timeseries data match the aggregation
of components or sub-categories
Parameters
----------
variable: str
variable to be checked for matching aggregation of sub-categories
components: list of str, default None
list of variables, defaults to all sub-categories of `variable`
units: str or list of str, default None
filter variable and components for given unit(s)
exclude_on_fail: boolean, default False
flag scenarios failing validation as `exclude: True`
multiplier: number, default 1
factor when comparing variable and sum of components
kwargs: passed to `np.isclose()`
"""
# default components to all variables one level below `variable`
if components is None:
components = self.filter(variable='{}|*'.format(variable),
level=0).variables()
if not len(components):
msg = '{} - cannot check aggregate because it has no components'
logger().info(msg.format(variable))
return
# filter and groupby data, use `pd.Series.align` for matching index
df_variable, df_components = (
_aggregate_by_variables(self.data, variable, units)
.align(_aggregate_by_variables(self.data, components, units))
)
# use `np.isclose` for checking match
diff = df_variable[~np.isclose(df_variable, multiplier * df_components,
**kwargs)]
if len(diff):
msg = '{} - {} of {} data points are not aggregates of components'
logger().info(msg.format(variable, len(diff), len(df_variable)))
if exclude_on_fail:
self._exclude_on_fail(diff.index.droplevel([2, 3]))
diff = pd.concat([diff], keys=[variable], names=['variable'])
return diff.unstack().rename_axis(None, axis=1)
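# Illustrative call (assumed variable name, not from the original source):
#     diff = df.check_aggregate('Emissions|CO2', exclude_on_fail=True)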
def check_aggregate_regions(self, variable, region='World',
components=None, units=None,
exclude_on_fail=False, **kwargs):
"""Check whether the region timeseries data match the aggregation
of components
Parameters
----------
variable: str
variable to be checked for matching aggregation of components data
region: str
region to be checked for matching aggregation of components data
components: list of str, default None
list of regions, defaults to all regions except region
units: str or list of str, default None
filter variable and components for given unit(s)
exclude_on_fail: boolean, default False
flag scenarios failing validation as `exclude: True`
kwargs: passed to `np.isclose()`
"""
var_df = self.filter(variable=variable, level=0)
if components is None:
components = var_df.filter(region=region, keep=False).regions()
if not len(components):
msg = (
'{} - cannot check regional aggregate because it has no '
'regional components'
)
logger().info(msg.format(variable))
return None
# filter and groupby data, use `pd.Series.align` for matching index
df_region, df_components = (
_aggregate_by_regions(var_df.data, region, units)
.align(_aggregate_by_regions(var_df.data, components, units))
)
df_components.index = df_components.index.droplevel(
"variable"
)
# Add in variables that are included in region totals but which
# aren't included in the regional components.
# For example, if we are looking at World and Emissions|BC, we need
# to add aviation and shipping to the sum of Emissions|BC for each
# of World's regional components to do a valid check.
different_region = components[0]
variable_components = self.filter(
variable="{}|*".format(variable)
).variables()
for var_to_add in variable_components:
var_rows = self.data.variable == var_to_add
region_rows = self.data.region == different_region
var_has_regional_info = (var_rows & region_rows).any()
if not var_has_regional_info:
df_var_to_add = self.filter(
region=region, variable=var_to_add
).data.groupby(REGION_IDX).sum()['value']
df_var_to_add.index = df_var_to_add.index.droplevel("variable")
if len(df_var_to_add):
df_components = df_components.add(df_var_to_add,
fill_value=0)
df_components = pd.concat([df_components], keys=[variable],
names=['variable'])
# use `np.isclose` for checking match
diff = df_region[~np.isclose(df_region, df_components, **kwargs)]
if len(diff):
msg = (
'{} - {} of {} data points are not aggregates of regional '
'components'
)
logger().info(msg.format(variable, len(diff), len(df_region)))
if exclude_on_fail:
self._exclude_on_fail(diff.index.droplevel([2, 3]))
diff = pd.concat([diff], keys=[region], names=['region'])  # api: pandas.concat
# Pre Processing
import pandas as pd
from glob import glob
import numpy as np
from pathlib import Path
def main(infile, path_onehot):
### Principal Dataset
print(f'Reading {infile}...')
data = pd.read_csv(infile)
# Aliases.
Heavy_Partial = data[['Hchain']].copy()
Light_Partial = data[['Lchain']].copy()
#### Get_dummies For Sequence
print('One-hot encoding sequences...')
ByPosition_Amino_Heavy = Heavy_Partial['Hchain'].apply(lambda x:pd.Series(list(x)))
Light_Partial['Lchain'] = Light_Partial['Lchain'].astype(str)
ByPosition_Amino_Light = Light_Partial.Lchain.apply(lambda x:pd.Series(list(x)))
Heavy_Partial = Heavy_Partial.drop(['Hchain'], axis=1)
Heavy_Partial = pd.concat([Heavy_Partial, pd.get_dummies(ByPosition_Amino_Heavy)], axis=1)
Light_Partial = Light_Partial.drop(['Lchain'], axis=1)
Light_Partial = pd.concat([Light_Partial, pd.get_dummies(ByPosition_Amino_Light)], axis=1)
Heavy_Partial = Heavy_Partial.add_suffix('_heavy')
Light_Partial = Light_Partial.add_suffix('_light')
POSITION = list(range(0, 151))
AMINO = 'ARNDCEQGHILKMFPSTWYV'
for i in POSITION:
for j in AMINO:
new_col = str(i) + "_" + str(j)+ "_heavy"
if str(new_col) not in Heavy_Partial:
Heavy_Partial[str(new_col)] = 0
for i in POSITION:
for j in AMINO:
new_col = str(i) + "_" + str(j)+ "_light"
if str(new_col) not in Light_Partial:
Light_Partial[str(new_col)] = 0
onehot_encoded = pd.concat([Heavy_Partial, Light_Partial], axis=1)  # api: pandas.concat
# Developer: <NAME>
# Date: 06-17-2019
# Include CF.py file for Random Subspace Ensemble Classifier based on Conic Functions
# Datasets' last column should be class information.
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import time
import numpy as np
from CF import EnsambleCF
from sklearn import preprocessing
np.random.seed(0)
# 1 if using a separate test file, 0 for cross-validation
if 1:
dfTrain = pd.read_csv("/users/path/train.csv", header=None)
dfTest = pd.read_csv("/users/path/test.csv", header=None)  # api: pandas.read_csv
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import scipy.stats as stats
import os
import matplotlib.pyplot as plt
import traceback
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels
import bambi as bmb
import arviz as az
import sklearn
from datasets.models import RawFlower, RawUNM, RawDAR
from django.contrib.auth.models import User
from api.dilutionproc import predict_dilution
from api import adapters
def getCorrelationPerVisit(data, x_cols, y_cols, corr_method):
'return correlations for sets of features per time period / visit'
for col in x_cols + y_cols:
try:
data[col] = data[col].astype(float)
data.loc[data[col] < 0, col] = np.nan
except:
data[col] = data[col]
df1 = data
rez = []
seen = []
N = None
for x in x_cols:
for y in y_cols:
if x!=y:
for visit in df1['TimePeriod'].unique():
df_visit = df1[df1['TimePeriod']== visit]
try:
temp = df_visit[(~df_visit[x].isna()) & (~df_visit[y].isna()) ]
N = temp.shape[0]
if corr_method == 'spearman':
spearman = stats.spearmanr(temp[x], temp[y])
rez.append([x,y,N,visit,spearman.correlation,spearman.pvalue])
else:
spearman = stats.pearsonr(temp[x], temp[y])
rez.append([x,y,N,visit,spearman[0],spearman[1]])
except:
print('err')
return pd.DataFrame(rez, columns = ['x','y','N','visit','corr','pval']).sort_values(by = 'pval')
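# Illustrative call (hypothetical column names, not from the original source):
#     rez = getCorrelationPerVisit(data, ['UTAS', 'UHG'], ['Outcome_weeks'], 'spearman')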
def getCorrelation(data, x_cols, y_cols, corr_method):
for col in x_cols + y_cols:
try:
data[col] = data[col].astype(float)
data.loc[data[col] < 0, col] = np.nan
except:
data[col] = data[col]
df1 = data
rez = []
seen = []
N = None
for x in x_cols:
for y in y_cols:
if x!=y:
try:
temp = df1[(~df1[x].isna()) & (~df1[y].isna())]
N = temp.shape[0]
if corr_method == 'spearman':
spearman = stats.spearmanr(temp[x], temp[y])
rez.append([x,y,N,spearman.correlation,spearman.pvalue])
else:
spearman = stats.pearsonr(temp[x], temp[y])
rez.append([x,y,N,spearman[0],spearman[1]])
except:
print('err')
return pd.DataFrame(rez, columns = ['x','y','N','corr','pval']).sort_values(by = 'pval')
def corr_sig(df=None):
p_matrix = np.zeros(shape=(df.shape[1],df.shape[1]))
for col in df.columns:
for col2 in df.drop(col,axis=1).columns:
df_temp = df[(~df[col].isna()) & (~df[col2].isna())]
if df_temp.shape[0] > 2:
spearman = stats.spearmanr(df_temp[col], df_temp[col2])
p_matrix[df.columns.to_list().index(col),df.columns.to_list().index(col2)] = spearman.pvalue
else:
p_matrix[df.columns.to_list().index(col),df.columns.to_list().index(col2)] = 1
return p_matrix
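# Illustrative call (assumed numeric columns, not from the original source):
#     p_matrix = corr_sig(data[['UTAS', 'UHG', 'Outcome_weeks']])
#     mask = p_matrix >= 0.05  # True where the correlation is not significant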
def getCorrelationHeatmap(data, to_corr_cols):
for col in to_corr_cols:
try:
data[col] = data[col].astype(float)
data.loc[data[col] < 0, col] = np.nan
except:
data[col] = data[col]
#sns.set_theme(style="white",font_scale=1.75)
# Compute the correlation matrix
corr = data[to_corr_cols].corr(method = 'spearman').round(4)
# Generate a mask for the upper triangle
p_values = corr_sig(data[to_corr_cols])
mask = np.invert(np.tril(p_values<0.05))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(40, 30))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(0, 230, as_cmap=True)
g = sns.heatmap(corr,
cmap = cmap, vmax=.3, center=0, annot = True,
square=True, linewidths=.5, annot_kws={"size": 35}, mask=mask)
#g.ax_heatmap.set_xticklabels(g.ax_heatmap.get_xmajorticklabels(), fontsize = 40)
g.set_xticklabels(g.get_xmajorticklabels(), fontsize = 30, rotation = 90)
g.set_yticklabels(g.get_ymajorticklabels(), fontsize = 30, rotation = 0)
# Draw the heatmap with the mask and correct aspect ratio
return g
def cohortdescriptive(df_all):
'function that returns count, mean, and std per cohort'
df_all = df_all.drop_duplicates(['CohortType','PIN_Patient','TimePeriod'])
b = df_all.groupby(['CohortType']).agg(['count','mean','std']).transpose().reset_index()
df2 = b.pivot(index='level_0', columns='level_1', values=['NEU','DAR','UNM'])
df2.columns = list(map("_".join, [[str(x[0]),x[1]] for x in list(df2.columns)]))
return df2
def q1(x):
return x.quantile(0.25)
def q2(x):
return x.median()
def q3(x):
return x.quantile(0.75)
def cohortdescriptive_all(df_all):
'summary: count, mean, std, minimum, quartile 1, median, quartile 3, and maximum'
#df_all = df_all.drop_duplicates(['CohortType','PIN_Patient','TimePeriod'])
df_all = df_all.select_dtypes(include=['float64'])
categorical = ['CohortType','TimePeriod','Member_c','Outcome','folic_acid_supp', 'PIN_Patient',
'ethnicity','race','smoking','preg_complications','babySex','LGA','SGA','education']
df_all = df_all.loc[:, ~df_all.columns.isin(categorical)]
#b = df_all.agg(['count','mean','std',lambda x: x.quantile(0.25), lambda x: x.quantile(0.50)])
df_all[df_all < 0 ] = np.nan
b = df_all.agg(['count','mean','std','min', q1, 'median', q3, 'max']).transpose().round(4)
return b
def cohortdescriptiveOverall(data):
for col in data.columns:
try:
data[col] = data[col].astype(float)
except:
data[col] = data[col]
df_all = data
cohort = df_all['CohortType'].unique()[0]
b = df_all.groupby(['CohortType']).agg(['count','mean','std']).transpose().reset_index()
df2 = b.pivot(index='level_0', columns='level_1', values=[cohort])
df2.columns = list(map("_".join, [[str(x[0]),x[1]] for x in list(df2.columns)]))
return df2
def cohortDescriptiveByOutcome(data):
for col in data.columns:
try:
data[col] = data[col].astype(float)
except:
data[col] = data[col]
b = data.groupby(['Outcome']).agg(['count','mean','std']).transpose().reset_index()
df2 = b.pivot(index='level_0', columns='level_1', values=[0.0,1.0])
df2.columns = list(map("_".join, [[str(x[0]),x[1]] for x in list(df2.columns)]))
return df2
def oneHotEncoding(df, toencode):
#TODO: add onehot encoding for race, gender, etc.
# one-hot encode each requested column and concatenate the results
dums = [pd.get_dummies(df[var], prefix=var) for var in toencode]
return pd.concat(dums, axis=1)
def merge3CohortFrames(df1,df2,df3):
'merge on feature intersections'
#only consider visit 2 for NEU
df2 = df2[df2['TimePeriod'] == 2]
for as_feature in ['UASB', 'UDMA', 'UAS5', 'UIAS', 'UAS3', 'UMMA']:
if as_feature not in df1.columns:
df1[as_feature] = np.nan
if as_feature not in df2.columns:
df2[as_feature] = np.nan
if as_feature not in df3.columns:
df3[as_feature] = np.nan
s1 = set(df1.columns)
s2 = set(df2.columns)
s3 = set(df3.columns)
cc = set.intersection(s1, s2, s3)
df_all = pd.concat([df1[cc],df2[cc],df3[cc]])
for x in df_all:
try:
df_all[x] = df_all[x].astype(float)
except:
pass
return df_all
def merge2CohortFrames(df1,df2):
'merge on feature intersections'
for as_feature in ['UASB', 'UDMA', 'UAS5', 'UIAS', 'UAS3', 'UMMA']:
if as_feature not in df1.columns:
df1[as_feature] = np.nan
if as_feature not in df2.columns:
df2[as_feature] = np.nan
s1 = set(df1.columns)
s2 = set(df2.columns)
cc = set.intersection(s1, s2)
df_all = pd.concat([df1[cc],df2[cc]])
for x in df_all:
try:
df_all[x] = df_all[x].astype(float)
except:
pass
return df_all
def categoricalCounts(df):
#each participant should only have 1 measurement per variable
cohort = df['CohortType'].unique()
categorical1 = ['CohortType','TimePeriod','Member_c','Outcome','folic_acid_supp', 'PIN_Patient',
'ethnicity','race','smoking','preg_complications','babySex','LGA','SGA','education']
categorical2 = ['CohortType','TimePeriod','Member_c','Outcome','folic_acid_supp', 'PIN_Patient',
'ethnicity','race','smoking','preg_complications','babySex','LGA','SGA','education','GDMtest1','GDMtest2']
#TODO: fix this should detect the dataset type
try:
df22 = df[categorical1].drop_duplicates(['PIN_Patient'])
categorical1.remove('PIN_Patient')
df22 = df22[categorical1]
melted = pd.melt(df22,id_vars=['CohortType'])
df33 = melted.groupby(['variable','value'])['value'].count()
df33.index.names = ['variable', 'cat']
except:
df22 = df[categorical2].drop_duplicates(['PIN_Patient'])
categorical2.remove('PIN_Patient')
df22 = df22[categorical2]
melted = pd.melt(df22, id_vars=['CohortType'])  # api: pandas.melt
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import pandas as pd
import time
import math
import datetime
def getComment(justification, lawsProvisions):
comment = ""
#filter out value NaN = float:
if isinstance(justification, unicode) and justification!="<Null>":
comment+=justification
if isinstance(lawsProvisions, unicode) and lawsProvisions!="<Null>" and lawsProvisions!="Not applicable":
comment+="\n"+lawsProvisions
return comment
input_filename = "LegalSecurityIndicators_2018_04_09.xls"
# save to file
timestr = time.strftime("%Y%m%d-%H%M%S")
filename_output = timestr+"-LMM-LSIC.xlsx"
xls = pd.ExcelFile(input_filename)
sheetCommunity = xls.parse(sheet_name="Community", header=0, keep_default_na=False, na_values=["N/A"])
#head = sheetCommunity.head(0)
sheetIndigenousPeople = xls.parse(sheet_name="IP", header=0, keep_default_na=False, na_values=["N/A"])
# add headers
df = pd.DataFrame(columns=["indicator", "country", "year", "value", "comment"])  # api: pandas.DataFrame
import argparse
import numpy as np
import pandas as pd
import sys
import datetime as dt
from dateutil.parser import parse
from agent.ExchangeAgent import ExchangeAgent
from agent.NoiseAgent import NoiseAgent
from agent.ValueAgent import ValueAgent
from agent.examples.MarketMakerAgent import MarketMakerAgent
from agent.examples.MomentumAgent import MomentumAgent
from agent.execution.TWAPExecutionAgent import TWAPExecutionAgent
from agent.execution.VWAPExecutionAgent import VWAPExecutionAgent
from Kernel import Kernel
from util import util
from util.order import LimitOrder
from util.oracle.ExternalFileOracle import ExternalFileOracle
########################################################################################################################
############################################### GENERAL CONFIG #########################################################
parser = argparse.ArgumentParser(description='Detailed options for market replay config.')
parser.add_argument('-c',
'--config',
required=True,
help='Name of config file to execute')
parser.add_argument('-t',
'--ticker',
required=True,
help='Ticker (symbol) to use for simulation')
parser.add_argument('-d', '--historical-date',
required=True,
type=parse,
help='historical date being simulated in format YYYYMMDD.')
parser.add_argument('-f',
'--fundamental-file-path',
required=True,
help="Path to external fundamental file.")
parser.add_argument('-e',
'--execution_agents',
action='store_true',
help='Flag to add the execution agents')
parser.add_argument('-s',
'--seed',
type=int,
default=None,
help='numpy.random.seed() for simulation')
parser.add_argument('-l',
'--log_dir',
default=None,
help='Log directory name (default: unix timestamp at program start)')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Maximum verbosity!')
parser.add_argument('--config_help',
action='store_true',
help='Print argument options for this config file')
args, remaining_args = parser.parse_known_args()
if args.config_help:
parser.print_help()
sys.exit()
seed = args.seed # Random seed specification on the command line.
if not seed: seed = int(pd.Timestamp.now().timestamp() * 1000000) % (2 ** 32 - 1)
np.random.seed(seed)
util.silent_mode = not args.verbose
LimitOrder.silent_mode = not args.verbose
simulation_start_time = dt.datetime.now()
print("Simulation Start Time: {}".format(simulation_start_time))
print("Configuration seed: {}".format(seed))
######################## Agents Config #########################################################################
# Historical date to simulate.
historical_date_pd = pd.to_datetime(args.historical_date)
mkt_open = historical_date_pd + pd.to_timedelta('09:30:00')  # api: pandas.to_timedelta
"""
Some of the runs were very unstable with the smallest epsilon, so we need to make some exceptions.
"""
import pandas as pd
import pickle
import numpy as np
from itertools import product
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.split(script_dir)[0] + "/"
## Load DPVI fits
epsilons = [0.74, 1.99, 3.92]
#epsilons = [1.99, 3.92]
epsilons = np.array(epsilons)
seeds = range(1234,1244)
n_runs = len(seeds)*10
## No stratification
## For females
syn_no_strat_coef_female_dict = {}
syn_no_strat_p_value_female_dict = {}
for eps in epsilons:
female_coefs = []
for seed in seeds:
for rep in range(10):
try:
female_coef_df = pd.read_csv(parent_dir+'R/ablation_study/no_strat/csvs/female_coef_matrix_dpvi_{}_{}_{}.csv'.format(seed, eps, rep), index_col=0)
female_p_value_df = pd.read_csv(parent_dir+'R/ablation_study/no_strat/csvs/female_p_value_matrix_dpvi_{}_{}_{}.csv'.format(seed, eps, rep), index_col=0)
if len(female_coefs)==0:
female_coefs = female_coef_df
female_p_values = female_p_value_df
else:
female_coefs = pd.concat([female_coefs, female_coef_df], axis=1)
female_p_values = pd.concat([female_p_values, female_p_value_df], axis=1)
except:
pass
syn_no_strat_coef_female_df = pd.DataFrame(female_coefs.values.T, columns=female_coefs.index)
syn_no_strat_p_value_female_df = pd.DataFrame(female_p_values.values.T, columns=female_p_values.index)  # api: pandas.DataFrame
|
# -*- coding: UTF-8 -*-
import pandas as pd
import tushare as ts
import time
import matplotlib.pyplot as plt
from function import getNdatAgo
from sqlalchemy import create_engine
from configparser import ConfigParser
cf = ConfigParser()
cf.read('./gpst.conf')
dbHost = cf.get("db", "dbHost")
dbPort = cf.get("db", "dbPort")
dbUser = cf.get("db", "dbUser")
dbPass = cf.get("db", "dbPass")
dbName = cf.get("db", "dbName")
engine = create_engine(
"mysql://" + dbUser + ":" + dbPass + "@" + dbHost + ":" + dbPort + "/" + dbName + "?charset=utf8")
conn = engine.connect()
def draw(code) :
# fetch the data
# df = ts.get_k_data(code, start="2017-09-01")
tDate = time.strftime("%Y-%m-%d", time.localtime())
nDate = getNdatAgo(tDate, 100)
sql = "SELECT * FROM finance.tick_data WHERE code = '" + code + "' AND `date` > '" + nDate + "'"
df = pd.read_sql(sql, con=engine)  # api: pandas.read_sql
|
import jieba
import jieba.analyse as analyse
import jieba.posseg  # tokenization with part-of-speech tags
import copy
import wordcloud
import streamlit as st
import pandas as pd
import numpy as np
import re
import matplotlib.pyplot as plt
import matplotlib
from wordcloud import WordCloud  # word cloud package
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import imageio
matplotlib.rcParams['figure.figsize'] = (18.0, 18.0)  # set figure width and height
from wordcloud import WordCloud, ImageColorGenerator
import pylab
data = list(open(r'D:\conda\TF-idf\data\test.txt', encoding='utf-8'))
datawenzi = copy.copy(data[0])
data = jieba.lcut(data[0])  # tokenize without part-of-speech tags
# print(jieba.posseg.lcut(data[0]))  # tokenize with part-of-speech tags
data = pd.DataFrame(data, columns=['ci'])
# Method 1: use a regular expression to strip the newline characters from the stop-word list
# stop=list(open(r'D:\conda\TF-idf\data\stopwords.txt',encoding='utf-8'))
# for i in range(len(stop)):
# stop[i]=re.sub('\n','',stop[i])
# stop=pd.DataFrame(stop,columns=['stop'])
# Method 2: load the full stop-word list directly with pandas; quoting=3 means content in double quotes is also recognized, and the txt file is read with encoding='utf-8'
stop = pd.read_csv(r'D:\conda\TF-idf\data\stopwords.txt', encoding='utf-8', index_col=False, sep='\t', names=['stop'],
quoting=3)
data = data[~data.ci.isin(stop.stop)]  # drop stop words from data by matching
def cipin(data1):  # takes data (the tokenized words)
data1gr = data1.groupby('ci')['ci'].agg(np.size)
data1gr.name = 'shu'
data1gr = data1gr.reset_index().sort_values(by=['shu'], ascending=False)
return data1gr
def tf(data2):  # takes datawenzi (the raw text)
key = analyse.extract_tags(data2, topK=30, withWeight=True, allowPOS=())  # withWeight=True also returns the weights
keyci = []
keyshu = []
for i in range(len(key)):
keyci.append(key[i][0])
keyshu.append(key[i][1])
keyci1 = pd.DataFrame(keyci, columns=['ci'])
keyshu1 = pd.DataFrame(keyshu, columns=['shu'])
keynew = pd.concat([keyci1, keyshu1], axis=1)  # api: pandas.concat
|
# -*- coding: utf-8 -*-
import sys
import os
import pandas as pd
PROJECT_ID = "dots-stock" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
USER = "shkim01" # <---CHANGE THIS
BUCKET_NAME = "gs://pipeline-dots-stock" # @param {type:"string"}
PIPELINE_ROOT = f"{BUCKET_NAME}/pipeline_root/{USER}"
from typing import NamedTuple
from kfp import dsl
from kfp.v2 import compiler
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics,
component)
from kfp.v2.google.client import AIPlatformClient
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
)
def get_market_info(
# top30_univ_dataset: Output[Dataset],
market_info_dataset: Output[Dataset],
today: str,
n_days: int
) -> str:
import pandas as pd
from pandas.tseries.offsets import CustomBusinessDay
from trading_calendars import get_calendar
import functools
import pickle
import logging
import networkx as nx
import os
from sqlalchemy import create_engine
# today = pd.Timestamp.now('Asia/Seoul').strftime('%Y%m%d')
# today = '20210809'
cal_KRX = get_calendar('XKRX')
custombd_KRX = CustomBusinessDay(holidays=cal_KRX.precomputed_holidays)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
# Preference
#-----------------------------------------------------------------------------
AWS_DB_ID = 'gb_master'
AWS_DB_PWD = 'qwert12345'
AWS_DB_ADDRESS = 'kwdb-daily.cf6e7v8fhede.ap-northeast-2.rds.amazonaws.com'
AWS_DB_PORT = '3306'
DB_DATABASE_NAME_daily_naver = 'daily_naver'
PROJECT_ID = 'dots-stock'
db_daily_naver_con = create_engine('mysql+pymysql://{0}:{1}@{2}:{3}/{4}?charset=utf8'
.format(AWS_DB_ID, AWS_DB_PWD, AWS_DB_ADDRESS, AWS_DB_PORT, DB_DATABASE_NAME_daily_naver),
encoding='utf8',
echo=False)
# @functools.lru_cache()
def get_market_from_naver_aws(date_ref):
'''
fetch the parsed content from daily naver exactly as stored in the DB
'''
with db_daily_naver_con.connect() as conn:
table_name = f'{date_ref}_daily_allstock_naver'
str_sql = f'select * from {table_name} order by 등락률 DESC'
df = pd.read_sql_query(str_sql, conn) # self.get_db_daily_naver_con())
df = df.reset_index().rename(columns={'index':'순위_상승률', 'N':'순위_시가총액'})
df['순위_상승률'] = df.순위_상승률 + 1
return df
def get_krx_on_dates_n_days_ago(date_ref, n_days=20):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(
end=date_ref, freq='C', periods=n_days,
holidays=cal_KRX.precomputed_holidays) ]
# 1. Market data
#------------------------------------------------------------------------------
def get_markets_aws(date_ref, n_days):
dates_n_days_ago = get_krx_on_dates_n_days_ago(date_ref, n_days)
df_market = pd.DataFrame()
for date in dates_n_days_ago:
df_ = get_market_from_naver_aws(date)
# logger.debug(f'date : {date} and df_.shape {df_.shape}' )
df_market = df_market.append(df_)
return df_market
df_market = get_markets_aws(date_ref=today, n_days=n_days)
df_market.to_csv(market_info_dataset.path)
return today
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def get_base_item(
market_info_dataset: Input[Dataset],
base_item_dataset: Output[Dataset]
):
import pandas as pd
# helper function
def get_top30_list(df_market):
cols_out = ['날짜','종목코드','종목명']
return (df_market
.sort_values(['날짜','등락률'], ascending=False)
.groupby('날짜')
.head(30)[cols_out])
df_market = pd.read_csv(market_info_dataset.path)
df_base_item = get_top30_list(df_market)
df_base_item.to_csv(base_item_dataset.path)
@component(
base_image="gcr.io/dots-stock/python-img-v5.2"
)
def get_bros(
today: str,
n_days: int,
bros_univ_dataset: Output[Dataset]
) -> str :
'''
Returns:
list
'''
import pandas as pd
import pandas_gbq
import networkx as nx
from trading_calendars import get_calendar
PROJECT_ID = 'dots-stock'
cal_KRX = get_calendar('XKRX')
# helper functions
#-----------------------------------------------------------------------------
def get_krx_on_dates_n_days_ago(date_ref, n_days=20):
return [date.strftime('%Y%m%d')
for date in pd.bdate_range(
end=date_ref, freq='C', periods=n_days,
holidays=cal_KRX.precomputed_holidays) ]
def get_corr_pairs_gbq(date_ref, period):
date_ref_ = pd.Timestamp(date_ref).strftime('%Y-%m-%d')
sql = f'''
SELECT
DISTINCT source,
target,
corr_value,
period,
date
FROM
`dots-stock.krx_dataset.corr_ohlc_part1`
WHERE
date = "{date_ref_}"
AND period = {period}
ORDER BY
corr_value DESC
LIMIT
1000'''
df = pandas_gbq.read_gbq(sql, project_id=PROJECT_ID)
return df
def find_bros(date_ref, period):
'''clique over 3 nodes '''
df_edgelist = get_corr_pairs_gbq(date_ref, period)
g = nx.from_pandas_edgelist(df_edgelist, edge_attr=True)
bros_ = nx.find_cliques(g)
bros_3 = [bros for bros in bros_ if len(bros) >=3]
set_bros = set([i for l_i in bros_3 for i in l_i])
g_gang = g.subgraph(set_bros)
df_gangs_edgelist = nx.to_pandas_edgelist(g_gang)
return df_gangs_edgelist
def find_gang(date_ref):
df_gang = pd.DataFrame()
for period in [20, 40, 60, 90, 120]:
df_ = find_bros(date_ref, period=period)
df_gang = df_gang.append(df_)
return df_gang
# jobs
dates = get_krx_on_dates_n_days_ago(date_ref=today, n_days=n_days)
df_bros = pd.DataFrame()
for date in dates:
df = find_gang(date_ref=date)
df_bros = df_bros.append(df)
df_bros.to_csv(bros_univ_dataset.path)
return 'OK'
@component(
base_image="amancevice/pandas:1.3.2-slim"
)
def get_univ_for_price(
# date_ref: str,
base_item_dataset: Input[Dataset],
bros_dataset: Input[Dataset],
univ_dataset: Output[Dataset],
):
import pandas as pd
import logging
import json
logger = logging.getLogger(__name__)
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.DEBUG)
# base item
df_top30s = pd.read_csv(base_item_dataset.path,
index_col=0,
dtype={'날짜': str}).reset_index(drop=True)
# load edge_list to make bros
df_ed = pd.read_csv(bros_dataset.path, index_col=0).reset_index(drop=True)
df_ed_r = df_ed.copy()
df_ed_r.rename(columns={'target':'source', 'source':'target'}, inplace=True)
df_ed2 = df_ed.append(df_ed_r, ignore_index=True)
df_ed2['date'] = pd.to_datetime(df_ed2.date).dt.strftime('%Y%m%d')
dic_univ = {}
for date, df in df_top30s.groupby('날짜'):
logger.debug(f'date: {date}')
l_top30 = df.종목코드.to_list()
l_bro = df_ed2[(df_ed2.date == date) &
(df_ed2.source.isin(l_top30))].target.unique().tolist()
dic_univ[date] = list(set(l_top30 + l_bro ))
with open(univ_dataset.path, 'w', encoding='utf8') as f:
json.dump(dic_univ, f)
@component(
base_image="gcr.io/dots-stock/python-img-v5.2",
# packages_to_install = ["tables", "pandas_gbq", "finance-datareader", "bs4", "pickle5"] # add 20210715 FIX pipeline
)
def get_adj_prices(
today: str,
dic_univ_dataset: Input[Dataset],
adj_price_dataset: Output[Dataset]
) -> str:
import json
import FinanceDataReader as fdr
from ae_module.ae_logger import ae_log
import pandas as pd
# with open(dic_univ_dataset.path, 'rb') as f:
# dic_univ = pickle.load(f)
with open(dic_univ_dataset.path, 'r') as f:
dic_univ = json.load(f)
codes_stock = []
for v in dic_univ.values():
codes_stock.extend(v)
# drop duplicates
codes_stock = list(set(codes_stock))
def get_price_adj(code, start, end):
return fdr.DataReader(code, start=start, end=end)
def get_price(l_univ, date_start, date_end):
df_price = pd.DataFrame()
for code in l_univ :
df_ = get_price_adj(code, date_start, date_end)
df_['code'] = code
# df_['price'] = df_['Close'] / df_.Close.iloc[0]
df_price = df_price.append(df_)
return df_price
ae_log.debug(f'codes_stock {codes_stock.__len__()}')
date_start = '20210101'
date_end = today
df_adj_price = get_price(codes_stock, date_start=date_start, date_end=date_end)
df_adj_price.to_csv(adj_price_dataset.path)
ae_log.debug(df_adj_price.shape)
return 'good'
@component(
# base_image="gcr.io/deeplearning-platform-release/sklearn-cpu"
base_image="amancevice/pandas:1.3.2-slim"
)
def get_target(
df_price_dataset: Input[Dataset],
df_target_dataset: Output[Dataset]
):
import pandas as pd
import numpy as np
def make_target(df):
df_ = df.copy()
df_.sort_values(by='date', inplace=True)
df_['high_p1'] = df_.high.shift(-1)
df_['high_p2'] = df_.high.shift(-2)
df_['high_p3'] = df_.high.shift(-3)
df_['close_p1'] = df_.close.shift(-1)
df_['close_p2'] = df_.close.shift(-2)
df_['close_p3'] = df_.close.shift(-3)
df_['change_p1'] = (df_.close_p1 - df_.close) / df_.close
df_['change_p2'] = (df_.close_p2 - df_.close) / df_.close
df_['change_p3'] = (df_.close_p3 - df_.close) / df_.close
df_['change_p1_over5'] = df_['change_p1'] > 0.05
df_['change_p2_over5'] = df_['change_p2'] > 0.05
df_['change_p3_over5'] = df_['change_p3'] > 0.05
df_['change_p1_over10'] = df_['change_p1'] > 0.1
df_['change_p2_over10'] = df_['change_p2'] > 0.1
df_['change_p3_over10'] = df_['change_p3'] > 0.1
df_['close_high_1'] = (df_.high_p1 - df_.close) / df_.close
df_['close_high_2'] = (df_.high_p2 - df_.close) / df_.close
df_['close_high_3'] = (df_.high_p3 - df_.close) / df_.close
df_['close_high_1_over10'] = df_['close_high_1'] > 0.1
df_['close_high_2_over10'] = df_['close_high_2'] > 0.1
df_['close_high_3_over10'] = df_['close_high_3'] > 0.1
df_['close_high_1_over5'] = df_['close_high_1'] > 0.05
df_['close_high_2_over5'] = df_['close_high_2'] > 0.05
df_['close_high_3_over5'] = df_['close_high_3'] > 0.05
df_['target_over10'] = np.logical_or.reduce([
df_.close_high_1_over10,
df_.close_high_2_over10,
df_.close_high_3_over10])
df_['target_over5'] = np.logical_or.reduce([
df_.close_high_1_over5,
df_.close_high_2_over5,
df_.close_high_3_over5])
df_['target_close_over_10'] = np.logical_or.reduce([
df_.change_p1_over10,
df_.change_p2_over10,
df_.change_p3_over10])
df_['target_close_over_5'] = np.logical_or.reduce([
df_.change_p1_over5,
df_.change_p2_over5,
df_.change_p3_over5])
df_['target_mclass_close_over10_under5'] = \
np.where(df_['change_p1'] > 0.1,
1, np.where(df_['change_p1'] > -0.05, 0, -1))
df_['target_mclass_close_p2_over10_under5'] = \
np.where(df_['change_p2'] > 0.1,
1, np.where(df_['change_p2'] > -0.05, 0, -1))
df_['target_mclass_close_p3_over10_under5'] = \
np.where(df_['change_p3'] > 0.1,
1, np.where(df_['change_p3'] > -0.05, 0, -1))
df_.dropna(subset=['high_p3'], inplace=True)
return df_
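# Worked example of the labels above (hypothetical numbers, not from the original source):
# with close = 100 and high_p1 = 112, close_high_1 = (112 - 100) / 100 = 0.12,
# so close_high_1_over10 and close_high_1_over5 are True, which in turn makes
# target_over10 and target_over5 True for that row.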
def get_target_df(df_price):
df_price.reset_index(inplace=True)
df_price.columns = df_price.columns.str.lower()
df_target = df_price.groupby('code').apply(lambda df: make_target(df))
df_target = df_target.reset_index(drop=True)
# df_target['date'] = df_target.date.str.replace('-', '')
return df_target
df_price = pd.read_csv(df_price_dataset.path)
df_target = get_target_df(df_price=df_price)
df_target.to_csv(df_target_dataset.path)
@component(
base_image="gcr.io/deeplearning-platform-release/sklearn-cpu",
packages_to_install=["stockstats"]
)
def get_techindi(
df_price_dataset: Input[Dataset],
df_techini_dataset: Output[Dataset]
):
TECHNICAL_INDICATORS_LIST = ['macd',
'boll_ub',
'boll_lb',
'rsi_30',
'dx_30',
'close_30_sma',
'close_60_sma']
from stockstats import StockDataFrame as Sdf
from sklearn.preprocessing import MaxAbsScaler
import pandas as pd
class FeatureEngineer:
"""Provides methods for preprocessing the stock price data
Attributes
----------
use_technical_indicator : boolean
use technical indicator or not
tech_indicator_list : list
a list of technical indicator names (modified from config.py)
use_turbulence : boolean
use turbulence index or not
user_defined_feature : boolean
use user defined features or not
Methods
-------
preprocess_data()
main method to do the feature engineering
"""
def __init__(
self,
use_technical_indicator=True,
tech_indicator_list=TECHNICAL_INDICATORS_LIST,
user_defined_feature=False,
):
self.use_technical_indicator = use_technical_indicator
self.tech_indicator_list = tech_indicator_list
self.user_defined_feature = user_defined_feature
def preprocess_data(self, df):
"""main method to do the feature engineering
:param df: source dataframe
:return: (df) pandas dataframe with the engineered features added
"""
#clean data
df = self.clean_data(df)
# add technical indicators using stockstats
if self.use_technical_indicator == True:
df = self.add_technical_indicator(df)
print("Successfully added technical indicators")
# add user defined feature
if self.user_defined_feature == True:
df = self.add_user_defined_feature(df)
print("Successfully added user defined features")
# fill the missing values at the beginning and the end
df = df.fillna(method="bfill").fillna(method="ffill")
return df
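# Hypothetical usage sketch (assumes df_price has 'date', 'tic', 'close', ... columns;
# not from the original source):
#     fe = FeatureEngineer(use_technical_indicator=True)
#     df_tech = fe.preprocess_data(df_price)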
def clean_data(self, data):
"""
clean the raw data
deal with missing values
reasons: stocks could be delisted, not incorporated at the time step
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df=df.sort_values(['date','tic'],ignore_index=True)
df.index = df.date.factorize()[0]
merged_closes = df.pivot_table(index = 'date',columns = 'tic', values = 'close')
merged_closes = merged_closes.dropna(axis=1)
tics = merged_closes.columns
df = df[df.tic.isin(tics)]
return df
def add_technical_indicator(self, data):
"""
calculate technical indicators
use stockstats package to add technical inidactors
:param data: (df) pandas dataframe
:return: (df) pandas dataframe
"""
df = data.copy()
df = df.sort_values(by=['tic','date'])
stock = Sdf.retype(df.copy())
unique_ticker = stock.tic.unique()
for indicator in self.tech_indicator_list:
indicator_df = pd.DataFrame()
for i in range(len(unique_ticker)):
try:
temp_indicator = stock[stock.tic == unique_ticker[i]][indicator]
temp_indicator = pd.DataFrame(temp_indicator)  # api: pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
we test .agg behavior / note that .apply is tested
generally in test_groupby.py
"""
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
from functools import partial
import numpy as np
from numpy import nan
import pandas as pd
from pandas import (date_range, MultiIndex, DataFrame,
Series, Index, bdate_range, concat)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby import SpecificationError, DataError
from pandas.compat import OrderedDict
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
class TestGroupByAggregate(object):
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_agg_api(self):
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ['data1', 'data2']
result = grouped.agg(peak_to_peak)
assert_frame_equal(result, expected)
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_agg_datetimes_mixed(self):
data = [[1, '2012-01-01', 1.0], [2, '2012-01-02', 2.0], [3, None, 3.0]]
df1 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date() if row[1]
else None, row[2]] for row in data]
df2 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
df1['weights'] = df1['value'] / df1['value'].sum()
gb1 = df1.groupby('date').aggregate(np.sum)
df2['weights'] = df1['value'] / df1['value'].sum()
gb2 = df2.groupby('date').aggregate(np.sum)
assert (len(gb1) == len(gb2))
def test_agg_period_index(self):
from pandas import period_range, PeriodIndex
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [('s1', s1), ('s2', s2)]
df = DataFrame.from_items(series)
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes(self):
# GH 12821
df = DataFrame(
{'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
'time': date_range('1/1/2011', periods=8, freq='H')})
df.loc[[0, 1, 2, 5], 'time'] = None
# test for `first` function
exp = df.loc[[0, 3, 4, 6]].set_index('class')
grouped = df.groupby('class')
assert_frame_equal(grouped.first(), exp)
assert_frame_equal(grouped.agg('first'), exp)
assert_frame_equal(grouped.agg({'time': 'first'}), exp)
assert_series_equal(grouped.time.first(), exp['time'])
assert_series_equal(grouped.time.agg('first'), exp['time'])
# test for `last` function
exp = df.loc[[0, 3, 4, 7]].set_index('class')
grouped = df.groupby('class')
assert_frame_equal(grouped.last(), exp)
assert_frame_equal(grouped.agg('last'), exp)
assert_frame_equal(grouped.agg({'time': 'last'}), exp)
assert_series_equal(grouped.time.last(), exp['time'])
assert_series_equal(grouped.time.agg('last'), exp['time'])
# count
exp = pd.Series([2, 2, 2, 2],
index=Index(list('ABCD'), name='class'),
name='time')
assert_series_equal(grouped.time.agg(len), exp)
assert_series_equal(grouped.time.size(), exp)
exp = pd.Series([0, 1, 1, 2],
index=Index(list('ABCD'), name='class'),
name='time')
assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes(self):
# similar to GH12821
# xref #11444
u = [datetime(2015, x + 1, 1) for x in range(12)]
v = list('aaabbbbbbccd')
df = pd.DataFrame({'X': v, 'Y': u})
result = df.groupby('X')['Y'].agg(len)
expected = df.groupby('X')['Y'].count()
assert_series_equal(result, expected)
def test_agg_must_agg(self):
grouped = self.df.groupby('A')['C']
pytest.raises(Exception, grouped.agg, lambda x: x.describe())
pytest.raises(Exception, grouped.agg, lambda x: x.index[:2])
def test_agg_ser_multi_key(self):
# TODO(wesm): unused
ser = self.df.C # noqa
f = lambda x: x.sum()
results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f)
expected = self.df.groupby(['A', 'B']).sum()['C']
assert_series_equal(results, expected)
def test_agg_apply_corner(self):
# nothing to group, all NA
grouped = self.ts.groupby(self.ts * np.nan)
assert self.ts.dtype == np.float64
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64, index=pd.Index(
[], dtype=np.float64))
assert_series_equal(grouped.sum(), exp)
assert_series_equal(grouped.agg(np.sum), exp)
assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
# DataFrame
grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan)
exp_df = DataFrame(columns=self.tsframe.columns, dtype=float,
index=pd.Index([], dtype=np.float64))
assert_frame_equal(grouped.sum(), exp_df, check_names=False)
assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0],
check_names=False)
def test_agg_grouping_is_list_tuple(self):
from pandas.core.groupby import Grouping
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_float64_no_int64(self):
# see gh-11199
df = DataFrame({"a": [1, 2, 3, 4, 5],
"b": [1, 2, 2, 4, 5],
"c": [1, 2, 3, 4, 5]})
expected = DataFrame({"a": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a"]].mean()
tm.assert_frame_equal(result, expected)
expected = DataFrame({"a": [1, 2.5, 4, 5],
"c": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a", "c"]].mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency(self):
# GH 9052
# make sure that the aggregates via dict
# are consistent
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
grouped = df.groupby(['A', 'B'])
c_mean = grouped['C'].mean()
c_sum = grouped['C'].sum()
d_mean = grouped['D'].mean()
d_sum = grouped['D'].sum()
result = grouped['D'].agg(['sum', 'mean'])
expected = pd.concat([d_sum, d_mean],
axis=1)
expected.columns = ['sum', 'mean']
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum,
c_mean,
d_sum,
d_mean],
axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['sum', 'mean']])
assert_frame_equal(result, expected, check_like=True)
result = grouped[['D', 'C']].agg([np.sum, np.mean])
expected = pd.concat([d_sum,
d_mean,
c_sum,
c_mean],
axis=1)
expected.columns = MultiIndex.from_product([['D', 'C'],
['sum', 'mean']])
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': 'mean', 'D': 'sum'})
expected = pd.concat([d_sum,
c_mean],
axis=1)
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': ['mean', 'sum'],
'D': ['mean', 'sum']})
expected = pd.concat([c_mean,
c_sum,
d_mean,
d_sum],
axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['mean', 'sum']])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = grouped[['D', 'C']].agg({'r': np.sum,
'r2': np.mean})
expected = pd.concat([d_sum,
c_sum,
d_mean,
c_mean],
axis=1)
expected.columns = MultiIndex.from_product([['r', 'r2'],
['D', 'C']])
assert_frame_equal(result, expected, check_like=True)
def test_agg_dict_renaming_deprecation(self):
# 15931
df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
'B': range(5),
'C': range(5)})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False) as w:
df.groupby('A').agg({'B': {'foo': ['sum', 'max']},
'C': {'bar': ['count', 'min']}})
assert "using a dict with renaming" in str(w[0].message)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df.groupby('A')[['B', 'C']].agg({'ma': 'max'})
with tm.assert_produces_warning(FutureWarning) as w:
df.groupby('A').B.agg({'foo': 'count'})
assert "using a dict on a Series for aggregation" in str(
w[0].message)
def test_agg_compat(self):
# GH 12334
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
expected = pd.concat([g['D'].sum(),
g['D'].std()],
axis=1)
expected.columns = MultiIndex.from_tuples([('C', 'sum'),
('C', 'std')])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = g['D'].agg({'C': ['sum', 'std']})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([g['D'].sum(),
g['D'].std()],
axis=1)
expected.columns = ['C', 'D']
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = g['D'].agg({'C': 'sum', 'D': 'std'})
assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts(self):
# API change for disallowing these types of nested dicts
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
def f():
g.aggregate({'r1': {'C': ['mean', 'sum']},
'r2': {'D': ['mean', 'sum']}})
pytest.raises(SpecificationError, f)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = g.agg({'C': {'ra': ['mean', 'std']},
'D': {'rb': ['mean', 'std']}})
expected = pd.concat([g['C'].mean(), g['C'].std(), g['D'].mean(),
g['D'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
assert_frame_equal(result, expected, check_like=True)
# same name as the original column
# GH9052
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
expected = expected.rename(columns={'result1': 'D'})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = g['D'].agg({'D': np.sum, 'result2': np.mean})
assert_frame_equal(result, expected, check_like=True)
def test_agg_python_multiindex(self):
grouped = self.mframe.groupby(['A', 'B'])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
import streamlit as st
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import pandas as pd
import pickle
import pydeck as pdk
from streamlit_folium import folium_static
import folium
#tensorflow==1.14
#keras==2.3.0
#scikit-learn==0.20.3
# loading in the model to predict on the data
pickle_in = open('svc.pkl', 'rb')
classifier = pickle.load(pickle_in)
st.sidebar.markdown("** Wildfire Prediction **")
st.sidebar.markdown('Try to predict wildfires yourself with Machine Learning')
def prediction(X,Y,month,day,FFMC,DMC,DC,ISI,temp,RH,wind,rain):
prediction = classifier.predict([[X,Y,month,day,FFMC,DMC,DC,ISI,temp,RH,wind,rain]])
#prediction = classifier.predict([[1, 4, 9 ,1 ,91.5, 130.1, 807.1, 7.5, 21.3, 35, 2.2, 0]])
#print(prediction)
classes={0:'safe',1:'On Fire'}
#print(classes[prediction[0]])
prediction2 = classes[prediction[0]]
st.sidebar.markdown('The area is:')
st.sidebar.text(prediction2)
return prediction2
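# A minimal usage sketch (assumed wiring, not part of the original app): the twelve
# inputs could come from sidebar widgets and feed the prediction() helper above.
# X = st.sidebar.number_input('X coordinate', min_value=1, max_value=9, value=1)
# ...collect Y, month, day, FFMC, DMC, DC, ISI, temp, RH, wind, rain the same way...
# if st.sidebar.button('Predict'):
#     prediction(X, Y, month, day, FFMC, DMC, DC, ISI, temp, RH, wind, rain)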
@st.cache
def showMap():
    plotData = pd.read_csv("https://firms.modaps.eosdis.nasa.gov/data/active_fire/c6/csv/MODIS_C6_Global_24h.csv")
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
from ctrnn import CTRNN
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from Utilities import *
# In[ ]:
nn = CTRNN(2, weight_range=weight_range, bias_range=bias_range, tc_min=tc_min, tc_max=tc_max)
n_simulation = 20
target_period = 1
target_freq = 1/target_period
freq_ratios = np.linspace(0.1,0.9,9).round(3)
n_step = 150
results = pd.DataFrame(columns=['jump_size','Ratio','clip_change_max','Fitness','Period_performance','Change_performance'])
from sklearn.preprocessing import MultiLabelBinarizer
import numpy as np
import pandas as pd
import pickle
from tqdm import tqdm
tqdm.pandas()
class DataframePreprocessing:
DEFAULT_TARGET_THEMES = [
5,
6,
26,
33,
139,
163,
232,
313,
339,
350,
406,
409,
555,
589,
597,
634,
660,
695,
729,
766,
773,
793,
800,
810,
852,
895,
951,
975,
]
OTHER_THEMES_VALUE = 4242
def __init__(
self,
        df=pd.DataFrame()
import re
import numpy as np
import pandas as pd
from arc._common import prob_metric_cal
import matchzoo as mz
from arc.anmm_impl import anmm_train
from arc.arci_impl import arci_train
from arc.arcii_impl import arcii_train
from arc.bimpm_impl import bimpm_train
from arc.cdssm_impl import cdssm_train
from arc.conv_knrm_impl import conv_knrm_train
from arc.diin_impl import diin_train
from arc.drmm_impl import drmm_train
from arc.drmmtks_impl import drmmtks_train
from arc.dssm_impl import dssm_train
from arc.duet_impl import duet_train
from arc.esim_impl import esim_train
from arc.hbmp_impl import hbmp_train
from arc.knrm_impl import knrm_train
from arc.match_lstm_impl import match_lstm_train
from arc.match_pyramid_impl import match_pyramid_train
from arc.match_srnn_impl import match_srnn_train
from arc.mv_lstm_impl import mv_lstm_train
from utils.util_params import arc_params_control
def trans_text(str_data_list):
res = []
for str_data in str_data_list:
        str_list = re.findall(r'\d+', str_data)
num_list = list(map(int, str_list))
num_arr = np.array(num_list, dtype=np.float32)
res.append(num_arr)
print('Shape of text: ', np.array(res).shape)
return res
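# Hypothetical example of the helper above: a space-separated token-id string becomes
# a float32 vector, e.g. trans_text(['3 17 42']) -> [array([ 3., 17., 42.], dtype=float32)]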
def trans_ngram(str_data_list, ngram=3):
res = []
for str_data in str_data_list:
        str_list = re.findall(r'\d+', str_data)
num_list = list(map(int, str_list))
num_arr = []
for i in range(len(num_list)):
if i < len(num_list) - ngram + 1:
gram = num_list[i: i + ngram]
else:
gram = num_list[i: len(num_list)] + [0] * (ngram - (len(num_list) - i))
num_arr.append(gram)
        res.append(np.array(num_arr, dtype=np.float64))
print('Shape of n-gram: ', np.array(res).shape)
return res
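# Hypothetical example of the zero-padding behaviour above, with ngram=3:
# trans_ngram(['1 2 3 4'], ngram=3) yields one array of shape (4, 3):
# [[1, 2, 3], [2, 3, 4], [3, 4, 0], [4, 0, 0]]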
def trans_hist(str_data_list_left, str_data_list_right, bin_size):
    # Build 5-gram representations of both texts and, for every pair, compute the
    # cosine similarity of each left n-gram against each right n-gram.
    # Note: `bin_size` is accepted but histogram binning is not implemented in this snippet.
    res_left = trans_ngram(str_data_list_left, 5)
    res_right = trans_ngram(str_data_list_right, 5)
    res_len = len(res_right[0])
    res = []
    for left_text, right_text in zip(res_left, res_right):
        score_matrix = []
        for i in range(res_len):
            score_list = []
            for j in range(res_len):
                score = np.dot(left_text[i], right_text[j]) / (np.linalg.norm(left_text[i]) * np.linalg.norm(right_text[j]))
                score_list.append(score)
            score_matrix.append(score_list)
        res.append(score_matrix)
    print('Shape of similarity scores: ', np.array(res).shape)
    return res
def trans_pd(file_name, arc, params):
pd_data = pd.read_csv(file_name)
id_left_list = pd_data['id_left'].values
text_left_list = trans_text(pd_data['text_left'].values)
length_left_list = list(map(int, pd_data['length_left'].values))
id_right_list = pd_data['id_right'].values
text_right_list = trans_text(pd_data['text_right'].values)
length_right_list = list(map(int, pd_data['length_right'].values))
label_list = list(map(float, pd_data['label'].values))
if arc == 'dssm':
# ngram_left_list = trans_ngram(pd_data['text_left'].values, params['ngram'])
# ngram_right_list = trans_ngram(pd_data['text_right'].values, params['ngram'])
data = {'id_left': pd.Series(id_left_list),
'text_left': pd.Series(text_left_list),
'ngram_left': pd.Series(text_left_list),
'length_left': pd.Series(length_left_list),
'id_right': pd.Series(id_right_list),
'text_right': pd.Series(text_right_list),
'ngram_right': pd.Series(text_right_list),
'length_right': pd.Series(length_right_list),
'label': pd.Series(label_list)}
elif arc in ['cdssm', 'duet']:
ngram_left_list = trans_ngram(pd_data['text_left'].values, params['ngram'])
ngram_right_list = trans_ngram(pd_data['text_right'].values, params['ngram'])
data = {'id_left': pd.Series(id_left_list),
'text_left': pd.Series(text_left_list),
'ngram_left': pd.Series(ngram_left_list),
                'length_left': pd.Series(length_left_list)
import re
from datetime import datetime, date
import pandas as pd
import pickledb
from log import log, log_to_file, get_file_log
from queries.player_rank_queries import record_all_player_ranks, retrieve_all_player_ranks
from utils import get_chrome_driver
RANKS_LOGS = get_file_log("update_player_ranks")
def scrap_all_player_ranks(log_file_path, pickle_db_path):
driver = get_chrome_driver()
try:
driver.get("https://www.atptour.com/en/rankings/singles")
date_str = driver.find_element_by_xpath("//div[@class='dropdown-wrapper']/div[1]/div/div").text
last_ranking_date = datetime.strptime(date_str, '%Y.%m.%d').date()
today = date.today()
if last_ranking_date != today:
            # Check that the last ranking date on atptour matches the current date; if not, do not scrape
            raise ValueError("ATP rankings page is not up to date; skipping scrape")
driver = get_chrome_driver(driver)
driver.get("https://www.atptour.com/en/rankings/singles?rankDate={0}&rankRange=1-5000".format(
date_str.replace(".", "-")))
ranks = []
rank_elems = driver.find_elements_by_class_name("rank-cell")
for rank_elem in rank_elems:
rank_str = rank_elem.text
            # Some lower-ranked players have their rank suffixed with "T" because they are tied (ex aequo)
rank_str = rank_str.replace("T", "")
rank = int(rank_str)
ranks.append(rank)
points_elems = driver.find_elements_by_xpath("//td[@class='points-cell']/a")
rank_points = [points.text for points in points_elems]
rank_points = [int(points.replace(",", "")) for points in rank_points]
player_ids = []
player_elems = driver.find_elements_by_xpath("//td[@class='player-cell']/span[1]/a[1]")
for elem in player_elems:
href = elem.get_attribute("href")
player_id_regex = re.search("players/.*/(.*)/overview", href)
player_ids.append(player_id_regex.group(1))
        player_ranks = pd.DataFrame({"rank": ranks, "player_id": player_ids, "rank_points": rank_points})
import unittest
import pandas as pd
from stat570.linear_model.linear_regression import LinearRegression
from sklearn.utils.estimator_checks import check_estimator
from sklearn import datasets
class LinearRegressionTestCase(unittest.TestCase):
def test_estimator_check(self):
check_estimator(LinearRegression)
def test_fit(self):
boston = datasets.load_boston()
feature_idx = [
idx for idx, feature in enumerate(boston.feature_names)
if feature in ['RM', 'LSTAT']]
X = boston.data[:, feature_idx]
y = boston.target
linear_model = LinearRegression().fit(X, y)
self.assertAlmostEqual(
linear_model.residual_variance_,
30.69445169247223)
self.assertAlmostEqual(
linear_model.coefficients_['estimate'][1],
5.094788)
def test_from_data_frame(self):
boston = datasets.load_boston()
medv = boston.target
        boston = pd.DataFrame(boston.data, columns=boston.feature_names)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright Toolkit Authors
from abc import ABC
from pydtk.models import BaseModel, register_model
import numpy as np
import pandas as pd
import sys
import datetime
@register_model(priority=1)
class GenericCsvModel(BaseModel, ABC):
"""A generic model for a csv file."""
_content_type = 'text/csv'
_data_type = None # allow any data-type
_file_extensions = ['.csv']
_contents = '.*'
def __init__(self, **kwargs):
super(GenericCsvModel, self).__init__(**kwargs)
def _load(self, path, start_timestamp=None, end_timestamp=None, **kwargs):
"""Load a csv file.
Args:
path (str): path to a csv file
start_timestamp (float): timestamp to start loading (not supported)
end_timestamp (float): timestamp to end loading (not supported)
"""
if start_timestamp is not None and end_timestamp is not None:
raise ValueError('Specifying time-range to load is not supported in GenericCsvModel')
data = pd.read_csv(path, header=None).to_numpy()
self.data = data
def _save(self, path, **kwargs):
"""Save ndarray data to a csv file.
Args:
path (str): path to the output csv file
"""
data = pd.DataFrame(self.data)
data.to_csv(path, header=False, index=False)
@property
def timestamps(self):
"""Return timestamps as ndarray."""
# this is prototype
return self.data
def to_ndarray(self):
"""Return data as ndarray."""
return self.data
@classmethod
def generate_contents_meta(cls, path, content_key='content'):
"""Generate contents metadata.
Args:
path (str): File path
content_key (str): Key of content
Returns:
(dict): contents metadata
"""
# Load file
data = pd.read_csv(path)
columns = data.columns.tolist()
# Generate metadata
contents = {content_key: {'columns': columns, 'tags': ['csv']}}
return contents
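    # Hypothetical example: for a CSV whose header row is "timestamp,value", this returns
    # {'content': {'columns': ['timestamp', 'value'], 'tags': ['csv']}}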
@classmethod
def generate_timestamp_meta(cls, path):
"""Generate contents metadata.
Args:
path (str): File path
Returns:
(list): [start_timestamp, end_timestamp]
"""
raise NotImplementedError
@register_model(priority=2)
class CameraTimestampCsvModel(GenericCsvModel, ABC):
"""A model for a csv file containing camera timestamps."""
_contents = {'camera/.*': {'tags': ['.*']}}
_columns = ['timestamp']
def __init__(self, **kwargs):
super(GenericCsvModel, self).__init__(**kwargs)
def _load(self, path, start_timestamp=None, end_timestamp=None, **kwargs):
"""Load a csv file.
Args:
path (str): path to a csv file
start_timestamp (float): timestamp to start loading (not supported)
end_timestamp (float): timestamp to end loading (not supported)
"""
if start_timestamp is None:
start_timestamp = self.metadata.data['start_timestamp']
if end_timestamp is None:
end_timestamp = self.metadata.data['end_timestamp']
# load csv
super()._load(path=path, **kwargs)
# filter
start_msec, end_msec = start_timestamp * 1000, end_timestamp * 1000 # sec. -> msec.
data = self.data
data = data[np.logical_and(data[:, 0] >= start_msec, data[:, 0] <= end_msec), 0]
# Convert unit (msec. -> sec.)
        # Note: CSV file timestamps in "Driving behavior DB" are recorded in msec.
        data = data.astype(np.float64) * pow(10, -3)
self.data = data
def to_ndarray(self):
"""Return data as ndarray."""
return self.data
@property
def timestamps(self):
"""Return timestamps as ndarray."""
return self.data
@register_model(priority=3)
class AnnotationCsvModel(GenericCsvModel, ABC):
"""A model for a csv file containing annotations."""
_contents = {'.*annotation': {'tags': ['.*']}}
_data_type = "annotation"
_columns = ['Record_ID', 'Annotator_ID', 'Risk_score', 'Subjective_risk_score',
'Scene_description', 'Risk_factor', 'Environmental_tag', 'Behavior_tag']
_nan_convert_map = {'Risk_factor': ''}
def __init__(self, **kwargs):
super(GenericCsvModel, self).__init__(**kwargs)
def _load(self, path, start_timestamp=None, end_timestamp=None, **kwargs):
"""Load a csv file.
Args:
path (str): path to a csv file
"""
data = pd.read_csv(path)
self.data = data
def _save(self, path, **kwargs):
"""Save ndarray data to a csv file.
Args:
path (str): path to the output csv file
"""
        data = pd.DataFrame(self.data)
from COVID_DataProcessor.datatype import Country, PreprocessInfo, get_country_name, PreType
from COVID_DataProcessor.util import get_period, path_to_name
from dataclasses import fields
from os.path import join, abspath, dirname, isfile
from pathlib import Path
from glob import glob
import pandas as pd
ROOT_PATH = Path(abspath(dirname(__file__))).parent
DATASET_PATH = join(ROOT_PATH, 'dataset')
SETTING_PATH = join(ROOT_PATH, 'settings')
RESULT_PATH = join(ROOT_PATH, 'results')
def get_safe_path(directory_list):
safe_path = ''
for directory in directory_list:
safe_path = join(safe_path, directory)
Path(safe_path).mkdir(parents=True, exist_ok=True)
return safe_path
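# Hypothetical example: get_safe_path([RESULT_PATH, 'US', 'plots']) returns
# '<RESULT_PATH>/US/plots' and creates each intermediate directory if it is missing.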
def get_result_base_path(country):
return get_safe_path([RESULT_PATH, get_country_name(country)])
def load_links(country=None):
link_path = join(DATASET_PATH, 'links.csv')
link_df = pd.read_csv(link_path, index_col='country')
return link_df.loc[get_country_name(country), :] if country is not None else link_df
def load_population(country, region=None):
population_df = pd.read_csv(join(DATASET_PATH, get_country_name(country), 'population.csv'), index_col='regions')
return population_df if region is None else population_df.loc[region, 'population']
def load_regions(country):
population_df = load_population(country)
regions = population_df.index.tolist()
regions.sort()
return regions
def load_raw_data(country):
raw_path = join(DATASET_PATH, get_country_name(country))
raw_path_list = glob(join(raw_path, 'raw_data', '*.csv'))
if len(raw_path_list) == 0:
print(f'Raw data of {get_country_name(country)} is not existing!')
raise FileNotFoundError(raw_path)
raw_dict = dict()
for file_path in raw_path_list:
file_name = path_to_name(file_path)
raw_df = pd.read_csv(file_path)
raw_dict.update({file_name: raw_df})
return raw_dict
def load_origin_data(country):
origin_path = join(DATASET_PATH, get_country_name(country), 'origin_data')
regions = load_regions(country)
data_dict = dict()
for region in regions:
print(join(origin_path, f'{region}.csv'))
region_df = pd.read_csv(join(origin_path, f'{region}.csv'), index_col='date')
data_dict.update({region: region_df})
return data_dict
def load_us_confirmed_data():
origin_path = join(DATASET_PATH, get_country_name(Country.US_CONFIRMED), 'origin_data')
saving_path = join(origin_path, 'US_CONFIRMED.csv')
    us_confirmed_df = pd.read_csv(saving_path, index_col='regions')
#-*-coding:utf-8-*-
import requests
from bs4 import BeautifulSoup
import pickle as pc
import logging
import time
import pandas as pd
import numpy as np
headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36',
'Referer':'http://www.npr.org/books/'}
#%%
rooturl='http://www.npr.org/books/'
rootpg=requests.get(rooturl, headers=headers)
rtsoup = BeautifulSoup(rootpg.content, "lxml")
allas = [str(a0) for a0 in [a.get('href') for a in rtsoup.find_all('a')] if str(a0).startswith('/books/genres/')]
genres0 = {a.split('/')[-2]:'http://www.npr.org'+a for a in allas}
needed_genres = ['biography-memoir',
'children',
'comedy',
'comics-graphic-novels',
'digital-culture',
'faith-spirituality',
'food-wine',
'history-society',
'historical-fiction',
'horror-supernatural',
'literary-fiction',
'mystery-thrillers',
'parenting-families',
'politics-public-affairs',
'romance',
'science-fiction-fantasy',
'science-health',
'sports',
'travel',
'young-adults']
genres = {i:genres0[i] for i in needed_genres}
with open('genres_urls.pc','wb') as f:
pc.dump(genres,f)
# get all the <article>s
for g in genres:
achurl=genres[g]+'archive/'
dateend='12-31-2015'
start=0
pages=[]
logging.basicConfig(filename=g+'_log.log',level=logging.DEBUG)
while True:
time.sleep(1)
nowurl=achurl+r'?start='+str(start)+r'&date=12-31-2015'
pg=requests.get(nowurl, headers=headers)
pages.append(pg)
with open(g+'_pages.pc','wb') as f:
pc.dump(pages,f)
soup = BeautifulSoup(pg.content,"lxml")
arts = soup.find_all('article')
logging.debug('Done with '+nowurl+', find arts: '+str(len(arts)))
if len(arts):
logging.debug('First title: '+arts[0].a.get('href').split('/')[-1])
else:
logging.debug('End of this genre!')
break
start+=15
if start>=1500:
break
# stat for the article lists
#stat={}
#all_art={}
#for g in genres:
# with open(g+'_pages.pc','rb') as f:
# pages=pc.load(f)
# art_dict={}
# for pg in pages:
# soup = BeautifulSoup(pg.content,"lxml")
# arts = soup.find_all('article')
# art_dict.update({art.a.get('href').split('/')[-2]:art for art in arts})
# all_art.update(art_dict)
# stat[g]=len(art_dict)
#stat
#len(all_art)
#with open('stat.pc','wb') as f:
# pc.dump([stat,len(all_art)],f)
#%%
# build the whole list
# booklist=pd.DataFrame(columns=['id','name','author','origin','index_in_origen','url'])
booklist={}
with open('genres_urls.pc','rb') as f:
genres=pc.load(f)
for g in genres:
with open(g+'_pages.pc','rb') as f:
pages=pc.load(f)
index_in_origen=0
for pg in pages:
soup = BeautifulSoup(pg.content,"lxml")
arts = soup.find_all('article')
for art in arts:
book={}
book['name']=art.h2.string
book['url']=art.h2.a.get('href')
try:
book['author']=art.find_all('p','author')[0].a.string
except:
try:
book['author']=art.find_all('p','author')[0].get_text().replace('by ','')
except:
print(art.find_all('p','author'))
book['author']=''
book['index_in_origen']=index_in_origen
index_in_origen+=1
book['origin']=g
booklist[int(book['url'].split('/')[-2])]=book
print(g,'Done')
for id in booklist:
for nm in booklist[id]:
booklist[id][nm]=str(booklist[id][nm])
booklist=pd.DataFrame(booklist).transpose()
with open('booklist.pc','wb') as f:
pc.dump(booklist,f)
#%% get the pages of all books (shuffle in case cannot get all of them)
with open('booklist.pc','rb') as f:
booklist=pc.load(f)
booklist['downloaded']=False
no=0
allpages={}
logging.basicConfig(filename='allpage_log.log',level=logging.DEBUG)
for id in booklist.index:
pg=requests.get(booklist.loc[id,'url'], headers=headers)
allpages[id]=pg.content
booklist.loc[id,'downloaded']=True
logging.debug('ID: '+str(id))
print(id,'check')
if len(allpages)%300==0:
print(len(allpages))
with open('allpages'+str(no)+'.pc','wb') as f:
pc.dump(allpages,f)
allpages={}
no+=1
time.sleep(5)
time.sleep(1)
with open('allpages'+str(no)+'.pc','wb') as f:
pc.dump(allpages,f)
logging.debug('FINISHED!')
#%% organize all the data
with open('booklist.pc','rb') as f:
booklist=pc.load(f)
allpages={}
for i in range(24):
with open('./allpages/allpages'+str(i)+'.pc','rb') as f:
allpages.update(pc.load(f))
booklist['summary']=False
booklist['excerpt']=False
booklist['title2']=''
booklist['imgurl']=''
booklist['img']=False
summarylist={}
excerptlist={}
genreslist={}
for id in allpages:
pg=allpages[id]
soup = BeautifulSoup(pg,"lxml")
if soup.find('h1').text != booklist.name[id]:
print(id,'name not same: ',soup.find('h1').text,booklist.name[id])
        raise Exception('book name mismatch')
try:
booklist.loc[id,'title2']=soup.find('div',{'class':'booktitle'}).find('h2').text
except:
pass
try:
summary=soup.find('div',{'id':'summary'}).text.strip()
summarylist[id]=summary
booklist.loc[id,'summary']=True
except:
summarylist[id]=''
genres=soup.find('div',{'id':'bookmeta'}).find_all('a')
genres=[a.text for a in genres]
genreslist[id]=genres
try:
excerpt=soup.find('div',{'id':'storytext'}).find_all('p')
excerptlist[id]='\n'.join([a.text for a in excerpt]).strip()
booklist.loc[id,'excerpt']=True
except:
excerptlist[id]=''
try:
booklist.loc[id,'imgurl']=soup.find('img',{'class':'img'}).get('src')
booklist.loc[id,'img']=True
except:
pass
    if len(summarylist)%100==0: # show the progress
print(len(summarylist)/len(allpages))
with open('sum_exc_gen.pc','wb') as f:
pc.dump([summarylist,excerptlist,genreslist],f)
# add genres to booklist
maxgen=0
for id in genreslist:
if len(genreslist[id])>maxgen:
maxgen=len(genreslist[id])
print(maxgen)
for i in range(maxgen):
booklist['genre'+str(i)]=''
for id in genreslist:
for i,t in enumerate(genreslist[id]):
booklist.loc[id,'genre'+str(i)]=t
booklist['fiction']=np.nan
for id in genreslist:
if 'Fiction' in genreslist[id]:
booklist.loc[id,'fiction']=True
elif 'Nonfiction' in genreslist[id]:
booklist.loc[id,'fiction']=False
with open('booklist.pc','wb') as f:
pc.dump(booklist,f)
#%% get all images
# make dir img/
with open('booklist.pc','rb') as f:
booklist=pc.load(f)
for id,url in booklist.imgurl.iteritems():
    if url.find('-s99-c15')==-1: print('not found')  # flag cover URLs missing the thumbnail suffix
N,n=booklist.img.sum(),0
for id,url in booklist.imgurl.iteritems():
if booklist.loc[id,'img']==False:
continue
url=url.replace('-s99-c15','')
im=requests.get(url, headers=headers)
with open('./img/'+str(id)+'.jpg','wb') as f:
f.write(im.content)
n+=1
if n%100==0:
print(n/N)
#%%######################!!!!!!
#here use nprspider2.py and nprspider4.py to add data.
#########################!!!!!!
#%% output to csv
with open('booklist.pc','rb') as f:
booklist=pc.load(f)
with open('sum_exc_gen.pc','rb') as f:
summarylist,excerptlist,genreslist=pc.load(f)
booklist['title1']=booklist.pop('name') # change name to title1!
newcols=['author','title1','title2','genre0', 'genre1', 'genre2', 'genre3',
'genre4','fiction','excerpt','summary','img','origin','index_in_origen',
'url', 'imgurl']
booklist=booklist[newcols]
with open('booklist.pc','wb') as f:
pc.dump(booklist,f)
#booklist.to_csv('booklist.csv')
summary=pd.DataFrame({'summary_text':summarylist})
excerpt=pd.DataFrame({'excerpt_text':excerptlist})
import io
import itertools
import pytest
from pandas.util.testing import (
assert_series_equal, assert_frame_equal, assert_index_equal)
from numpy.testing import assert_array_equal
import pandas as pd
import numpy as np
import matplotlib.figure
import matplotlib.pyplot as plt
from upsetplot import plot
from upsetplot import UpSet
from upsetplot import generate_counts, generate_samples
from upsetplot.plotting import _process_data
# TODO: warnings should raise errors
def is_ascending(seq):
# return np.all(np.diff(seq) >= 0)
return sorted(seq) == list(seq)
@pytest.mark.parametrize('x', [
generate_counts(),
generate_counts().iloc[1:-2],
])
@pytest.mark.parametrize('sort_by', ['cardinality', 'degree'])
@pytest.mark.parametrize('sort_categories_by', [None, 'cardinality'])
def test_process_data_series(x, sort_by, sort_categories_by):
assert x.name == 'value'
for subset_size in ['auto', 'legacy', 'sum', 'count']:
for sum_over in ['abc', False]:
with pytest.raises(ValueError, match='sum_over is not applicable'):
_process_data(x, sort_by=sort_by,
sort_categories_by=sort_categories_by,
subset_size=subset_size, sum_over=sum_over)
df, intersections, totals = _process_data(
x, subset_size='auto', sort_by=sort_by,
sort_categories_by=sort_categories_by, sum_over=None)
assert intersections.name == 'value'
x_reordered = (x
.reorder_levels(intersections.index.names)
.reindex(index=intersections.index))
assert len(x) == len(x_reordered)
assert x_reordered.index.is_unique
assert_series_equal(x_reordered, intersections,
check_dtype=False)
if sort_by == 'cardinality':
assert is_ascending(intersections.values[::-1])
else:
# check degree order
assert is_ascending(intersections.index.to_frame().sum(axis=1))
# TODO: within a same-degree group, the tuple of active names should
# be in sort-order
if sort_categories_by:
assert is_ascending(totals.values[::-1])
assert np.all(totals.index.values == intersections.index.names)
assert np.all(df.index.names == intersections.index.names)
assert set(df.columns) == {'_value', '_bin'}
assert_index_equal(df['_value'].reorder_levels(x.index.names).index,
x.index)
assert_array_equal(df['_value'], x)
assert_index_equal(intersections.iloc[df['_bin']].index,
df.index)
assert len(df) == len(x)
@pytest.mark.parametrize('x', [
generate_samples()['value'],
generate_counts(),
])
def test_subset_size_series(x):
kw = {'sort_by': 'cardinality',
'sort_categories_by': 'cardinality',
'sum_over': None}
df_sum, intersections_sum, totals_sum = _process_data(
x, subset_size='sum', **kw)
if x.index.is_unique:
expected_warning = None
else:
expected_warning = FutureWarning
with pytest.warns(expected_warning):
df, intersections, totals = _process_data(
x, subset_size='legacy', **kw)
assert_frame_equal(df, df_sum)
assert_series_equal(intersections, intersections_sum)
assert_series_equal(totals, totals_sum)
if x.index.is_unique:
df, intersections, totals = _process_data(
x, subset_size='auto', **kw)
assert_frame_equal(df, df_sum)
assert_series_equal(intersections, intersections_sum)
assert_series_equal(totals, totals_sum)
else:
with pytest.raises(ValueError):
_process_data(
x, subset_size='auto', **kw)
df_count, intersections_count, totals_count = _process_data(
x, subset_size='count', **kw)
df, intersections, totals = _process_data(
x.groupby(level=list(range(len(x.index.levels)))).count(),
subset_size='sum', **kw)
assert_series_equal(intersections, intersections_count, check_names=False)
assert_series_equal(totals, totals_count)
@pytest.mark.parametrize('sort_sets_by', [None, 'cardinality'])
@pytest.mark.parametrize('x', [
generate_counts(),
])
def test_sort_sets_by_deprecation(x, sort_sets_by):
with pytest.warns(DeprecationWarning, match='sort_sets_by'):
upset1 = UpSet(x, sort_sets_by=sort_sets_by)
with pytest.warns(None):
upset2 = UpSet(x, sort_categories_by=sort_sets_by)
fig = matplotlib.figure.Figure()
upset1.plot(fig)
png1 = io.BytesIO()
fig.savefig(png1, format='raw')
fig = matplotlib.figure.Figure()
upset2.plot(fig)
png2 = io.BytesIO()
fig.savefig(png2, format='raw')
assert png1.getvalue() == png2.getvalue()
@pytest.mark.parametrize('x', [
generate_samples()['value'],
])
@pytest.mark.parametrize('sort_by', ['cardinality', 'degree'])
@pytest.mark.parametrize('sort_categories_by', [None, 'cardinality'])
def test_process_data_frame(x, sort_by, sort_categories_by):
X = pd.DataFrame({'a': x})
with pytest.raises(ValueError, match='Please specify subset_size'):
_process_data(X, sort_by=sort_by,
sort_categories_by=sort_categories_by,
subset_size='legacy', sum_over=None)
with pytest.warns(None):
df, intersections, totals = _process_data(
X, sort_by=sort_by, sort_categories_by=sort_categories_by,
sum_over='a', subset_size='auto')
assert df is not X
# check equivalence to Series
df1, intersections1, totals1 = _process_data(
x, sort_by=sort_by, sort_categories_by=sort_categories_by,
subset_size='sum', sum_over=None)
assert intersections.name == 'a'
assert_frame_equal(df, df1.rename(columns={'_value': 'a'}))
assert_series_equal(intersections, intersections1, check_names=False)
assert_series_equal(totals, totals1)
# check effect of extra column
X = pd.DataFrame({'a': x, 'b': np.arange(len(x))})
df2, intersections2, totals2 = _process_data(
X, sort_by=sort_by, sort_categories_by=sort_categories_by,
sum_over='a', subset_size='auto')
assert_series_equal(intersections, intersections2)
assert_series_equal(totals, totals2)
assert_frame_equal(df, df2.drop('b', axis=1))
assert_array_equal(df2['b'], X['b']) # disregard levels, tested above
# check effect not dependent on order/name
X = pd.DataFrame({'b': np.arange(len(x)), 'c': x})
df3, intersections3, totals3 = _process_data(
X, sort_by=sort_by, sort_categories_by=sort_categories_by,
sum_over='c', subset_size='auto')
assert_series_equal(intersections, intersections3, check_names=False)
assert intersections.name == 'a'
assert intersections3.name == 'c'
assert_series_equal(totals, totals3)
assert_frame_equal(df.rename(columns={'a': 'c'}), df3.drop('b', axis=1))
assert_array_equal(df3['b'], X['b'])
# check subset_size='count'
X = pd.DataFrame({'b': np.ones(len(x), dtype=int), 'c': x})
df4, intersections4, totals4 = _process_data(
X, sort_by=sort_by, sort_categories_by=sort_categories_by,
sum_over='b', subset_size='auto')
df5, intersections5, totals5 = _process_data(
X, sort_by=sort_by, sort_categories_by=sort_categories_by,
subset_size='count', sum_over=None)
assert_series_equal(intersections4, intersections5, check_names=False)
assert intersections4.name == 'b'
assert intersections5.name == 'size'
assert_series_equal(totals4, totals5)
assert_frame_equal(df4, df5)
# product-demand-adam-auto.py
'''
Uses TensorFlow to apply the Adam optimizer instead of classical stochastic gradient descent
method for the regression analysis.
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import math_ops
import re, time, pickle
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from scipy.stats import kendalltau, spearmanr
def bracket(row):
'''
This function converts [negative] string values in bracket form to standard integers.
'''
    if re.search(r'\(', row):
return int('-' + row[1:-1])
else:
return int(row)
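# For example (values are illustrative): bracket('(100)') == -100 and bracket('250') == 250,
# since negative values are encoded as bracketed strings in the source data.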
def code_split(code):
'''
    Splits a product code and returns the numerical component of the code.
'''
z = code.split('_')
return int(z[1])
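# For example, with a hypothetical code such as 'Product_0979',
# code_split('Product_0979') returns 979.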
def test(lr,epochs):
'''
Takes a single learning rate and a number of epochs to return a mean squared error.
'''
w = tf.Variable([[0],[0],[0],[0],[0],[0]], trainable=True, dtype=tf.float64)
x = tf.convert_to_tensor(X_train_val, dtype=tf.float64)
y = tf.convert_to_tensor(y_train_val, dtype=tf.float64)
y_pred = tf.matmul(x, w)
mse = tf.losses.mean_squared_error(y, y_pred)
adam = tf.train.AdamOptimizer(learning_rate=lr)
a = adam.minimize(mse, var_list=w)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for n in range(epochs):
sess.run(a)
check = sess.run(mse)
w = sess.run(w)
return check
def scan(lr_range, epochs):
'''
Applies a range of learning rates and epochs to the test function to return a list of
mean squared errors for each corresponding learning rate.
'''
results = []
for i in lr_range:
x = test(i,epochs)
results.append(x)
return results
def best(keys, values):
'''
Creates a dataframe of the learning rates and preliminary mean squared error values then
returns the learning rate with the lowest MSE.
'''
group = dict(zip(keys, values))
to_df = {'LR':keys, 'MSE':values}
df = pd.DataFrame(to_df)
lowest = df[df['MSE'] == df['MSE'].min()]['LR']
return float(lowest)
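# A minimal usage sketch (illustrative values, not from the original script):
# lr_range = [0.001, 0.01, 0.1]
# mse_values = scan(lr_range, epochs=200)   # preliminary MSE for each learning rate
# best_lr = best(lr_range, mse_values)      # learning rate with the lowest MSE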
def trunc_plot(array, length):
'''
Processes output values from Adam optimization for plotting.
'''
for i in range(1,length):
yield array[i]
def listing(weights):
'''
Generates final weight values for printing.
'''
for i in range(0,len(weights)):
yield weights[i][0]
def output(columns, weights):
for i in range(0,len(weights)):
print(f'{columns[i]:<13}: {weights[i]}')
if __name__ == '__main__':
    df = pd.read_csv('Historical Product Demand.csv')
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query all at once
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test index_col argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
# We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still works if the connection is closed
# between the writing and the reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object_)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
# the result index will gain a name from the set_index operation; compare with check_names=False
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type info from table -> stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test that no warning is raised for BIGINT (which backs int64) (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
Test the sqlalchemy backend against a MySQL database.
"""
flavor = "mysql"
@classmethod
def connect(cls):
url = "mysql+{driver}://root@localhost/pandas_nosetest"
return sqlalchemy.create_engine(
url.format(driver=cls.driver), connect_args=cls.connect_args
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = "postgresql"
@classmethod
def connect(cls):
url = "postgresql+{driver}://postgres@localhost/pandas_nosetest"
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
# only test this for postgresql (schemas are not supported in
# mysql/sqlite)
df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schemas
df.to_sql("test_schema_public", self.conn, index=False)
df.to_sql(
"test_schema_public_explicit", self.conn, index=False, schema="public"
)
df.to_sql("test_schema_other", self.conn, index=False, schema="other")
# read dataframes back in
res1 =
|
sql.read_sql_table("test_schema_public", self.conn)
|
pandas.io.sql.read_sql_table
|
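A minimal sketch of the API named above (pandas.io.sql.read_sql_table), assuming an in-memory SQLite engine; the table and column names are illustrative only.
import pandas as pd
from sqlalchemy import create_engine
engine = create_engine("sqlite:///:memory:")  # throwaway in-memory database
pd.DataFrame({"A": [1, 2], "B": [0.1, 0.2]}).to_sql("demo", engine, index=False)
# read_sql_table reflects the table schema via SQLAlchemy, unlike read_sql_query
df = pd.read_sql_table("demo", engine, columns=["A"])
print(df)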
import tensorflow as tf
import pandas as pd
from matplotlib import pyplot as plt
def buildModel(learningRate):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(units=1, input_shape=(1,)))
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=learningRate),loss="mean_squared_error",metrics=[tf.keras.metrics.RootMeanSquaredError()])
return model
def trainModel(model, feature, label, epochs, batchSize):
history = model.fit(x=feature, y=label, batch_size=batchSize,epochs=epochs)
trainedWeight = model.get_weights()[0]
trainedBias = model.get_weights()[1]
epochs = history.epoch
hist =
|
pd.DataFrame(history.history)
|
pandas.DataFrame
|
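A hedged sketch of what the completion above produces: Keras' History.history is a plain dict of per-epoch metric lists, so pandas.DataFrame turns it into one column per metric; the dict literal below stands in for a real training run.
import pandas as pd
# stand-in for history.history returned by model.fit(); the values are made up
history_dict = {"loss": [4.1, 2.3, 1.7], "root_mean_squared_error": [2.0, 1.5, 1.3]}
hist = pd.DataFrame(history_dict)
rmse = hist["root_mean_squared_error"]  # one Series per tracked metric
print(hist.tail())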
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range(
"2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC")
exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T")
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert("UTC").tz_localize(None)
expected = expected._with_freq("infer")
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
# tz_convert doesn't affect the internal values
dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
|
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
|
pandas._testing.assert_numpy_array_equal
|
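A standalone sketch of the assertion in the completion above: tz_convert changes only the displayed timezone, so the underlying int64 epoch values (.asi8) stay identical; pandas._testing is a private helper module, used here only because the surrounding test file uses it.
import pandas as pd
import pandas._testing as tm
rng = pd.date_range("3/11/2012", "3/12/2012", freq="H", tz="UTC")
rng_eastern = rng.tz_convert("US/Eastern")
# wall-clock labels differ, but the stored epoch nanoseconds do not
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)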
# IMPORTING PACKAGES
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import requests
from math import floor
from termcolor import colored as cl
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = (20,10)
# EXTRACTING STOCK DATA
def get_historical_data(symbol, start_date):
api_key = 'YOUR API KEY'
api_url = f'https://api.twelvedata.com/time_series?symbol={symbol}&interval=1day&outputsize=5000&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df['values']).iloc[::-1].set_index('datetime').astype(float)
df = df[df.index >= start_date]
df.index = pd.to_datetime(df.index)
return df
aapl = get_historical_data('AAPL', '2010-01-01')
aapl.tail()
# WILLIAMS %R CALCULATION
def get_wr(high, low, close, lookback):
highh = high.rolling(lookback).max()
lowl = low.rolling(lookback).min()
wr = -100 * ((highh - close) / (highh - lowl))
return wr
aapl['wr_14'] = get_wr(aapl['high'], aapl['low'], aapl['close'], 14)
aapl.tail()
# WILLIAMS %R PLOT
plot_data = aapl[aapl.index >= '2020-01-01']
ax1 = plt.subplot2grid((11,1), (0,0), rowspan = 5, colspan = 1)
ax2 = plt.subplot2grid((11,1), (6,0), rowspan = 5, colspan = 1)
ax1.plot(plot_data['close'], linewidth = 2)
ax1.set_title('AAPL CLOSING PRICE')
ax2.plot(plot_data['wr_14'], color = 'orange', linewidth = 2)
ax2.axhline(-20, linewidth = 1.5, linestyle = '--', color = 'grey')
ax2.axhline(-80, linewidth = 1.5, linestyle = '--', color = 'grey')
ax2.set_title('AAPL WILLIAMS %R 14')
plt.show()
# MACD CALCULATION
def get_macd(price, slow, fast, smooth):
exp1 = price.ewm(span = fast, adjust = False).mean()
exp2 = price.ewm(span = slow, adjust = False).mean()
macd =
|
pd.DataFrame(exp1 - exp2)
|
pandas.DataFrame
|
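A compact, hedged sketch of the MACD step being completed above: two exponential moving averages of a toy price series, with their difference wrapped in a DataFrame (the prices are synthetic).
import pandas as pd
price = pd.Series([10.0, 10.5, 10.2, 10.8, 11.1, 10.9], name="close")
exp1 = price.ewm(span=12, adjust=False).mean()  # fast EMA
exp2 = price.ewm(span=26, adjust=False).mean()  # slow EMA
macd = pd.DataFrame(exp1 - exp2)  # MACD line as a one-column frame
macd.columns = ["macd"]
print(macd)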
import sys
from anndata import AnnData
sys.path.append('/data/workspace/st/stereopy-release')
from stereo.io.reader import read_stereo_data
import scanpy as sc
import numpy as np
import pandas as pd
path = '/data/workspace/st/stereopy-release/test/Gene_bin50_lassoleiden.h5ad'
adata = sc.read_h5ad(path)
adata = AnnData(adata.raw.X, var=pd.DataFrame(index=adata.var.index), obs=
|
pd.DataFrame(index=adata.obs.index)
|
pandas.DataFrame
|
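A small illustration of the pattern in the completion above: pandas.DataFrame(index=...) builds an empty frame that carries only row labels, which is how the AnnData obs/var metadata is stubbed out here; the labels below are invented.
import pandas as pd
cell_ids = ["cell_1", "cell_2", "cell_3"]  # invented observation names
obs = pd.DataFrame(index=pd.Index(cell_ids, name="obs_names"))
print(obs.shape)  # (3, 0): three rows, no columns yet
obs["n_genes"] = [120, 98, 143]  # columns can still be added later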
import datetime
import numpy as np
import pandas as pd
from sqlalchemy import sql
def get_and_adjust_data(db_engine, station_id, start, end):
"""
Get data from the database in both the bike count format and the outage
format, between the passed dates. If bike count data and outage data are
available for the same time, bike count data takes precedence.
If no data is available for a subset of the passed period of time, it will
be left out of the returned dataset.
"""
data_list = []
# Create empty DateTimeIndex with frequency of five minutes, and assign it
# to an empty series.
# "5T" is five minutes.
dti = pd.date_range(0, -1, freq="5T")
data = pd.Series(None, index=dti)
# Add data in the bike count format.
bike_counts = pd.read_sql_query(
"SELECT ts, bikes, spaces FROM bike_count "
+ "WHERE station_id = %(station_id)s AND "
+ "ts >= %(start)s AND ts <= %(end)s;",
db_engine, params={
"station_id": station_id, "start": start, "end": end})
# bike_count[0] is the index, [1..3] are the columns in the order
# selected in the above query
for bike_count in bike_counts.itertuples():
# Do not insert counts with no bikes or spaces (inactive stations).
if not (bike_count[2] == 0 and bike_count[3] == 0):
ts = pd.to_datetime(bike_count[1], infer_datetime_format=True)
# Round the timestamp to the nearest five minute mark.
ts += datetime.timedelta(seconds=150)
ts = ts.replace(
minute=(ts.minute - (ts.minute % 5)), second=0, microsecond=0)
# A status of np.nan means the station is neither full nor empty.
status = np.nan
if bike_count[2] == 0:
status = "empty"
elif bike_count[3] == 0:
status = "full"
# Create index with only one entry, ts.
index = pd.date_range(ts, ts, freq="5T")
data_list.append(pd.Series(status, index=index))
if len(data_list) > 0:
data = pd.concat(data_list)
try:
data_list = []
# Add data in the outage format.
outages = pd.read_sql_query(
"SELECT outage_type, outage_start, outage_end FROM outage "
+ "WHERE station_id = %(station_id)s AND "
+ "outage_start >= %(start)s AND outage_end <= %(end)s;",
db_engine, params={
"station_id": station_id, "start": start, "end": end})
# Merge each outage into dataframe.
for outage in outages.itertuples():
ostart = pd.to_datetime(outage[2], infer_datetime_format=True)
ostart += datetime.timedelta(seconds=150)
ostart = ostart.replace(
minute=(ostart.minute - (ostart.minute % 5)),
second=0, microsecond=0)
oend = pd.to_datetime(outage[3], infer_datetime_format=True)
oend += datetime.timedelta(seconds=150)
oend = oend.replace(
minute=(oend.minute - (oend.minute % 5)),
second=0, microsecond=0)
index = pd.date_range(ostart, oend, freq="5T")
data_list.append(
|
pd.Series(outage[1], index=index)
|
pandas.Series
|
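A hedged, standalone sketch of the Series being appended above: a scalar status broadcast over a five-minute DatetimeIndex, the same shape that the surrounding function later concatenates; the dates and status value are examples.
import pandas as pd
ostart = pd.Timestamp("2021-06-01 10:00")
oend = pd.Timestamp("2021-06-01 10:20")
index = pd.date_range(ostart, oend, freq="5T")  # five 5-minute marks, ends inclusive
outage_series = pd.Series("full", index=index)  # the scalar is broadcast over the index
print(outage_series)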
'''
This class automates the process of exporting data from a CSV file to the database
'''
import pandas as pd
from pathlib import Path
import re
import numpy as np
class DataExportManager:
@staticmethod
def exportAttributes(MyConnection):
base_path = Path(__file__).parent
file_path = (base_path / "data_csv/datosAtributosCsv.csv").resolve()
header=['TAGS','IDAG','ATRIBUTO GENERAL','IDAE','ATRIBUTO ESPECIFICO']
dfAttributes = pd.read_csv(file_path, header=0, names=header, encoding='utf-8')
for index, row in dfAttributes.iterrows():
if(not MyConnection.addAtribute(row[2],row[4])):
print('Error')
break
print('atributo: ',index,' ok')
dfAttributes.to_csv(file_path, encoding="utf-8", index=False)
print('::ok::Atributos exportados...')
return "ok"
@staticmethod
def exportAutos(MyConnection):
base_path = Path(__file__).parent
file_path = (base_path / "data_csv/autos_data_mod_csv.csv").resolve()
dfAutos = pd.read_csv(file_path,encoding='utf-8')
dfAutos = dfAutos.fillna(0)  # fillna returns a new frame; assign it back
for index, row in dfAutos.iterrows():
if(not MyConnection.addAuto(row[0],row[1],row[2],row[3],row[-3],row[-1])):
print('Error')
break
print('auto: ',index,' ok')
print('::ok::Autos exportados...')
return "ok"
@staticmethod
def exportAutosAttributes(MyConnection):
sms='ok'
base_path = Path(__file__).parent
file_path = (base_path / "data_csv/autos_data_mod_csv.csv").resolve()
dfAutos = pd.read_csv(file_path,encoding='utf-8')
dfAutos.drop(['marca','modelo','año','versión','url'],axis='columns', inplace=True)
for index, row in dfAutos.iterrows():
i=0
for data in row:
if data==1:
if(not MyConnection.addDatasheet(index+1,i+1)):
print('Error')
sms='failed'
break
i+=1
print('auto: ',index+1)
print('::ok::Autos-Atributos exportados...')
return sms
@staticmethod
def exportTags(MyConnection):
sms='ok'
base_path = Path(__file__).parent
file_path = (base_path / "data_csv/datosEtiquetasCsv.csv").resolve()
dfTags = pd.read_csv(file_path,encoding='utf-8')
for index, row in dfTags.iterrows():
if(not MyConnection.addTag(row[1],row[2])):
print('Error')
sms='failed'
break
print('Tag ',index,' ok')
print('::ok::Tags exportados...')
return sms
@staticmethod
def exportTagsAttributes(MyConnection):
sms='ok'
base_path = Path(__file__).parent
file_path1 = (base_path / "data_csv/datosAtributosCsv.csv").resolve()
file_path2 = (base_path / "data_csv/datosEtiquetasCsv.csv").resolve()
dfAttribs = pd.read_csv(file_path1,encoding='utf-8')
dfTags = pd.read_csv(file_path2,encoding='utf-8')
dfAttribs["TAGS"].fillna("", inplace = True)
for index, row in dfTags.iterrows():
dfAux=dfAttribs.loc[dfAttribs['TAGS'].str.contains(row['TAG'], flags = re.IGNORECASE)]
for index1, row1 in dfAux.iterrows():
if(not MyConnection.addLinkAttributeTag(index+1,index1+1)):
print('Error')
sms='failed'
break
print('Tag ligado ',index,' ok')
print('::ok::Tags-Atributos exportados...')
return sms
@staticmethod
def exportResponsesAttributes(MyConnection):
sms='ok'
base_path = Path(__file__).parent
file_path1 = (base_path / "data_csv/datosMtxCsv.csv").resolve()
dfMtx = pd.read_csv(file_path1,encoding='utf-8')
dfMtx.drop(['ID R'],axis='columns', inplace=True)
for index, row in dfMtx.iterrows():
i=0
for data in row:
if data==1:
if(not MyConnection.addLinkAttributeResponse(index+1,int(dfMtx.columns[i]))):# the column name is treated as the id
sms='failed'
break
i+=1
print('respuesta: ',index+1)
print('::ok::MatrizAtributos exportada...')
return sms
@staticmethod
def exportScoresheet(MyConnection):
sms='ok'
base_path = Path(__file__).parent
file_path1 = (base_path / "data_csv/scoreSheet.csv").resolve()
dfScoreS = pd.read_csv(file_path1,encoding='utf-8')
dfScoreS.drop(['marca','modelo', 'año', 'versión','nombre'],axis='columns', inplace=True)# drop the unneeded columns
#dfScoreS=dfScoreS.dropna(subset=['general'])
dfScoreS=dfScoreS.fillna(0)
for index, row in dfScoreS.iterrows():
if(not MyConnection.addScoresheet(row['general'],row['confort'],row['desempeño'],row['tecnología'],
row['ostentosidad'],row['deportividad'],row['economía'],row['eficiencia'],row['seguridad'],row['ecología'],row['a_favor'],row['en_contra'],row['cP'],row['cN'],index+1)):
sms='failed'
break
print('hoja: ',index+1)
print('::ok::Hojas puntuaciones exportados...')
return sms
# INACTIVE!!
@staticmethod
def exportForms(MyConnection):
sms='ok'
base_path = Path(__file__).parent
file_path_forms = (base_path / "data_csv/dataforms.csv").resolve()
file_path_quest = (base_path / "data_csv/datosFormularioCsv.csv").resolve()
file_path_out_numericForms = (base_path / "data_csv/datosFormularioNumericCsv.csv").resolve()
header=['FECHA','EDAD','GENERO','OCUPACION','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17']
dfForms = pd.read_csv(file_path_forms,encoding='utf-8', names=header, header=0)
dfQuest=pd.read_csv(file_path_quest,encoding='utf-8')
dfForms.drop(['FECHA','EDAD','GENERO','OCUPACION'],axis='columns', inplace=True)
dfNumericForms=DataExportManager.translateResponses(dfForms,dfQuest)
dfNumericForms.to_csv(file_path_out_numericForms,index=False)# store the numeric form so it can be reused later
print('File "datosFormularioNumericCsv.csv" generated successfully')
return sms
# NOT USED
#'''
@staticmethod
def parseAttribs(MyConnection):
# This method retrieves the maximum popularity scores per attribute (considering the number of questions and responses related to them)
# It runs after the database has been populated with the forms and attributes (before training the model)
base_path = Path(__file__).parent
file_attributes_path = (base_path / "data_csv/datosAtributosCsv.csv").resolve()
header=['TAGS','IDAG','ATRIBUTO GENERAL','IDAE','ATRIBUTO ESPECIFICO']
dfAttributes = pd.read_csv(file_attributes_path, header=0, names=header, encoding='utf-8')
columns=['MAX_P', 'MAX_R']
        dfAttributes[columns] = pd.DataFrame([[np.nan, np.nan]], index=dfAttributes.index)
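        # Sketch (not original code): one way the new MAX_P / MAX_R columns could be
        # filled, assuming hypothetical MyConnection helpers that count the questions
        # and responses linked to each attribute id:
        # for idx in dfAttributes.index:
        #     dfAttributes.at[idx, 'MAX_P'] = MyConnection.countQuestionsForAttribute(idx + 1)
        #     dfAttributes.at[idx, 'MAX_R'] = MyConnection.countResponsesForAttribute(idx + 1)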
import dash
import dash_html_components as html
import dash_core_components as dcc
import plotly.graph_objs as go
import dash_daq as daq
import dash_table
import datetime
from datetime import datetime as dt
from datetime import timedelta
import dateutil.relativedelta
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# from UsedFunctions import *
#====================================================================================== Connecting to DB
import pyodbc
cnxn = pyodbc.connect("Driver={SQL Server Native Client 11.0};"
"Server=;"
"Database=;"
"Uid=;"
"Pwd=;"
"MARS_Connection=Yes;")
cnxn1 = pyodbc.connect("Driver={SQL Server Native Client 11.0};"
"Server=;"
"Database=;"
"Uid=;"
"Pwd=;"
"MARS_Connection=Yes;")
#====================================================================================== Collecting the global data
#------------ Map
TrueCodes = pd.read_csv(r'data\CountryCodes.csv')
drop_box = []
drop_box.append('All')
for country in TrueCodes.Entity.values:
drop_box.append(country)
countries = pd.read_csv('data/CC.csv', keep_default_na=False)
prices = pd.read_csv('data/PriceChangeLog.csv', keep_default_na=False)
df_sub = pd.read_csv('data/country_data.csv')
#-------------------------------------------------------------------- Data: Retention
cohort_android = pd.read_sql_query('EXEC DS_GetRetentionAndroidData', cnxn1)
cohort_android_transpose = cohort_android.set_index('Registration Period').T
cohort_ios = pd.read_sql_query('EXEC DS_GetRetentionIOSData', cnxn1)
cohort_ios_transpose = cohort_ios.set_index('Registration Period').T
#====================================================================================== Activity colors
colors = dict(red = '#d62728', #brick red
orange = '#ff7f0e',#safety orange
pink = '#e377c2',#raspberry yogurt pink
green = '#2ca02c',#cooked asparagus green
purple = '#9467bd',#muted purple
blue = '#1f77b4',#muted blue
blue_teal = '#17becf', #blue-teal
brown = '#8c564b',#chestnut brown
gray = '#7f7f7f',#middle gray
yellow = '#bcbd22', #curry yellow-green
)
map_colorscale = [
[0, "#08519c"],
[0.5, "#6baed6"],
[1, "#9ecae1"]
]
activity_color = {'Lesson': 'red',
'User Lesson': 'orange',
'Q&A': 'purple',
'User Post': 'green',
'Code': 'blue',
'Quiz': 'brown',
'Contest': 'brown',
'Profile': 'pink',
'Own Profile': 'yellow',
'Private Codes': 'blue_teal'}
design_colors = {
'page_bg': '#0f2331',
'chart_bg': '#0e2e43',
'chart_box_bg': '#0e2e43',
'box_borders': '#143756',
'Android': '#5ab4ac',
'iOS': '#d8b365',
'Web': '#f5f5f5',
'text': '#eaf5fc',
'title': '#eaf5fc',
'chart_axis_legends': '#a1aba0',
'chart_inside_lines': '#334d61'
}
design_padding = {
    'level_1': '5px',
    'level_2': '0 20'
}
date_format = 'MMM Do, YY'
title_size = 20
dcc_graph_height = 350
box_shadow = '0px 0px 0px 2px rgb(20, 55, 86)'
#====================================================================================== The Dash app
app = dash.Dash(__name__)
external_css = ["https://cdnjs.cloudflare.com/ajax/libs/normalize/7.0.0/normalize.min.css",
"https://cdnjs.cloudflare.com/ajax/libs/skeleton/2.0.4/skeleton.min.css",
"//fonts.googleapis.com/css?family=Raleway:400,300,600",
'https://codepen.io/plotly/pen/YEYMBZ.css',
"https://maxcdn.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css",
'https://codepen.io/chriddyp/pen/bWLwgP.css']
for css in external_css:
app.css.append_css({"external_url": css})
# ------------------------------------------------------------------------------------- Used Fnctions
def get_country_code(country_name):
country_code = TrueCodes.loc[TrueCodes.Entity == country_name, ['rand']].values[0][0]
return str(country_code)
def get_funnel(start_date, end_date, platform, country=None):
if country:
subscriptions = pd.read_sql_query(
'exec DS_Funnel @StartDate = \'' + start_date + '\', ' +
'@EndDate = \'' + end_date + '\', ' +
'@Platform = \'' + platform + '\',' +
'@Country = \'' + country + '\'', cnxn)
else:
subscriptions = pd.read_sql_query(
'exec DS_Funnel @StartDate = \'' + start_date + '\', ' +
'@EndDate = \'' + end_date + '\', ' +
'@Platform = ' + platform + ' ', cnxn)
subs = []
subs.append(int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['TotalSignups']].sum()))
subs.append(int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['TotalSubs']].sum()))
subs.append(int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['MonthlyOld']].sum()) + \
int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['MonthlyNew']].sum()) + \
int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['AnnualOld']].sum()) + \
int(subscriptions.loc[subscriptions.CountryCode.notnull(), ['AnnualNew']].sum()))
text = []
for i in range(len(subs)):
if i == 0:
text.append('#: ' + str(subs[i]))
else:
subs[0] = subs[0] if subs[0] != 0 else 1
text.append('#: ' + str(subs[i]) + ' <br> ' + '%: ' + str(np.round(subs[i] / subs[0] * 100, 3)))
# if platform == '1122':
# subs[0] = subs[0] / 10
# subs[1] = subs[1] * 2
# subs[2] = subs[2] * 6
# else:
# subs[0] = subs[0] / 20
# subs[1] = subs[1] * 2
# subs[2] = subs[2] * 4
return subs, text
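# Sketch (not used by the callbacks below): the same funnel query with pyodbc '?'
# parameter markers passed through pandas' params argument, instead of building the
# SQL string by concatenation as get_funnel does above.
def get_funnel_parameterised(start_date, end_date, platform, country=None):
    if country:
        sql = 'EXEC DS_Funnel @StartDate = ?, @EndDate = ?, @Platform = ?, @Country = ?'
        return pd.read_sql_query(sql, cnxn, params=[start_date, end_date, platform, country])
    sql = 'EXEC DS_Funnel @StartDate = ?, @EndDate = ?, @Platform = ?'
    return pd.read_sql_query(sql, cnxn, params=[start_date, end_date, platform])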
def price_finder(row):
    country_code, platform, created_date, sub_type = row[['CountryCode', 'Platform', 'CreatedDate', 'SubscriptionType']].values
    # combine the conditions into a single boolean mask instead of chained indexing
    mask = ((prices.CC == country_code) & (prices.Platform == platform) &
            (prices.Subscription_type == sub_type) &
            (prices.StartDate < created_date) & (prices.EndDate >= created_date))
    return prices.loc[mask, 'Price'].values[0]
def subs_table_constructor(subs, prices, countries, signups):
subs['CountryCode'] = subs['CountryCode'].apply(lambda x: x.upper())
signups['CountryCode'] = signups['CountryCode'].apply(lambda x: x.upper())
subs['CountryCode'] = subs['CountryCode'].replace(np.nan, 'NA', regex=True)
signups['CountryCode'] = signups['CountryCode'].replace(np.nan, 'NA', regex=True)
subs["SubscriptionType"] = subs["SubscriptionType"].map({'sololearn_pro_test': "monthly", 'sololearn_pro_annual': "annual", 'sololearn_pro_monthly': "monthly"})
prices["StartDate"] = pd.to_datetime(prices["StartDate"], dayfirst=True)
prices["EndDate"] = pd.to_datetime(prices["EndDate"], dayfirst=True)
subs["SubscriptionStartDate"] = pd.to_datetime(subs["SubscriptionStartDate"], dayfirst=True)
subs["SubscriptionEndDate"] = pd.to_datetime(subs["SubscriptionEndDate"], dayfirst=True)
subs['Paid'] = np.where((subs.SubscriptionEndDate - subs.SubscriptionStartDate) > datetime.timedelta(days=5), 1, 0)
subs['Annual'] = np.where((subs.SubscriptionType == "annual") & (subs.Paid == 1), 1, 0)
subs['Monthly'] = np.where((subs.SubscriptionType == "monthly") & (subs.Paid == 1), 1, 0)
subs["Price"] = subs.apply(price_finder, axis=1)
subs["Revenue"] = subs.Price * subs.Paid
subs_df = subs.groupby("CountryCode").agg({'Platform': 'count', "Paid": 'sum', "Monthly": 'sum', "Annual": 'sum', "Revenue": 'sum'})
subs_df.rename(columns={'Platform': 'TotalSubs'}, inplace = True)
final_df = pd.merge(pd.merge(countries, signups), subs_df, on="CountryCode")
final_df["Revenue_per_user"] = final_df.Revenue / final_df.NumberOfSignups
final_df["Cancel_rate"] = 1 - final_df.Paid / final_df.TotalSubs
final_df = final_df.round(3)
return final_df
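# Usage sketch (placeholder stored-procedure names, not part of the original app):
# subs_raw = pd.read_sql_query('EXEC <GetSubscriptions> ?, ?', cnxn, params=[start_date, end_date])
# signups_raw = pd.read_sql_query('EXEC <GetSignups> ?, ?', cnxn, params=[start_date, end_date])
# final_df = subs_table_constructor(subs_raw, prices, countries, signups_raw)
# table_new_data = final_df.to_dict('records')   # shape matches the columns of table_new below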
table_new = dash_table.DataTable(
id='table_new',
columns= [
# {'name': 'CountryCode', 'id': 'CountryCode'},
{'name': 'Country', 'id': 'Country'},
{'name': 'NumberOfSignups', 'id': 'NumberOfSignups'},
{'name': 'TotalSubs', 'id': 'TotalSubs'},
{'name': 'Paid', 'id': 'Paid'},
{'name': 'Monthly', 'id': 'Monthly'},
{'name': 'Annual', 'id': 'Annual'},
{'name': 'Revenue', 'id': 'Revenue'},
{'name': 'Revenue_per_user', 'id': 'Revenue_per_user'},
{'name': 'Cancel_rate', 'id': 'Cancel_rate'}],
filtering=True,
sorting=True,
style_as_list_view=True,
style_header={
'backgroundColor': 'white',
'fontWeight': 'bold'
},
style_cell_conditional=[
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(238, 238, 238)'
},
{ 'if': {'column_id': 'Country'}, 'width': '20%'},
{ 'if': {'column_id': 'NumberOfSignups'}, 'width': '10%'},
{ 'if': {'column_id': 'TotalSubs'}, 'width': '10%'},
{ 'if': {'column_id': 'Paid'}, 'width': '10%'},
{ 'if': {'column_id': 'Monthly'}, 'width': '10%'},
{ 'if': {'column_id': 'Annual'}, 'width': '10%'},
{ 'if': {'column_id': 'Revenue'}, 'width': '10%'},
{ 'if': {'column_id': 'Revenue_per_user'}, 'width': '10%'},
{ 'if': {'column_id': 'Cancel_rate'}, 'width': '10%'},
],
n_fixed_rows=1,
# style_cell={'width': '150px'},
style_table={
'maxHeight': '500',
'overflowY': 'scroll'
},
# style_data_conditional=[
# {
# 'if': {
# 'column_id': 'Number of Solar Plants',
# # 'filter': '{Number of Solar Plants} > 3.9'
# },
# 'backgroundColor': '#3D9970',
# 'color': 'white',
# }
# ]
)
# table_old = dash_table.DataTable(
# id='table_old',
# columns= [
# # {'name': 'CountryCode', 'id': 'CountryCode'},
# {'name': 'Country', 'id': 'Country'},
# {'name': 'NumberOfSignups', 'id': 'NumberOfSignups'},
# {'name': 'TotalSubs', 'id': 'TotalSubs'},
# {'name': 'Paid', 'id': 'Paid'},
# {'name': 'Monthly', 'id': 'Monthly'},
# {'name': 'Annual', 'id': 'Annual'},
# {'name': 'Revenue', 'id': 'Revenue'},
# {'name': 'Revenue_per_user', 'id': 'Revenue_per_user'},
# {'name': 'Cancel_rate', 'id': 'Cancel_rate'}],
# filtering=True,
# sorting=True,
# style_as_list_view=True,
# style_header={
# 'backgroundColor': 'white',
# 'fontWeight': 'bold'
# },
# style_cell_conditional=[
# {
# 'if': {'row_index': 'odd'},
# 'backgroundColor': 'rgb(238, 238, 238)'
# }
# ],
# n_fixed_rows=1,
# # style_cell={'width': '150px'},
# style_table={
# 'maxHeight': '250',
# 'overflowY': 'scroll'
# },
# # style_data_conditional=[
# # {
# # 'if': {
# # 'column_id': 'Number of Solar Plants',
# # # 'filter': '{Number of Solar Plants} > 3.9'
# # },
# # 'backgroundColor': '#3D9970',
# # 'color': 'white',
# # }
# # ]
# )
#------------------------------------------------------------------------------------Toggle switch
div0_1 = html.Div([
daq.ToggleSwitch(
id='toggle-switch-1',
value=False,
size=50,
label={
            'label': 'Activate Filtering by Date',
'style': {
'backgroundColor': design_colors['page_bg'],
'color' : design_colors['text'],
'size' : 50
}
},
labelPosition='bottom',
color = '#5ab4ac'
)
])
div0_2 = html.Div([
daq.ToggleSwitch(
id='toggle-switch-2',
value=False,
size=50,
label={
'label': 'Activate Filtering by Platform and Country',
'style': {
'backgroundColor': design_colors['page_bg'],
'color' : design_colors['text'],
'size' : 50
}
},
labelPosition='bottom',
color = '#5ab4ac'
)
])
#====================================================================================== HTML Divs
#-------------------------------------------------------------------- Sign-ups
div1_1 = html.Div([
dcc.DatePickerRange(
id='sign-ups-date-picker-range',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now(),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
display_format=date_format,
style={'display': 'none'}
)]
)
div1_2 = html.Div([
dcc.Graph(id='sign-ups-barplot-container')
],
style={'width': '28%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div1_3 = html.Div([
dcc.Graph(id='sign-ups-map-container')
],
style={'width': '55%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div1_4 = html.Div([
dcc.Graph(id='top-countries-container')
],
style={'width': '17%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
#-------------------------------------------------------------------- Retention
div2_1 = html.Div([
dcc.RadioItems(
id='platform_retention',
options=[
{'label': 'IOS', 'value': 'ios'},
{'label': 'Android', 'value': 'android'}
],
value='android',
# textfont = dict(color = 'red'),
labelStyle={'display': 'inline-block', 'color' : design_colors['text']},
style={'display': 'none'}
)
]
)
div2_2 = html.Div([
dcc.Graph(id='retention-heatmap-container')
],
style={'width': '50%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div2_3 = html.Div([
dcc.Graph(id='retention-curve-container')
],
style={'width': '50%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div2 = html.Div([
# html.Div([html.H1("Retention summary")], className="row gs-header gs-text-header", style={'float': 'center'}),
div2_1,
div2_2,
div2_3
],
style={
'borderBottom': 'thin lightgrey solid',
'backgroundColor': design_colors['page_bg'],
'padding': design_padding['level_1'],
'display': 'inline-block',
'width': '100%'}
)
#-------------------------------------------------------------------- Active users & by platform
div4_1_1 = html.Div([
dcc.DatePickerRange(
id='activity-picker-range',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now(),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
display_format='MMM Do, YY',
style={'display': 'none'}
)
]
)
div4_2 = html.Div([
dcc.Graph(id='activity-container')
],
style={'width': '50%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div4_3 = html.Div([
dcc.Graph(id='activity-pie-container')
],
style={'width': '50%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div4 = html.Div([
div4_1_1,
div4_2,
div4_3
],
style={
'backgroundColor': design_colors['page_bg'],
'padding': design_padding['level_1'],
'display': 'inline-block',
'width': '67%'}
)
#-------------------------------------------------------------------- Consumption Venn diagram
div7_1_1 = html.Div([
dcc.DatePickerRange(
id='venn-picker-range',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now(),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
display_format='MMM Do, YY',
style={'display': 'none'}
)]
)
div7_2 = html.Div([
dcc.Graph(id='consumption-Venn-container')
],
style={'width': '100%', 'display': 'inline-block'}
)
div7 = html.Div([
div7_1_1,
div7_2,
],
style={
'backgroundColor': design_colors['page_bg'],
'padding': '0px 5px 0px 0px',
'display': 'inline-block',
'width': '33%'}
)
#-------------------------------------------------------------------- Creation
div5_1_1 = html.Div([
dcc.DatePickerRange(
id='creation-picker-range',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now(),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
display_format='MMM Do, YY',
style={'display': 'none'}
)
])
div5_1_2 = html.Div([
dcc.RadioItems(
id='platform_creation',
options=[
{'label': 'iOS', 'value': 'ios'},
{'label': 'Android', 'value': 'android'},
{'label': 'Total', 'value': 'total'},
],
value='total',
labelStyle={'display': 'inline-block', 'color': design_colors['text']},
style={'display': 'none'}
)
])
div5_3 = html.Div([
dcc.Graph(id='creation_objects-container')
],
style={'width': '33.6%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
#-------------------------------------------------------------------- Consumption
div6_2 = html.Div([
dcc.Graph(id='consumption_objects-container')
],
style={'width': '33%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
div6_3 = html.Div([
dcc.Graph(id='consumption_average_amount-container')
],
style={'width': '33%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
#------------------------------------------------------------------- Funnel
div8 = html.Div([
dcc.DatePickerRange(
id='old_date_picker_funnel_barplot',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=6),
display_format='MMM Do, YY',
style={'display': 'none'}
),
dcc.DatePickerRange(
id='new_date_picker_funnel_barplot',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt.now(),
start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
display_format='MMM Do, YY',
style={'display': 'none'}
),
dcc.RadioItems(
id='platform_funnel_barplot',
options=[
{'label': 'Android', 'value': '1114'},
{'label': 'iOS', 'value': '1122'}
],
value='1122',
labelStyle={'display': 'inline-block', 'color': design_colors['text']},
style={'display': 'none'}
),
dcc.Dropdown(
id='country_funnel_barplot',
options=[{'label':opt, 'value':opt} for opt in drop_box],
value = drop_box[0],
style={'display': 'none'}
),
html.Div([
dcc.Graph(id='funnel-container_barplot', style={'height': 500})
],
style={'width': '100%', 'display': 'inline-block', 'padding': design_padding['level_1']}
)
],
style={
'borderBottom': 'thin lightgrey solid',
'backgroundColor': design_colors['page_bg'],
'padding': design_padding['level_1'],
'display': 'inline-block',
'width': '34%'}
)
# div3_1_1 = html.Div([
# dcc.DatePickerRange(
# id='funnel-picker-range',
# min_date_allowed=dt(2014, 1, 1),
# max_date_allowed=dt.now(),
# end_date=dt.now(),
# start_date=dt.now() - dateutil.relativedelta.relativedelta(days=3),
# display_format='MMM Do, YY',
# style={'display': 'none'}
# )
# ]
# )
# div3_1_2 = html.Div([
# dcc.RadioItems(
# id='platform_funnel',
# options=[
# {'label': 'IOS', 'value': 'ios'},
# {'label': 'Android', 'value': 'android'}
# ],
# value='android',
# labelStyle={'display': 'inline-block', 'color': design_colors['text']},
# style={'display': 'none'}
# )
# ])
# div3_2 = html.Div([
# dcc.Graph(id='funnel-container')
# ],
# style={'width': '100%', 'display': 'inline-block', 'padding': design_padding['level_1']}
# )
# div3 = html.Div([
# div3_1_1,
# div3_1_2,
# div3_2,
# ],
# style={
# 'borderBottom': 'thin lightgrey solid',
# 'backgroundColor': design_colors['page_bg'],
# 'padding': design_padding['level_1'],
# 'display': 'inline-block',
# 'width': '50%'}
# )
#------------------------- Layout of the tables
div9_1 = html.Div([dcc.DatePickerRange(
id='table_new-date-picker',
min_date_allowed=dt(2014, 1, 1),
max_date_allowed=dt.now(),
end_date=dt(2019, 4, 1),
start_date=dt(2019, 4, 1) - dateutil.relativedelta.relativedelta(weeks=1),
display_format='MMM Do, YY',
style={'display': 'none'}
),
dcc.RadioItems(
id='table_new_platform',
options=[
{'label': 'Android', 'value': '1114'},
{'label': 'iOS', 'value': '1122'}
],
value='1122',
labelStyle={'display': 'inline-block', 'color': 'white'},
style={'display': 'none'}
),
table_new
],
style = {'padding': design_padding['level_1'],
'width': '66%'
}
)
# div9_2 = html.Div([
# dcc.DatePickerRange(
# id='table_old-date-picker',
# min_date_allowed=dt(2014, 1, 1),
# max_date_allowed=dt.now(),
# end_date=dt(2019, 4, 1),
# start_date=dt(2019, 4, 1) - dateutil.relativedelta.relativedelta(weeks=1),
# display_format='MMM Do, YY',
# style={'display': 'none'}
# ),
# dcc.RadioItems(
# id='table_old_platform',
# options=[
# {'label': 'Android', 'value': '1114'},
# {'label': 'iOS', 'value': '1122'}
# ],
# value='1114',
# labelStyle={'display': 'inline-block', 'color': 'white'},
# style={'display': 'none'}
# ),
# table_old
# ],
# style={'padding': design_padding['level_1'],
# 'width': '50%'
# }
# )
div9 = html.Div([
div8,
div9_1,
# div9_2
],
style = {'backgroundColor': '#0e2e43',
'display': 'flex',
'flex-direction': 'row',
'padding': '0px 5px 0px 5px',
}
)
div_img = html.Div([
html.Div([
html.Div([
html.H5('Messenger')
],style={'size': title_size,
'color': design_colors['title'],
'text-align': "center"
}),
html.Img(src=app.get_asset_url('image_messenger.png'),
style={
'width': '100%'
})
], style={
'padding': design_padding['level_1'],
'width': '33.333%',
'display': 'inline-block'
}),
html.Div([
html.Div([
html.H5('Comments')
],style={'size': title_size,
'color': design_colors['title'],
'text-align': "center"
}),
html.Img(src=app.get_asset_url('image_comment.png'),
style={
'width': '100%'
})
], style={
'padding': design_padding['level_1'],
'width': '33.333%',
'display': 'inline-block'
}),
html.Div([
html.Div([
html.H5('Discussion')
],style={'size': title_size,
'color': design_colors['title'],
'text-align': "center"
}),
html.Img(src=app.get_asset_url('image_discussion.png'),
style={
'width': '100%'
})
], style={
'padding': design_padding['level_1'],
'width': '33.333%',
'display': 'inline-block'
})
])
#====================================================================================== Combining HTML Divs into the layout form
app.layout = html.Div([
div0_2,
div0_1,
div1_1,
div1_2,
div1_3,
div1_4,
div4,
div7,
div5_1_1,
div5_1_2,
div5_3,
div6_2,
div6_3,
div2_1,
div2_2,
div2_3,
# div3,
# div8,
# div_img_1,
# div_img_2,
# div_img_3,
div_img,
div9
],
style={'backgroundColor': '#0f2331'}
)
#====================================================================================== Callbacks
@app.callback(
dash.dependencies.Output(component_id='sign-ups-date-picker-range', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
# @app.callback(
# dash.dependencies.Output(component_id='funnel-picker-range', component_property='style'),
# [dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
# def show_hide_element(visibility_state):
# if visibility_state:
# return {'display': 'block'}
# else:
# return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='activity-picker-range', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='venn-picker-range', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='creation-picker-range', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='platform_retention', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
# @app.callback(
# dash.dependencies.Output(component_id='platform_funnel', component_property='style'),
# [dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
# def show_hide_element(visibility_state):
# if visibility_state:
# return {'display': 'block'}
# else:
# return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='platform_creation', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='new_date_picker_funnel_barplot', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='old_date_picker_funnel_barplot', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='platform_funnel_barplot', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='country_funnel_barplot', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='table_new-date-picker', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
# @app.callback(
# dash.dependencies.Output(component_id='table_old-date-picker', component_property='style'),
# [dash.dependencies.Input(component_id='toggle-switch-1', component_property='value')])
# def show_hide_element(visibility_state):
# if visibility_state:
# return {'display': 'block'}
# else:
# return {'display': 'none'}
@app.callback(
dash.dependencies.Output(component_id='table_new_platform', component_property='style'),
[dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
def show_hide_element(visibility_state):
if visibility_state:
return {'display': 'block'}
else:
return {'display': 'none'}
# @app.callback(
# dash.dependencies.Output(component_id='table_old_platform', component_property='style'),
# [dash.dependencies.Input(component_id='toggle-switch-2', component_property='value')])
# def show_hide_element(visibility_state):
# if visibility_state:
# return {'display': 'block'}
# else:
# return {'display': 'none'}
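# Refactor sketch (commented out, not active): the near-identical show/hide callbacks
# above could be registered in a loop; the (component, toggle) pairs below simply
# mirror the callbacks already defined.
# visibility_controls = [
#     ('sign-ups-date-picker-range', 'toggle-switch-1'),
#     ('activity-picker-range', 'toggle-switch-1'),
#     ('venn-picker-range', 'toggle-switch-1'),
#     ('creation-picker-range', 'toggle-switch-1'),
#     ('new_date_picker_funnel_barplot', 'toggle-switch-1'),
#     ('old_date_picker_funnel_barplot', 'toggle-switch-1'),
#     ('table_new-date-picker', 'toggle-switch-1'),
#     ('platform_retention', 'toggle-switch-2'),
#     ('platform_creation', 'toggle-switch-2'),
#     ('platform_funnel_barplot', 'toggle-switch-2'),
#     ('country_funnel_barplot', 'toggle-switch-2'),
#     ('table_new_platform', 'toggle-switch-2'),
# ]
# for component_id, switch_id in visibility_controls:
#     @app.callback(
#         dash.dependencies.Output(component_id, 'style'),
#         [dash.dependencies.Input(switch_id, 'value')])
#     def show_hide_element(visibility_state):
#         return {'display': 'block'} if visibility_state else {'display': 'none'}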
#-------------------------------------------------------------------- Sign-ups
#------------------- 1_2 Bar Plot
@app.callback(
dash.dependencies.Output('sign-ups-barplot-container', 'figure'),
[dash.dependencies.Input('sign-ups-date-picker-range', 'start_date'),
dash.dependencies.Input('sign-ups-date-picker-range', 'end_date'),
dash.dependencies.Input('toggle-switch-1', 'value')])
def update_barplot(start_date, end_date, is_live):
start_date = str(start_date)[:10]
end_date = str(end_date)[:10]
if is_live == False:
signups = pd.read_csv(r'data\signups.csv')
signups.drop(columns=['Unnamed: 0'], inplace=True)
signups['Date'] = pd.to_datetime(signups['Date'])
else:
signups = pd.read_sql_query('EXEC DS_GetStatistics\''+start_date+'\',\''+end_date+'\'', cnxn)
signups['Date'] = pd.to_datetime(signups['Date'])
return {
'data': [
go.Bar(x =signups['Date'], y=signups['signups_android'], name ="Android",
marker = dict(color=design_colors['Android'])
),
go.Bar(x =signups['Date'], y=signups['signups_ios'], name ="iOS",
marker = dict(color=design_colors['iOS'])
),
go.Bar(x =signups['Date'], y=signups['signups_web'], name ="Web",
marker = dict(color=design_colors['Web'])
)
],
'layout' : {
'barmode': 'stack',
'paper_bgcolor': design_colors['chart_box_bg'],
'plot_bgcolor': design_colors['chart_bg'],
'xaxis': {
'showgrid': False,
'tickfont': dict(color=design_colors['chart_axis_legends']),
'gridcolor': design_colors['chart_inside_lines'],
'tickformat': '%b %d',
},
'yaxis': {
'showgrid': True,
'tickfont': dict(color=design_colors['chart_axis_legends']),
'gridcolor': design_colors['chart_inside_lines']
},
'margin': go.layout.Margin(
l=50,
r=50,
b=50,
t=50,
# pad=20
),
"title": '<b>Signups<b>',
'titlefont' : dict(
size=title_size,
color=design_colors['title']
),
'legend': dict(font=dict(color=design_colors['text']))
}
}
#------------------- 1_3 Map
@app.callback(
dash.dependencies.Output('sign-ups-map-container', 'figure'),
[dash.dependencies.Input('sign-ups-date-picker-range', 'start_date'),
dash.dependencies.Input('sign-ups-date-picker-range', 'end_date'),
dash.dependencies.Input('toggle-switch-1', 'value')])
def update_map(start_date, end_date, is_live):
start_date = str(start_date)[:10]
end_date = str(end_date)[:10]
if is_live:
SignupsPerCountry = pd.read_sql_query('EXEC DS_GetCountryCodesForMap @RegisterStartDate = \''+start_date+'\', @RegisterEndDate = \''+end_date+'\'', cnxn)
else:
SignupsPerCountry = pd.read_csv(r'data\SignupsPerCountry.csv')
SignupsPerCountry.drop(columns=['Unnamed: 0'], inplace=True)
merged = pd.merge(SignupsPerCountry, TrueCodes, left_on='CountryCode', right_on='rand', how='right')
merged.fillna(0, inplace=True)
return {
'data': [go.Choropleth(
locations = merged['STANAG'],
z = merged['CountOfUsers'].astype(float),
text = merged['Entity'],
autocolorscale = False,
colorscale = map_colorscale,
reversescale = True ,
marker = go.choropleth.Marker(
line = go.choropleth.marker.Line(
color = design_colors['chart_bg'],
width = 0.5
)),
colorbar = go.choropleth.ColorBar(
title = "# of users"),
showscale=False,
)],
'layout': go.Layout(
# title = '<b>Geography<b>',
autosize=False,
paper_bgcolor = design_colors['chart_box_bg'],
plot_bgcolor = design_colors['chart_bg'],
margin=go.layout.Margin(
l=15,
r=15,
b=0,
t=15,
pad=3
),
geo = go.layout.Geo(
bgcolor = design_colors['chart_bg'],
showframe=False,
showlakes = False,
showcoastlines = False),
),
}
#------------------- 1_4 Top countries
@app.callback(
dash.dependencies.Output('top-countries-container', 'figure'),
[dash.dependencies.Input('sign-ups-date-picker-range', 'start_date'),
dash.dependencies.Input('sign-ups-date-picker-range', 'end_date'),
dash.dependencies.Input('toggle-switch-1', 'value')])
def update_top_countires(start_date, end_date, is_live):
start_date = str(start_date)[:10]
end_date = str(end_date)[:10]
if is_live:
top_countries = pd.read_sql_query('EXEC DS_GetTopCountries \''+start_date+'\',\''+end_date+'\', 1', cnxn)
else:
top_countries = pd.read_csv(r'data\top_countries.csv')
top_countries.drop(columns=['Unnamed: 0'], inplace=True)
merged = pd.merge(top_countries, TrueCodes, left_on='CountryCode', right_on='rand', how='inner').sort_values(by = 'NumberOf', ascending = False)
trace = go.Bar(
x=list(merged['NumberOf'])[::-1],
y=list(merged['CountryCode'])[::-1],
text = list(merged['NumberOf'])[::-1],
textposition='auto',
orientation='h',
marker=dict(color="#3182bd"),
textfont=dict(
color=design_colors['text'],
size=14,
            family='Arial',
),
hoverinfo = 'none',
)
data = [trace]
layout = {
'paper_bgcolor': design_colors['chart_box_bg'],
'plot_bgcolor': design_colors['chart_bg'],
'xaxis': {
'showgrid': False,
'tickfont': dict(color=design_colors['chart_axis_legends']),
'gridcolor': design_colors['chart_inside_lines']
},
'yaxis': {
'showgrid': False,
'tickfont': dict(color=design_colors['chart_axis_legends']),
'gridcolor': design_colors['chart_inside_lines']
},
"title": '<b>Top Countries<b>',
'titlefont' : dict(
size=title_size,
color=design_colors['title']
)
}
return {
'data': data,
'layout': layout
}
#-------------------------------------------------------------------- Active users
#------------------- 4_2 Daily active users
@app.callback(
dash.dependencies.Output('activity-container', 'figure'),
[dash.dependencies.Input('activity-picker-range', 'start_date'),
dash.dependencies.Input('activity-picker-range', 'end_date'),
dash.dependencies.Input('toggle-switch-1', 'value')])
def update_activity(start_date, end_date, is_live):
periodicity = '1'
start_date = str(start_date)[:10]
end_date = str(end_date)[:10]
if is_live:
UserActivities_Android = pd.read_sql_query('EXEC DS_GetUserActivities \''+start_date+'\',\''+end_date+'\','+periodicity+','+'1', cnxn)
UserActivities_iOS = pd.read_sql_query('EXEC DS_GetUserActivities \''+start_date+'\',\''+end_date+'\','+periodicity+','+'2', cnxn)
else:
UserActivities_Android = pd.read_csv(r'data\UserActivities_Android.csv')
UserActivities_Android.drop(columns=['Unnamed: 0'], inplace=True)
UserActivities_iOS = pd.read_csv(r'data\UserActivities_iOS.csv')
UserActivities_iOS.drop(columns=['Unnamed: 0'], inplace=True)
trace1 = go.Scatter(x = UserActivities_Android['Date'], y=UserActivities_Android['Checkins'], name = "Android",
marker = dict(color=design_colors['Android']),
showlegend=False
)
trace2 = go.Scatter(x = UserActivities_iOS['Date'], y=UserActivities_iOS['Checkins'], name = "iOS",
marker = dict(color=design_colors['iOS']),
showlegend=False
)
annotations = []
for i in range(len(UserActivities_Android)):
annotation_1 =dict(
x=UserActivities_iOS['Date'].values[i],
y=UserActivities_iOS['Checkins'].values[i],
xref='x',
yref='y',
text=str(np.round(UserActivities_iOS['Checkins'].values[i]/1000.0, 1)) + 'k',
showarrow=False,
yshift = 20,
font = dict(
color = design_colors['title'],
size = 10
)
)
annotation_2 =dict(
x=UserActivities_Android['Date'].values[i],
y=UserActivities_Android['Checkins'].values[i],
xref='x',
yref='y',
text=str(np.round(UserActivities_Android['Checkins'].values[i]/1000.0, 1)) + 'k',
showarrow=False,
yshift = 20,
font = dict(
color = design_colors['title'],
size = 10
)
)
annotations.append(annotation_1)
annotations.append(annotation_2)
layout = dict(title = "<b>Active Users<b>",
titlefont = dict(
size=title_size,
color=design_colors['title']
),
paper_bgcolor = design_colors['chart_box_bg'],
plot_bgcolor = design_colors['chart_bg'],
margin=go.layout.Margin(
l=50,
r=50,
b=50,
t=50,
# pad=20
),
xaxis = {
'showgrid': False,
'tickfont': dict(color=design_colors['chart_axis_legends']),
'gridcolor': design_colors['chart_inside_lines'],
'tickformat': '%b %d',
},
yaxis = {
'showgrid': True,
'tickfont': dict(color=design_colors['chart_axis_legends']),
'gridcolor': design_colors['chart_inside_lines'],
'range': [0,UserActivities_Android.Checkins.max()*1.3]
},
annotations=annotations
)
data = [trace1, trace2]
return {
'data': data,
'layout' : layout
}
#------------------- 4_3 Active users by platform
@app.callback(
dash.dependencies.Output('activity-pie-container', 'figure'),
[dash.dependencies.Input('activity-picker-range', 'start_date'),
dash.dependencies.Input('activity-picker-range', 'end_date'),
dash.dependencies.Input('toggle-switch-1', 'value')])
def update_activity_pie(start_date, end_date, is_live):
start_date = str(start_date)[:10]
end_date = str(end_date)[:10]
if is_live:
PieActivities = pd.read_sql_query('EXEC DS_GetActivityByPlatform \''+start_date+'\',\''+end_date+'\'', cnxn)
else:
PieActivities = pd.read_csv(r'data\PieActivities.csv')
PieActivities.drop(columns=['Unnamed: 0'], inplace=True)
trace = go.Pie(labels=PieActivities.Platform,
values=PieActivities.CountOfUsers,
marker=dict(
colors=[design_colors['Android'],design_colors['iOS']]
),
textfont=dict(
size=20,
                        family='Arial',
),
)
data = [trace]
layout = dict(title = "<b>Platform Share<b>",
titlefont = dict(
size=title_size,
color=design_colors['title']
),
margin=go.layout.Margin(
l=50,
r=50,
b=50,
t=50,
# pad=20
),
paper_bgcolor = design_colors['chart_box_bg'],
plot_bgcolor = design_colors['chart_bg'],
legend = dict(
font=dict(color=design_colors['text'],
)
)
)
return {
'data': data,
'layout' : layout
}
#------------------- 4_4 Consumption Venn diagram
@app.callback(
dash.dependencies.Output('consumption-Venn-container', 'figure'),
[dash.dependencies.Input('venn-picker-range', 'start_date'),
dash.dependencies.Input('venn-picker-range', 'end_date'),
dash.dependencies.Input('toggle-switch-1', 'value')])
def update_venn(start_date, end_date, is_live):
start_date = str(start_date)[:10]
end_date = str(end_date)[:10]
if is_live:
data = pd.read_sql_query('EXEC DS_GetConsumption_LearnSocial @StartDate = \'' + start_date + '\', @EndDate = \'' + end_date + '\'',cnxn)
else:
data = pd.read_csv(r'data\venn_data.csv')
data.drop(columns=['Unnamed: 0'], inplace=True)
data = data.fillna(0)
data.loc[data.Lesson_Consumers > 0, 'Lesson_Consumers'] = 1
data.loc[data.Social_Content_Consumers > 0, 'Social_Content_Consumers'] = 1
data['LS'] = data.Lesson_Consumers + data.Social_Content_Consumers
a = len(data[data.Lesson_Consumers == 1]) - len(data[data.LS > 1])
b = len(data[data.Social_Content_Consumers == 1]) - len(data[data.LS > 1])
c = len(data[data.LS > 1])
r1 = np.sqrt((a + c) / np.pi)
r2 = np.sqrt((b + c) / np.pi)
dist = np.sqrt(c * 3 / np.pi)
data = [go.Scatter(
x=[r1 * 0.7, 2 * r1 - dist / 2, (2 * r1 + r2 - dist) * 1.3,2 * r1 - dist / 2],
y=[r1, r1, r1, -0.2*r1],
text=['{}({}%)'.format("Learn only<br>", np.round(a / (a + b + c) * 100, 1)),
'({}%)'.format(np.round(c / (a + b + c) * 100, 1)),
'{}({}%)'.format("Social only<br>", np.round(b / (a + b + c) * 100, 1)),
'{}({}%)'.format("Nothing Doers ", np.round(len(data[data.LS == 0]) / len(data)*100,1))],
mode='text',
textfont=dict(
color=design_colors['text'],
size=18,
                family='Arial',
)
)]
layout = {
'title' : "<b>Activity<b>",
'titlefont' : dict(
size=title_size,
color=design_colors['title']
),
'paper_bgcolor' : design_colors['chart_box_bg'],
'plot_bgcolor' : design_colors['chart_bg'],
'xaxis': {
'showticklabels': False,
'showgrid': False,
'zeroline': False,
},
'yaxis': {
'showticklabels': False,
'showgrid': False,
'zeroline': False,
},
'shapes': [
{
'opacity': 0.3,
'xref': 'x',
'yref': 'y',
'fillcolor': '#154c75',
'x0': 0,
'y0': 0,
'x1': 2 * r1,
'y1': 2 * r1,
'type': 'circle',
'line': {
'color': '#154c75'
},
},
{
'opacity': 0.3,
'xref': 'x',
'yref': 'y',
'fillcolor': '#277fc1',
'x0': 2 * r1 - dist,
'y0': r1 - r2,
'x1': 2 * r1 - dist + 2 * r2,
'y1': r1 + r2,
'type': 'circle',
'line': {
'color': '#277fc1',
},
}
],
'margin': {
'l': 20,
'r': 20,
'b': 30
},
}
return {
'data': data,
'layout': layout
}
#---------------------------------------- 5_2 Creation: Daily trend
@app.callback(
dash.dependencies.Output('creation_objects-container', 'figure'),
[dash.dependencies.Input('creation-picker-range', 'start_date'),
dash.dependencies.Input('creation-picker-range', 'end_date'),
dash.dependencies.Input('platform_creation', 'value'),
dash.dependencies.Input('toggle-switch-1', 'value')])
def update_creation_percentage(start_date, end_date, creation_platform, is_live):
start_date = str(start_date)[:10]
end_date = str(end_date)[:10]
platform = '1' if creation_platform == 'android' else '2' if creation_platform == 'ios' else '3' if creation_platform == 'total' else '4'
if is_live:
data2 = pd.read_sql_query('EXEC DS_GetCreatedContent_byPlatform \''+start_date+'\',\''+end_date+'\','+platform, cnxn)
else:
if platform == '3':
            data2 = pd.read_csv(r'data\created_content.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 10 17:22:51 2019
Work flow: to obtain the TD products for use with ZWD (after download):
1)use fill_fix_all_10mins_IMS_stations() after copying the downloaded TD
2)use IMS_interpolating_to_GNSS_stations_israel(dt=None, start_year=2019(latest))
3)use resample_GNSS_TD(path=ims_path) to resample all TD
@author: ziskin
"""
from PW_paths import work_yuval
from pathlib import Path
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
ims_10mins_path = ims_path / '10mins'
awd_path = work_yuval/'AW3D30'
axis_path = work_yuval/'axis'
cwd = Path().cwd()
# fill missing data:
#some_missing = ds.tmin.sel(time=ds['time.day'] > 15).reindex_like(ds)
#
#In [20]: filled = some_missing.groupby('time.month').fillna(climatology.tmin)
#
#In [21]: both = xr.Dataset({'some_missing': some_missing, 'filled': filled})
# kabr, nzrt, katz, elro, klhv, yrcm, slom have ims stations not close to them!
gnss_ims_dict = {
'alon': 'ASHQELON-PORT', 'bshm': 'HAIFA-TECHNION', 'csar': 'HADERA-PORT',
'tela': 'TEL-AVIV-COAST', 'slom': 'BESOR-FARM', 'kabr': 'SHAVE-ZIYYON',
'nzrt': 'DEIR-HANNA', 'katz': 'GAMLA', 'elro': 'MEROM-GOLAN-PICMAN',
'mrav': 'MAALE-GILBOA', 'yosh': 'ARIEL', 'jslm': 'JERUSALEM-GIVAT-RAM',
'drag': 'METZOKE-DRAGOT', 'dsea': 'SEDOM', 'ramo': 'MIZPE-RAMON-20120927',
'nrif': 'NEOT-SMADAR', 'elat': 'ELAT', 'klhv': 'SHANI',
'yrcm': 'ZOMET-HANEGEV', 'spir': 'PARAN-20060124', 'nizn': 'EZUZ'}
ims_units_dict = {
'BP': 'hPa',
'NIP': 'W/m^2',
'Rain': 'mm',
'TD': 'deg_C',
'WD': 'deg',
'WS': 'm/s',
'U': 'm/s',
'V': 'm/s',
'G': ''}
def save_daily_IMS_params_at_GNSS_loc(ims_path=ims_path,
param_name='WS', stations=[x for x in gnss_ims_dict.keys()]):
import xarray as xr
from aux_gps import save_ncfile
param = xr.open_dataset(
ims_path / 'IMS_{}_israeli_10mins.nc'.format(param_name))
ims_stns = [gnss_ims_dict.get(x) for x in stations]
param = param[ims_stns]
param = param.resample(time='D', keep_attrs=True).mean(keep_attrs=True)
inv_dict = {v: k for k, v in gnss_ims_dict.items()}
for da in param:
param = param.rename({da: inv_dict.get(da)})
filename = 'GNSS_{}_daily.nc'.format(param_name)
save_ncfile(param, ims_path, filename)
return param
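# Usage sketch: save_daily_IMS_params_at_GNSS_loc(param_name='TD') would write
# GNSS_TD_daily.nc with daily means of the GNSS-matched IMS stations, renamed to
# their GNSS site names.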
def produce_bet_dagan_long_term_pressure(path=ims_path, rate='1H',
savepath=None, fill_from_jerusalem=True):
import xarray as xr
from aux_gps import xr_reindex_with_date_range
from aux_gps import get_unique_index
from aux_gps import save_ncfile
from aux_gps import anomalize_xr
# load manual old measurements and new 3 hr ones:
bd_man = xr.open_dataset(
path / 'IMS_hourly_03hr.nc')['BET-DAGAN-MAN_2520_ps']
bd_auto = xr.open_dataset(path / 'IMS_hourly_03hr.nc')['BET-DAGAN_2523_ps']
bd = xr.concat(
[bd_man.dropna('time'), bd_auto.dropna('time')], 'time', join='inner')
bd = get_unique_index(bd)
bd = bd.sortby('time')
bd = xr_reindex_with_date_range(bd, freq='1H')
# remove dayofyear mean, interpolate and reconstruct signal to fill it with climatology:
climatology = bd.groupby('time.dayofyear').mean(keep_attrs=True)
bd_anoms = anomalize_xr(bd, freq='DOY')
bd_inter = bd_anoms.interpolate_na(
'time', method='cubic', max_gap='24H', keep_attrs=True)
# bd_inter = bd.interpolate_na('time', max_gap='3H', method='cubic')
bd_inter = bd_inter.groupby('time.dayofyear') + climatology
bd_inter = bd_inter.reset_coords(drop=True)
# load 10-mins new measurements:
bd_10 = xr.open_dataset(path / 'IMS_BP_israeli_hourly.nc')['BET-DAGAN']
bd_10 = bd_10.dropna('time').sel(
time=slice(
'2019-06-30T00:00:00',
None)).resample(
time='1H').mean()
bd_inter = xr.concat([bd_inter, bd_10], 'time', join='inner')
bd_inter = get_unique_index(bd_inter)
bd_inter = bd_inter.sortby('time')
bd_inter.name = 'bet-dagan'
bd_inter.attrs['action'] = 'interpolated from 3H'
if fill_from_jerusalem:
print('filling missing gaps from 2018 with jerusalem')
jr_10 = xr.load_dataset(
path / 'IMS_BP_israeli_hourly.nc')['JERUSALEM-CENTRE']
climatology = bd_inter.groupby('time.dayofyear').mean(keep_attrs=True)
jr_10_anoms = anomalize_xr(jr_10, 'DOY')
bd_anoms = anomalize_xr(bd_inter, 'DOY')
bd_anoms = xr.concat(
[bd_anoms.dropna('time'), jr_10_anoms.dropna('time')], 'time', join='inner')
bd_anoms = get_unique_index(bd_anoms)
bd_anoms = bd_anoms.sortby('time')
bd_anoms = xr_reindex_with_date_range(bd_anoms, freq='5T')
bd_anoms = bd_anoms.interpolate_na(
'time', method='cubic', max_gap='2H')
bd_anoms.name = 'bet-dagan'
bd_anoms.attrs['action'] = 'interpolated from 3H'
bd_anoms.attrs['filled'] = 'using Jerusalem-centre'
bd_anoms.attrs['long_name'] = 'Pressure Anomalies'
bd_anoms.attrs['units'] = 'hPa'
bd_inter = bd_anoms.groupby('time.dayofyear') + climatology
bd_inter = bd_inter.resample(
time='1H', keep_attrs=True).mean(keep_attrs=True)
# if savepath is not None:
# yr_min = bd_anoms.time.min().dt.year.item()
# yr_max = bd_anoms.time.max().dt.year.item()
# filename = 'IMS_BD_anoms_5min_ps_{}-{}.nc'.format(
# yr_min, yr_max)
# save_ncfile(bd_anoms, savepath, filename)
# return bd_anoms
if savepath is not None:
# filename = 'IMS_BD_hourly_ps.nc'
yr_min = bd_inter.time.min().dt.year.item()
yr_max = bd_inter.time.max().dt.year.item()
filename = 'IMS_BD_hourly_ps_{}-{}.nc'.format(yr_min, yr_max)
save_ncfile(bd_inter, savepath, filename)
bd_anoms = anomalize_xr(bd_inter, 'DOY', units='std')
filename = 'IMS_BD_hourly_anoms_std_ps_{}-{}.nc'.format(yr_min, yr_max)
save_ncfile(bd_anoms, savepath, filename)
bd_anoms = anomalize_xr(bd_inter, 'DOY')
filename = 'IMS_BD_hourly_anoms_ps_{}-{}.nc'.format(yr_min, yr_max)
save_ncfile(bd_anoms, savepath, filename)
return bd_inter
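# Sketch (not part of the pipeline): the climatology-based gap filling done above via
# anomalize_xr, written with plain xarray only for a generic hourly DataArray `da`
# (an assumed input with a 'time' coordinate).
def fill_gaps_with_doy_climatology(da, max_gap='24H'):
    clim = da.groupby('time.dayofyear').mean('time')       # day-of-year climatology
    anoms = da.groupby('time.dayofyear') - clim             # remove the seasonal cycle
    anoms = anoms.interpolate_na('time', method='cubic', max_gap=max_gap)
    return anoms.groupby('time.dayofyear') + clim           # add the cycle back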
def transform_wind_speed_direction_to_u_v(path=ims_path, savepath=ims_path):
import xarray as xr
import numpy as np
WS = xr.load_dataset(path / 'IMS_WS_israeli_10mins.nc')
WD = xr.load_dataset(path / 'IMS_WD_israeli_10mins.nc')
# change angles to math:
WD = 270 - WD
U = WS * np.cos(np.deg2rad(WD))
V = WS * np.sin(np.deg2rad(WD))
print('updating attrs...')
for station in WS:
attrs = WS[station].attrs
attrs.update(channel_name='U')
attrs.update(units='m/s')
attrs.update(field_name='zonal velocity')
U[station].attrs = attrs
attrs.update(channel_name='V')
attrs.update(field_name='meridional velocity')
V[station].attrs = attrs
if savepath is not None:
filename = 'IMS_U_israeli_10mins.nc'
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in U.data_vars}
U.to_netcdf(savepath / filename, 'w', encoding=encoding)
filename = 'IMS_V_israeli_10mins.nc'
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in V.data_vars}
V.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return
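# Sanity check of the WD -> (U, V) convention above (comments only): a meteorological
# direction of 270 deg means wind blowing FROM the west, i.e. an eastward flow, so U
# should equal +WS and V should be 0:
#   wd_math = 270 - 270 = 0 deg
#   U = WS * cos(0) = WS   (eastward)
#   V = WS * sin(0) = 0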
def perform_harmonic_analysis_all_IMS(path=ims_path, var='BP', n=4,
savepath=ims_path):
import xarray as xr
from aux_gps import harmonic_analysis_xr
from aux_gps import keep_iqr
ims = xr.load_dataset(path / 'IMS_{}_israeli_10mins.nc'.format(var))
sites = [x for x in gnss_ims_dict.values()]
ims_actual_sites = [x for x in ims if x in sites]
ims = ims[ims_actual_sites]
if var == 'NIP':
ims = xr.merge([keep_iqr(ims[x]) for x in ims])
max_nip = ims.to_array('site').max()
ims /= max_nip
dss_list = []
for site in ims:
da = ims[site]
da = keep_iqr(da)
print('performing harmonic analysis for IMS {} field at {} site:'.format(var, site))
dss = harmonic_analysis_xr(da, n=n, anomalize=True, normalize=False)
dss_list.append(dss)
dss_all = xr.merge(dss_list)
dss_all.attrs['field'] = var
dss_all.attrs['units'] = ims_units_dict[var]
if savepath is not None:
filename = 'IMS_{}_harmonics_diurnal.nc'.format(var)
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in dss_all.data_vars}
dss_all.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return dss_all
def align_10mins_ims_to_gnss_and_save(ims_path=ims_path, field='G7',
gnss_ims_dict=gnss_ims_dict,
savepath=work_yuval):
import xarray as xr
d = dict(zip(gnss_ims_dict.values(), gnss_ims_dict.keys()))
gnss_list = []
for station, gnss_site in d.items():
print('loading IMS station {}'.format(station))
ims_field = xr.load_dataset(
ims_path / 'IMS_{}_israeli_10mins.nc'.format(field))[station]
gnss = ims_field.load()
gnss.name = gnss_site
gnss.attrs['IMS_station'] = station
gnss_list.append(gnss)
gnss_sites = xr.merge(gnss_list)
if savepath is not None:
filename = 'GNSS_IMS_{}_israeli_10mins.nc'.format(field)
print('saving {} to {}'.format(filename, savepath))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in gnss_sites.data_vars}
gnss_sites.to_netcdf(savepath / filename, 'w', encoding=encoding)
print('Done!')
return gnss_sites
def produce_10mins_gustiness(path=ims_path, rolling=5):
import xarray as xr
from aux_gps import keep_iqr
from aux_gps import xr_reindex_with_date_range
ws = xr.load_dataset(path / 'IMS_WS_israeli_10mins.nc')
stations = [x for x in ws.data_vars]
g_list = []
for station in stations:
        print('processing station {}'.format(station))
attrs = ws[station].attrs
g = ws[station].rolling(time=rolling, center=True).std(
) / ws[station].rolling(time=rolling, center=True).mean()
g = keep_iqr(g)
g = xr_reindex_with_date_range(g, freq='10min')
g.name = station
g.attrs = attrs
g_list.append(g)
G = xr.merge(g_list)
filename = 'IMS_G{}_israeli_10mins.nc'.format(rolling)
print('saving {} to {}'.format(filename, path))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in G.data_vars}
G.to_netcdf(path / filename, 'w', encoding=encoding)
print('Done resampling!')
return G
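# The gustiness above is a rolling coefficient of variation: the standard deviation
# divided by the mean of the wind speed over a centred window of `rolling` 10-minute
# samples (50 minutes for the default rolling=5).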
def produce_10mins_absolute_humidity(path=ims_path):
from sounding_procedures import wrap_xr_metpy_mixing_ratio
from aux_gps import dim_intersection
import xarray as xr
P = xr.load_dataset(path / 'IMS_BP_israeli_10mins.nc')
stations = [x for x in P.data_vars]
T = xr.open_dataset(path / 'IMS_TD_israeli_10mins.nc')
T = T[stations].load()
RH = xr.open_dataset(path / 'IMS_RH_israeli_10mins.nc')
RH = RH[stations].load()
mr_list = []
for station in stations:
        print('processing station {}'.format(station))
p = P[station]
t = T[station]
rh = RH[station]
new_time = dim_intersection([p, t, rh])
p = p.sel(time=new_time)
rh = rh.sel(time=new_time)
t = t.sel(time=new_time)
mr = wrap_xr_metpy_mixing_ratio(p, t, rh, verbose=True)
mr_list.append(mr)
MR = xr.merge(mr_list)
filename = 'IMS_MR_israeli_10mins.nc'
print('saving {} to {}'.format(filename, path))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in MR.data_vars}
MR.to_netcdf(path / filename, 'w', encoding=encoding)
print('Done resampling!')
return MR
def produce_wind_frequency_gustiness(path=ims_path,
station='TEL-AVIV-COAST',
season='DJF', plot=True):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
from aux_gps import keep_iqr
ws = xr.open_dataset(path / 'IMS_WS_israeli_10mins.nc')[station]
ws.load()
ws = ws.sel(time=ws['time.season'] == season)
gustiness = ws.rolling(time=5).std() / ws.rolling(time=5).mean()
gustiness = keep_iqr(gustiness)
gustiness_anoms = gustiness.groupby(
'time.month') - gustiness.groupby('time.month').mean('time')
gustiness_anoms = gustiness_anoms.reset_coords(drop=True)
G = gustiness_anoms.groupby('time.hour').mean('time')
wd = xr.open_dataset(path / 'IMS_WD_israeli_10mins.nc')[station]
wd.load()
wd.name = 'WD'
wd = wd.sel(time=wd['time.season'] == season)
all_Q = wd.groupby('time.hour').count()
Q1 = wd.where((wd >= 0) & (wd < 90)).dropna('time')
Q2 = wd.where((wd >= 90) & (wd < 180)).dropna('time')
Q3 = wd.where((wd >= 180.1) & (wd < 270)).dropna('time')
Q4 = wd.where((wd >= 270) & (wd < 360)).dropna('time')
Q = xr.concat([Q1, Q2, Q3, Q4], 'Q')
Q['Q'] = [x + 1 for x in range(4)]
Q_freq = 100.0 * (Q.groupby('time.hour').count() / all_Q)
if plot:
fig, ax = plt.subplots(figsize=(16, 8))
for q in Q_freq['Q']:
Q_freq.sel(Q=q).plot(ax=ax)
ax.set_title(
'Relative wind direction frequency in {} IMS station in {} season'.format(
station, season))
ax.set_ylabel('Relative frequency [%]')
ax.set_xlabel('Time of day [UTC]')
ax.set_xticks(np.arange(0, 24, step=1))
ax.legend([r'0$\degree$-90$\degree$', r'90$\degree$-180$\degree$',
r'180$\degree$-270$\degree$', r'270$\degree$-360$\degree$'], loc='upper left')
ax.grid()
ax2 = ax.twinx()
G.plot.line(ax=ax2, color='k', marker='o')
ax2.axhline(0, color='k', linestyle='--')
ax2.legend(['{} Gustiness anomalies'.format(station)],
loc='upper right')
ax2.set_ylabel('Gustiness anomalies')
return
def produce_gustiness(path=ims_path,
station='TEL-AVIV-COAST',
season='DJF', pw_station='tela', temp=False,
ax=None):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
from aux_gps import keep_iqr
from aux_gps import groupby_date_xr
from matplotlib.ticker import FixedLocator
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
adjust_yaxis(ax2, (y1-y2)/2, v2)
adjust_yaxis(ax1, (y2-y1)/2, v1)
def adjust_yaxis(ax, ydif, v):
"""shift axis ax by ydiff, maintaining point v at the same location"""
inv = ax.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, ydif))
miny, maxy = ax.get_ylim()
miny, maxy = miny - v, maxy - v
if -miny > maxy or (-miny == maxy and dy > 0):
nminy = miny
nmaxy = miny*(maxy+dy)/(miny+dy)
else:
nmaxy = maxy
nminy = maxy*(miny+dy)/(maxy+dy)
ax.set_ylim(nminy+v, nmaxy+v)
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
print('loading {} IMS station...'.format(station))
g = xr.open_dataset(path / 'IMS_G_israeli_10mins.nc')[station]
g.load()
g = g.sel(time=g['time.season'] == season)
date = groupby_date_xr(g)
# g_anoms = g.groupby('time.month') - g.groupby('time.month').mean('time')
g_anoms = g.groupby(date) - g.groupby(date).mean('time')
g_anoms = g_anoms.reset_coords(drop=True)
G = g_anoms.groupby('time.hour').mean('time')
if ax is None:
fig, ax = plt.subplots(figsize=(16, 8))
G.plot(ax=ax, color='b', marker='o')
ax.set_title(
'Gustiness {} IMS station in {} season'.format(
station, season))
ax.axhline(0, color='b', linestyle='--')
ax.set_ylabel('Gustiness anomalies [dimensionless]', color='b')
ax.set_xlabel('Time of day [UTC]')
ax.set_xticks(np.arange(0, 24, step=1))
ax.yaxis.label.set_color('b')
ax.tick_params(axis='y', colors='b')
ax.grid()
if pw_station is not None:
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_50_homogenized.nc')[pw_station]
pw.load().dropna('time')
pw = pw.sel(time=pw['time.season'] == season)
date = groupby_date_xr(pw)
pw = pw.groupby(date) - pw.groupby(date).mean('time')
pw = pw.reset_coords(drop=True)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
pw.plot.line(ax=axpw, color='k', marker='o')
axpw.axhline(0, color='k', linestyle='--')
axpw.legend(['{} PW anomalies'.format(
pw_station.upper())], loc='upper right')
axpw.set_ylabel('PW anomalies [mm]')
align_yaxis(ax, 0, axpw, 0)
    if temp:
        # temperature hourly anomalies, loaded as in
        # produce_relative_frequency_wind_direction (T was used below but never defined here)
        T = xr.open_dataset(path / 'IMS_TD_israeli_10mins.nc')[station]
        T.load()
        T = T.groupby('time.month') - T.groupby('time.month').mean('time')
        T = T.reset_coords(drop=True)
        T = T.sel(time=T['time.season'] == season)
        T = T.groupby('time.hour').mean('time')
        axt = ax.twinx()
axt.spines["right"].set_position(("axes", 1.05))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
make_patch_spines_invisible(axt)
# Second, show the right spine.
axt.spines["right"].set_visible(True)
p3, = T.plot.line(ax=axt, marker='s', color='m',
label="Temperature")
axt.yaxis.label.set_color(p3.get_color())
axt.tick_params(axis='y', colors=p3.get_color())
axt.set_ylabel('Temperature anomalies [$C\degree$]')
return G
def produce_relative_frequency_wind_direction(path=ims_path,
station='TEL-AVIV-COAST',
season='DJF', with_weights=False,
pw_station='tela', temp=False,
plot=True):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
def make_patch_spines_invisible(ax):
ax.set_frame_on(True)
ax.patch.set_visible(False)
for sp in ax.spines.values():
sp.set_visible(False)
wd = xr.open_dataset(path / 'IMS_WD_israeli_10mins.nc')[station]
wd.load()
wd.name = 'WD'
wd = wd.sel(time=wd['time.season'] == season)
all_Q = wd.groupby('time.hour').count()
Q1 = wd.where((wd >= 0) & (wd < 90)).dropna('time')
Q2 = wd.where((wd >= 90) & (wd < 180)).dropna('time')
Q3 = wd.where((wd >= 180.1) & (wd < 270)).dropna('time')
Q4 = wd.where((wd >= 270) & (wd < 360)).dropna('time')
Q = xr.concat([Q1, Q2, Q3, Q4], 'Q')
Q['Q'] = [x + 1 for x in range(4)]
Q_freq = 100.0 * (Q.groupby('time.hour').count() / all_Q)
T = xr.open_dataset(path / 'IMS_TD_israeli_10mins.nc')[station]
T.load()
T = T.groupby('time.month') - T.groupby('time.month').mean('time')
T = T.reset_coords(drop=True)
T = T.sel(time=T['time.season'] == season)
T = T.groupby('time.hour').mean('time')
if with_weights:
ws = xr.open_dataset(path / 'IMS_WS_israeli_10mins.nc')[station]
ws.load()
ws = ws.sel(time=ws['time.season'] == season)
ws.name = 'WS'
wind = xr.merge([ws, wd])
wind = wind.dropna('time')
all_Q = wind['WD'].groupby('time.hour').count()
Q1 = wind['WS'].where(
(wind['WD'] >= 0) & (wind['WD'] < 90)).dropna('time')
Q2 = wind['WS'].where(
(wind['WD'] >= 90) & (wind['WD'] < 180)).dropna('time')
Q3 = wind['WS'].where(
(wind['WD'] >= 180) & (wind['WD'] < 270)).dropna('time')
Q4 = wind['WS'].where(
(wind['WD'] >= 270) & (wind['WD'] < 360)).dropna('time')
Q = xr.concat([Q1, Q2, Q3, Q4], 'Q')
Q['Q'] = [x + 1 for x in range(4)]
Q_ratio = (Q.groupby('time.hour').count() / all_Q)
Q_mean = Q.groupby('time.hour').mean() / Q.groupby('time.hour').max()
Q_freq = 100 * ((Q_mean * Q_ratio) / (Q_mean * Q_ratio).sum('Q'))
if plot:
fig, ax = plt.subplots(figsize=(16, 8))
for q in Q_freq['Q']:
Q_freq.sel(Q=q).plot(ax=ax)
ax.set_title(
'Relative wind direction frequency in {} IMS station in {} season'.format(
station, season))
ax.set_ylabel('Relative frequency [%]')
ax.set_xlabel('Time of day [UTC]')
ax.legend([r'0$\degree$-90$\degree$', r'90$\degree$-180$\degree$',
r'180$\degree$-270$\degree$', r'270$\degree$-360$\degree$'], loc='upper left')
ax.set_xticks(np.arange(0, 24, step=1))
ax.grid()
if pw_station is not None:
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_50_homogenized.nc')[pw_station]
pw.load().dropna('time')
pw = pw.groupby('time.month') - \
pw.groupby('time.month').mean('time')
pw = pw.reset_coords(drop=True)
pw = pw.sel(time=pw['time.season'] == season)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
pw.plot.line(ax=axpw, color='k', marker='o')
axpw.axhline(0, color='k', linestyle='--')
axpw.legend(['{} PW anomalies'.format(
pw_station.upper())], loc='upper right')
axpw.set_ylabel('PW anomalies [mm]')
if temp:
axt = ax.twinx()
axt.spines["right"].set_position(("axes", 1.05))
            # Having been created by twinx, axt has its frame off, so the line of
            # its detached spine is invisible. First, activate the frame but make
            # the patch and spines invisible.
make_patch_spines_invisible(axt)
# Second, show the right spine.
axt.spines["right"].set_visible(True)
p3, = T.plot.line(ax=axt, marker='s',
color='m', label="Temperature")
axt.yaxis.label.set_color(p3.get_color())
axt.tick_params(axis='y', colors=p3.get_color())
            axt.set_ylabel(r'Temperature anomalies [$C\degree$]')
return Q_freq
def plot_closest_line_from_point_to_israeli_coast(point, ax=None, epsg=None,
path=gis_path, color='k',
ls='-', lw=1.0):
import matplotlib.pyplot as plt
from shapely.geometry import LineString
from pyproj import Geod
"""returns the distance in kms"""
coast_gdf = get_israeli_coast_line(path=path, epsg=epsg)
coast_pts = coast_gdf.geometry.unary_union
point_in_coast = get_closest_point_from_a_line_to_a_point(point, coast_pts)
AB = LineString([point_in_coast, point])
if ax is None:
fig, ax = plt.subplots()
ax.plot(*AB.xy, color='k', linestyle=ls, linewidth=lw)
geod = Geod(ellps="WGS84")
distance = geod.geometry_length(AB) / 1000.0
return distance
def get_closest_point_from_a_line_to_a_point(point, line):
from shapely.ops import nearest_points
p1, p2 = nearest_points(point, line)
return p2
def get_israeli_coast_line(path=gis_path, minx=34.0, miny=30.0, maxx=36.0,
maxy=34.0, epsg=None):
"""use epsg=2039 to return in meters"""
from shapely.geometry import box
import geopandas as gpd
# create bounding box using shapely:
bbox = box(minx, miny, maxx, maxy)
# read world coast lines:
coast = gpd.read_file(gis_path / 'ne_10m_coastline.shp')
# clip:
gdf = gpd.clip(coast, bbox)
if epsg is not None:
gdf = gdf.to_crs('epsg:{}'.format(epsg))
return gdf
def clip_raster(fp=awd_path/'Israel_Area.tif',
out_tif=awd_path/'israel_dem.tif',
minx=34.0, miny=29.0, maxx=36.5, maxy=34.0):
def getFeatures(gdf):
"""Function to parse features from GeoDataFrame in such a manner that
rasterio wants them"""
import json
return [json.loads(gdf.to_json())['features'][0]['geometry']]
import rasterio
from rasterio.plot import show
from rasterio.plot import show_hist
from rasterio.mask import mask
from shapely.geometry import box
import geopandas as gpd
from fiona.crs import from_epsg
import pycrs
print('reading {}'.format(fp))
data = rasterio.open(fp)
# create bounding box using shapely:
bbox = box(minx, miny, maxx, maxy)
# insert the bbox into a geodataframe:
geo = gpd.GeoDataFrame({'geometry': bbox}, index=[0], crs=from_epsg(4326))
# re-project with the same projection as the data:
geo = geo.to_crs(crs=data.crs.data)
# get the geometry coords:
coords = getFeatures(geo)
# clipping is done with mask:
out_img, out_transform = mask(dataset=data, shapes=coords, crop=True)
# copy meta data:
out_meta = data.meta.copy()
# parse the epsg code:
epsg_code = int(data.crs.data['init'][5:])
# update the meta data:
out_meta.update({"driver": "GTiff",
"height": out_img.shape[1],
"width": out_img.shape[2],
"transform": out_transform,
"crs": pycrs.parse.from_epsg_code(epsg_code).to_proj4()})
# save to disk:
print('saving {} to disk.'.format(out_tif))
with rasterio.open(out_tif, "w", **out_meta) as dest:
dest.write(out_img)
print('Done!')
return
def create_israel_area_dem(path):
"""merge the raw DSM tif files from AW3D30 model of Israel area togather"""
from aux_gps import path_glob
import rasterio
from rasterio.merge import merge
src_files_to_mosaic = []
files = path_glob(path, '*DSM*.tif')
for fp in files:
src = rasterio.open(fp)
src_files_to_mosaic.append(src)
mosaic, out_trans = merge(src_files_to_mosaic)
out_meta = src.meta.copy()
out_meta.update({"driver": "GTiff",
"height": mosaic.shape[1],
"width": mosaic.shape[2],
"transform": out_trans,
"crs": src.crs
}
)
with rasterio.open(path/'Israel_Area.tif', "w", **out_meta) as dest:
dest.write(mosaic)
return
def parse_cv_results(grid_search_cv):
from aux_gps import process_gridsearch_results
"""parse cv_results from GridsearchCV object"""
# only supports neg-abs-mean-error with leaveoneout
from sklearn.model_selection import LeaveOneOut
if (isinstance(grid_search_cv.cv, LeaveOneOut)
and grid_search_cv.scoring == 'neg_mean_absolute_error'):
cds = process_gridsearch_results(grid_search_cv)
cds = - cds
return cds
def IMS_interpolating_to_GNSS_stations_israel(dt='2013-10-19T22:00:00',
stations=None,
lapse_rate='auto',
method='okrig',
variogram='spherical',
n_neighbors=3,
start_year='1996',
cut_days_ago=3,
plot=False,
verbose=False,
savepath=ims_path,
network='soi-apn',
axis_path=axis_path,
ds_td=None):
"""interpolate the IMS 10 mins field(e.g., TD) to the location
of the GNSS sites in ISRAEL(use dt=None for this). other dt is treated
as datetime str and will give the "snapshot" for the field for just this
datetime"""
from pykrige.rk import Krige
import pandas as pd
from aux_gps import path_glob
import xarray as xr
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import geopandas as gpd
from sklearn.neighbors import KNeighborsRegressor
from axis_process import read_axis_stations
# import time
def pick_model(method, variogram, n_neighbors):
if method == 'okrig':
if variogram is not None:
model = Krige(method='ordinary', variogram_model=variogram,
verbose=verbose)
else:
model = Krige(method='ordinary', variogram_model='linear',
verbose=verbose)
elif method == 'knn':
if n_neighbors is None:
model = KNeighborsRegressor(n_neighbors=5, weights='distance')
else:
model = KNeighborsRegressor(
n_neighbors=n_neighbors, weights='distance')
else:
raise Exception('{} is not supported yet...'.format(method))
return model
def prepare_Xy(ts_lr_neutral, T_lats, T_lons):
import numpy as np
df = ts_lr_neutral.to_frame()
df['lat'] = T_lats
df['lon'] = T_lons
# df = df.dropna(axis=0)
c = np.linspace(
df['lat'].min(),
df['lat'].max(),
df['lat'].shape[0])
r = np.linspace(
df['lon'].min(),
df['lon'].max(),
df['lon'].shape[0])
rr, cc = np.meshgrid(r, c)
vals = ~np.isnan(ts_lr_neutral)
X = np.column_stack([rr[vals, vals], cc[vals, vals]])
# rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])
# y = da_scaled.values[vals]
y = ts_lr_neutral[vals]
return X, y
def neutrilize_t(ts_vs_alt, lapse_rate):
ts_lr_neutral = (ts_vs_alt +
lapse_rate *
ts_vs_alt.index /
1000.0)
return ts_lr_neutral
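    # e.g., with lapse_rate=6.5 degC/km a station at 800 m measuring 20 degC is
    # mapped to 20 + 6.5 * 0.8 = 25.2 degC, i.e. roughly its sea-level equivalent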
def choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate):
ts = tdf.loc[dt, :]
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
# ts.name = dt_col
# Tloc_df = Tloc_df.join(ts, how='right')
# Tloc_df = Tloc_df.dropna(axis=0)
ts_vs_alt = pd.Series(ts.values, index=T_alts)
ts_vs_alt_for_fit = ts_vs_alt.dropna()
# try:
[a, b] = np.polyfit(ts_vs_alt_for_fit.index.values,
ts_vs_alt_for_fit.values, 1)
# except TypeError as e:
# print('{}, dt: {}'.format(e, dt))
# print(ts_vs_alt)
# return
if lapse_rate == 'auto':
lapse_rate = np.abs(a) * 1000
if lapse_rate < 5.0:
lapse_rate = 5.0
elif lapse_rate > 10.0:
lapse_rate = 10.0
return ts_vs_alt, lapse_rate
# import time
dt = pd.to_datetime(dt)
# read Israeli GNSS sites coords:
if network == 'soi-apn':
df = pd.read_csv(
cwd /
'israeli_gnss_coords.txt',
delim_whitespace=True,
header=0)
elif network == 'axis':
df = read_axis_stations(path=axis_path)
# use station=None to pick all stations, otherwise pick one...
if stations is not None:
if isinstance(stations, str):
stations = [stations]
df = df.loc[stations, :]
print('selected only {} stations'.format(stations))
else:
print('selected all {} stations.'.format(network))
# prepare lats and lons of gnss sites:
gps_lats = np.linspace(df.lat.min(), df.lat.max(), df.lat.values.shape[0])
gps_lons = np.linspace(df.lon.min(), df.lon.max(), df.lon.values.shape[0])
gps_lons_lats_as_cols = np.column_stack([gps_lons, gps_lats])
# load IMS temp data:
if ds_td is None:
glob_str = 'IMS_TD_israeli_10mins*.nc'
file = path_glob(ims_path, glob_str=glob_str)[0]
ds = xr.open_dataset(file)
else:
ds = ds_td
time_dim = list(set(ds.dims))[0]
# slice to a starting year(1996?):
ds = ds.sel({time_dim: slice(start_year, None)})
years = sorted(list(set(ds[time_dim].dt.year.values)))
# get coords and alts of IMS stations:
T_alts = np.array([ds[x].attrs['station_alt'] for x in ds])
T_lats = np.array([ds[x].attrs['station_lat'] for x in ds])
T_lons = np.array([ds[x].attrs['station_lon'] for x in ds])
print('loading IMS_TD of israeli stations 10mins freq..')
# transform to dataframe and add coords data to df:
tdf = ds.to_dataframe()
if cut_days_ago is not None:
        # use cut_days_ago to drop the last x days of data:
        # this is vital because towards the newest data TD becomes scarce,
        # since not all of the stations' data exists yet...
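        # 144 = 6 ten-minute samples per hour * 24 hours, i.e. one day of 10-min data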
n = cut_days_ago * 144
tdf.drop(tdf.tail(n).index, inplace=True)
print('last date to be handled is {}'.format(tdf.index[-1]))
# use this to solve for a specific datetime:
if dt is not None:
dt_col = dt.strftime('%Y-%m-%d %H:%M')
# t0 = time.time()
# prepare the ims coords and temp df(Tloc_df) and the lapse rate:
ts_vs_alt, lapse_rate = choose_dt_and_lapse_rate(
tdf, dt, T_alts, lapse_rate)
if plot:
fig, ax_lapse = plt.subplots(figsize=(10, 6))
sns.regplot(x=ts_vs_alt.index, y=ts_vs_alt.values, color='r',
scatter_kws={'color': 'b'}, ax=ax_lapse)
suptitle = dt.strftime('%Y-%m-%d %H:%M')
ax_lapse.set_xlabel('Altitude [m]')
ax_lapse.set_ylabel('Temperature [degC]')
ax_lapse.text(0.5, 0.95, 'Lapse_rate: {:.2f} degC/km'.format(lapse_rate),
horizontalalignment='center', verticalalignment='center',
transform=ax_lapse.transAxes, fontsize=12, color='k',
fontweight='bold')
ax_lapse.grid()
ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
# neutrilize the lapse rate effect:
ts_lr_neutral = neutrilize_t(ts_vs_alt, lapse_rate)
# prepare the regressors(IMS stations coords) and the
# target(IMS temperature at the coords):
X, y = prepare_Xy(ts_lr_neutral, T_lats, T_lons)
# pick the model and params:
model = pick_model(method, variogram, n_neighbors)
# fit the model:
model.fit(X, y)
# predict at the GNSS stations coords:
interpolated = model.predict(
gps_lons_lats_as_cols).reshape((gps_lats.shape))
# add prediction to df:
df[dt_col] = interpolated
# fix for lapse rate:
df[dt_col] -= lapse_rate * df['alt'] / 1000.0
# concat gnss stations and Tloc DataFrames:
Tloc_df = pd.DataFrame(T_lats, index=tdf.columns)
Tloc_df.columns = ['lat']
Tloc_df['lon'] = T_lons
Tloc_df['alt'] = T_alts
all_df = pd.concat([df, Tloc_df], axis=0)
# fname = gis_path / 'ne_10m_admin_0_sovereignty.shp'
# fname = gis_path / 'gadm36_ISR_0.shp'
# ax = plt.axes(projection=ccrs.PlateCarree())
if plot:
fig, ax = plt.subplots(figsize=(6, 10))
# shdf = salem.read_shapefile(salem.get_demo_file('world_borders.shp'))
# shdf = salem.read_shapefile(gis_path / 'Israel_and_Yosh.shp')
isr = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')
# shdf = shdf.loc[shdf['CNTRY_NAME'] == 'Israel'] # remove other countries
isr.crs = {'init': 'epsg:4326'}
time_snap = gpd.GeoDataFrame(all_df, geometry=gpd.points_from_xy(all_df.lon,
all_df.lat),
crs=isr.crs)
time_snap = gpd.sjoin(time_snap, isr, op='within')
isr.plot(ax=ax)
cmap = plt.get_cmap('rainbow', 10)
time_snap.plot(ax=ax, column=dt_col, cmap=cmap,
edgecolor='black', legend=True)
for x, y, label in zip(df.lon, df.lat,
df.index):
ax.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points")
suptitle = dt.strftime('%Y-%m-%d %H:%M')
fig.suptitle(suptitle, fontsize=14, fontweight='bold')
else:
# do the above (except plotting) for the entire data, saving each year:
for year in years:
dts = tdf.index[tdf.index.year == year]
# read Israeli GNSS sites coords again:
if network == 'soi-apn':
df = pd.read_csv(
cwd /
'israeli_gnss_coords.txt',
delim_whitespace=True,
header=0)
elif network == 'axis':
df = read_axis_stations(path=axis_path)
cnt = 1
dt_col_list = []
inter_list = []
# t0 = time.time()
# t1 = time.time()
# t2 = time.time()
# t3 = time.time()
# t4 = time.time()
# t5 = time.time()
# t6 = time.time()
# t7 = time.time()
# t8 = time.time()
for dt in dts:
dt_col = dt.strftime('%Y-%m-%d %H:%M')
if np.mod(cnt, 144) == 0:
# t1 = time.time()
print('working on {}'.format(dt_col))
# print('time1:{:.2f} seconds'.format(t1-t0))
# t0 = time.time()
# prepare the ims coords and temp df(Tloc_df) and
# the lapse rate:
ts_vs_alt, lapse_rate = choose_dt_and_lapse_rate(
tdf, dt, T_alts, lapse_rate)
# if np.mod(cnt, 144) == 0:
# t2 = time.time()
# print('time2: {:.4f}'.format((t2-t1)*144))
# neutrilize the lapse rate effect:
ts_lr_neutral = neutrilize_t(ts_vs_alt, lapse_rate)
# prepare the regressors(IMS stations coords) and the
# target(IMS temperature at the coords):
# if np.mod(cnt, 144) == 0:
# t3 = time.time()
# print('time3: {:.4f}'.format((t3-t2)*144))
X, y = prepare_Xy(ts_lr_neutral, T_lats, T_lons)
# if np.mod(cnt, 144) == 0:
# t4 = time.time()
# print('time4: {:.4f}'.format((t4-t3)*144))
# pick model and params:
model = pick_model(method, variogram, n_neighbors)
# if np.mod(cnt, 144) == 0:
# t5 = time.time()
# print('time5: {:.4f}'.format((t5-t4)*144))
# fit the model:
model.fit(X, y)
# if np.mod(cnt, 144) == 0:
# t6 = time.time()
# print('time6: {:.4f}'.format((t6-t5)*144))
# predict at the GNSS stations coords:
interpolated = model.predict(
gps_lons_lats_as_cols).reshape((gps_lats.shape))
# if np.mod(cnt, 144) == 0:
# t7 = time.time()
# print('time7: {:.4f}'.format((t7-t6)*144))
# fix for lapse rate:
interpolated -= lapse_rate * df['alt'].values / 1000.0
# if np.mod(cnt, 144) == 0:
# t8 = time.time()
# print('time8: {:.4f}'.format((t8-t7)*144))
# add to list:
dt_col_list.append(dt_col)
inter_list.append(interpolated)
cnt += 1
# convert to dataset:
# da = xr.DataArray(df.iloc[:, 3:].values, dims=['station', 'time'])
da = xr.DataArray(inter_list, dims=['time', 'station'])
da['station'] = df.index
da['time'] = pd.to_datetime(dt_col_list)
da = da.sortby('time')
ds = da.to_dataset(dim='station')
for da in ds:
ds[da].attrs['units'] = 'degC'
if savepath is not None:
filename = 'GNSS_TD_{}.nc'.format(year)
ds.to_netcdf(savepath / filename, 'w')
print('saved {} to {}'.format(filename, savepath))
# return
if savepath is not None:
print('concatenating all TD years...')
concat_GNSS_TD(savepath)
# t1 = time.time()
# geo_snap = geo_pandas_time_snapshot(var='TD', datetime=dt, plot=False)
# total = t1-t0
# print(total)
return ds
def resample_GNSS_TD(path=ims_path):
from aux_gps import path_glob
import xarray as xr
from aux_gps import get_unique_index
def resample_GNSS_TD(ds, path, sample, sample_rate='1H'):
# station = da.name
ds = get_unique_index(ds)
        print('resampling all GNSS stations to {}'.format(
            sample[sample_rate]))
years = [str(x)
for x in sorted(list(set(ds[time_dim].dt.year.values)))]
ymin = ds[time_dim].min().dt.year.item()
ymax = ds[time_dim].max().dt.year.item()
years_str = '{}_{}'.format(ymin, ymax)
if sample_rate == '1H' or sample_rate == '3H':
dsr_list = []
for year in years:
print('resampling {} of year {}'.format(sample_rate, year))
dsr = ds.sel({time_dim: year}).resample(
{time_dim: sample_rate}, keep_attrs=True, skipna=True).mean(keep_attrs=True)
dsr_list.append(dsr)
print('concatenating...')
dsr = xr.concat(dsr_list, time_dim)
else:
if sample_rate == '5min':
dsr = ds.resample({time_dim: sample_rate}, keep_attrs=True,
skipna=True).ffill()
else:
dsr = ds.resample({time_dim: sample_rate},
keep_attrs=True,
skipna=True).mean(keep_attrs=True)
new_filename = '_'.join(['GNSS', sample[sample_rate], 'TD_ALL',
years_str])
new_filename = new_filename + '.nc'
        print('saving all resampled GNSS stations to {}'.format(path))
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in dsr.data_vars}
dsr.to_netcdf(path / new_filename, 'w', encoding=encoding)
print('Done resampling!')
return
# first, load GNSS_TD_ALL:
str_glob = 'GNSS_TD_ALL*.nc'
file = sorted(path_glob(path, str_glob))[-1]
print(file)
ds = xr.open_dataset(file)
ds.load()
time_dim = list(set(ds.dims))[0]
sample = {'5min': '5mins', '1H': 'hourly', '3H': '3hourly',
'D': 'Daily', 'W': 'weekly', 'MS': 'monthly'}
for key in sample.keys():
resample_GNSS_TD(ds, path, sample, sample_rate=key)
# for sta in stations:
# # take each station's TD and copy to GNSS folder 'temperature':
# savepath = GNSS / sta / 'temperature'
# savepath.mkdir(parents=True, exist_ok=True)
# # first save a 5-min resampled version and save:
# da = ds[sta].resample(time='5min').ffill()
# ymin = da[time_dim].min().dt.year.item()
# ymax = da[time_dim].max().dt.year.item()
# years_str = '{}_{}'.format(ymin, ymax)
# new_filename = '_'.join([sta.upper(), 'TD', years_str])
# new_filename = new_filename + '.nc'
# print('saving resmapled station {} to {}'.format(sta, savepath))
# comp = dict(zlib=True, complevel=9) # best compression
# encoding = {var: comp for var in da.to_dataset(name=da.name).data_vars}
# da.to_netcdf(savepath / new_filename, 'w', encoding=encoding)
# print('Done resampling!')
# # finally, resample to all samples and save:
# for key in sample.keys():
# resample_GNSS_TD(da, savepath, sample, sample_rate=key)
return
def concat_GNSS_TD(path=ims_path):
import xarray as xr
from aux_gps import path_glob
files = path_glob(path, 'GNSS_TD_*.nc')
years = sorted([file.as_posix().split('/')[-1].split('_')[-1].split('.')[0]
for file in files])
ds_list = [xr.open_dataset(x) for x in files]
time_dim = list(set(ds_list[0].dims))[0]
ds = xr.concat(ds_list, time_dim)
ds = ds.sortby(time_dim)
comp = dict(zlib=True, complevel=9) # best compression
encoding = {var: comp for var in ds.data_vars}
filename = 'GNSS_TD_ALL_{}-{}.nc'.format(years[0], years[-1])
print('saving...')
ds.to_netcdf(path / filename, 'w', encoding=encoding)
print('{} was saved to {}'.format(filename, path))
return ds
def Interpolating_models_ims(time='2013-10-19T22:00:00', var='TD', plot=True,
gis_path=gis_path, method='okrig',
dem_path=work_yuval / 'AW3D30', lapse_rate=5.,
cv=None, rms=None, gridsearch=False):
"""main 2d_interpolation from stations to map"""
# cv usage is {'kfold': 5} or {'rkfold': [2, 3]}
# TODO: try 1d modeling first, like T=f(lat)
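    # usage sketch (illustrative values only): a leave-one-out CV run could look like
    #   Interpolating_models_ims(time='2013-10-19T22:00:00', var='TD',
    #                            method='okrig', cv={'loo': None})
    # and a 5-fold run like cv={'kfold': 5}; rms=True with cv=None skips CV and
    # reports the in-sample MSE at the station locations instead.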
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.neighbors import KNeighborsRegressor
from pykrige.rk import Krige
import numpy as np
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from scipy.spatial import Delaunay
from scipy.interpolate import griddata
from sklearn.metrics import mean_squared_error
from aux_gps import coarse_dem
import seaborn as sns
import matplotlib.pyplot as plt
import pyproj
from sklearn.utils.estimator_checks import check_estimator
from pykrige.compat import GridSearchCV
lla = pyproj.Proj(proj='latlong', ellps='WGS84', datum='WGS84')
ecef = pyproj.Proj(proj='geocent', ellps='WGS84', datum='WGS84')
def parse_cv(cv):
from sklearn.model_selection import KFold
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import LeaveOneOut
"""input:cv number or string"""
# check for integer:
if 'kfold' in cv.keys():
n_splits = cv['kfold']
print('CV is KFold with n_splits={}'.format(n_splits))
return KFold(n_splits=n_splits)
if 'rkfold' in cv.keys():
n_splits = cv['rkfold'][0]
n_repeats = cv['rkfold'][1]
            print('CV is RepeatedKFold with n_splits={},'.format(n_splits) +
                  ' n_repeats={}'.format(n_repeats))
return RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats,
random_state=42)
if 'loo' in cv.keys():
return LeaveOneOut()
# from aux_gps import scale_xr
da = create_lat_lon_mesh(points_per_degree=250) # 500?
awd = coarse_dem(da)
awd = awd.values
geo_snap = geo_pandas_time_snapshot(var=var, datetime=time, plot=False)
if var == 'TD':
[a, b] = np.polyfit(geo_snap['alt'].values, geo_snap['TD'].values, 1)
if lapse_rate == 'auto':
lapse_rate = np.abs(a) * 1000
fig, ax_lapse = plt.subplots(figsize=(10, 6))
sns.regplot(data=geo_snap, x='alt', y='TD', color='r',
scatter_kws={'color': 'b'}, ax=ax_lapse)
suptitle = time.replace('T', ' ')
ax_lapse.set_xlabel('Altitude [m]')
ax_lapse.set_ylabel('Temperature [degC]')
ax_lapse.text(0.5, 0.95, 'Lapse_rate: {:.2f} degC/km'.format(lapse_rate),
horizontalalignment='center', verticalalignment='center',
transform=ax_lapse.transAxes, fontsize=12, color='k',
fontweight='bold')
ax_lapse.grid()
ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
# fig.suptitle(suptitle, fontsize=14, fontweight='bold')
alts = []
for i, row in geo_snap.iterrows():
lat = da.sel(lat=row['lat'], method='nearest').lat.values
lon = da.sel(lon=row['lon'], method='nearest').lon.values
alt = row['alt']
if lapse_rate is not None and var == 'TD':
da.loc[{'lat': lat, 'lon': lon}] = row[var] + \
lapse_rate * alt / 1000.0
alts.append(alt)
elif lapse_rate is None or var != 'TD':
da.loc[{'lat': lat, 'lon': lon}] = row[var]
alts.append(alt)
# da_scaled = scale_xr(da)
c = np.linspace(min(da.lat.values), max(da.lat.values), da.shape[0])
r = np.linspace(min(da.lon.values), max(da.lon.values), da.shape[1])
rr, cc = np.meshgrid(r, c)
vals = ~np.isnan(da.values)
if lapse_rate is None:
Xrr, Ycc, Z = pyproj.transform(
lla, ecef, rr[vals], cc[vals], np.array(alts), radians=False)
X = np.column_stack([Xrr, Ycc, Z])
        XX, YY, ZZ = pyproj.transform(lla, ecef, rr, cc, awd,
                                      radians=False)
rr_cc_as_cols = np.column_stack(
[XX.flatten(), YY.flatten(), ZZ.flatten()])
else:
X = np.column_stack([rr[vals], cc[vals]])
rr_cc_as_cols = np.column_stack([rr.flatten(), cc.flatten()])
# y = da_scaled.values[vals]
y = da.values[vals]
if method == 'gp-rbf':
from sklearn.gaussian_process.kernels import RBF
from sklearn.gaussian_process.kernels import WhiteKernel
kernel = 1.0 * RBF(length_scale=0.25, length_scale_bounds=(1e-2, 1e3)) \
+ WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 1e+1))
# kernel = None
model = GaussianProcessRegressor(alpha=0.0, kernel=kernel,
n_restarts_optimizer=5,
random_state=42, normalize_y=True)
elif method == 'gp-qr':
from sklearn.gaussian_process.kernels import RationalQuadratic
from sklearn.gaussian_process.kernels import WhiteKernel
kernel = RationalQuadratic(length_scale=100.0) \
+ WhiteKernel(noise_level=0.01, noise_level_bounds=(1e-10, 1e+1))
model = GaussianProcessRegressor(alpha=0.0, kernel=kernel,
n_restarts_optimizer=5,
random_state=42, normalize_y=True)
elif method == 'knn':
model = KNeighborsRegressor(n_neighbors=5, weights='distance')
elif method == 'svr':
model = SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.1,
gamma='auto_deprecated', kernel='rbf', max_iter=-1,
shrinking=True, tol=0.001, verbose=False)
elif method == 'okrig':
model = Krige(method='ordinary', variogram_model='spherical',
verbose=True)
elif method == 'ukrig':
model = Krige(method='universal', variogram_model='linear',
verbose=True)
# elif method == 'okrig3d':
# # don't bother - MemoryError...
# model = OrdinaryKriging3D(rr[vals], cc[vals], np.array(alts),
# da.values[vals], variogram_model='linear',
# verbose=True)
# awd = coarse_dem(da)
# interpolated, ss = model.execute('grid', r, c, awd['data'].values)
# elif method == 'rkrig':
# # est = LinearRegression()
# est = RandomForestRegressor()
# model = RegressionKriging(regression_model=est, n_closest_points=5,
# verbose=True)
# p = np.array(alts).reshape(-1, 1)
# model.fit(p, X, y)
# P = awd.flatten().reshape(-1, 1)
# interpolated = model.predict(P, rr_cc_as_cols).reshape(da.values.shape)
# try:
# u = check_estimator(model)
# except TypeError:
# u = False
# pass
if cv is not None and not gridsearch: # and u is None):
# from sklearn.model_selection import cross_validate
from sklearn import metrics
cv = parse_cv(cv)
ytests = []
ypreds = []
for train_idx, test_idx in cv.split(X):
X_train, X_test = X[train_idx], X[test_idx] # requires arrays
y_train, y_test = y[train_idx], y[test_idx]
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# there is only one y-test and y-pred per iteration over the loo.split,
# so to get a proper graph, we append them to respective lists.
ytests += list(y_test)
ypreds += list(y_pred)
true_vals = np.array(ytests)
predicted = np.array(ypreds)
r2 = metrics.r2_score(ytests, ypreds)
ms_error = metrics.mean_squared_error(ytests, ypreds)
print("R^2: {:.5f}%, MSE: {:.5f}".format(r2*100, ms_error))
if gridsearch:
cv = parse_cv(cv)
param_dict = {"method": ["ordinary", "universal"],
"variogram_model": ["linear", "power", "gaussian",
"spherical"],
# "nlags": [4, 6, 8],
# "weight": [True, False]
}
estimator = GridSearchCV(Krige(), param_dict, verbose=True, cv=cv,
scoring='neg_mean_absolute_error',
return_train_score=True, n_jobs=1)
estimator.fit(X, y)
if hasattr(estimator, 'best_score_'):
print('best_score = {:.3f}'.format(estimator.best_score_))
print('best_params = ', estimator.best_params_)
return estimator
# if (cv is not None and not u):
# from sklearn import metrics
# cv = parse_cv(cv)
# ytests = []
# ypreds = []
# for train_idx, test_idx in cv.split(X):
# X_train, X_test = X[train_idx], X[test_idx] # requires arrays
# y_train, y_test = y[train_idx], y[test_idx]
# model = UniversalKriging(X_train[:, 0], X_train[:, 1], y_train,
# variogram_model='linear', verbose=False,
# enable_plotting=False)
# model.X_ORIG = X_train[:, 0]
# model.X_ADJUSTED = model.X_ORIG
# model.Y_ORIG = X_train[:, 1]
# model.Y_ADJUSTED = model.Y_ORIG
# model.Z = y_train
# y_pred, ss = model.execute('points', X_test[0, 0],
# X_test[0, 1])
# # there is only one y-test and y-pred per iteration over the loo.split,
# # so to get a proper graph, we append them to respective lists.
    # ytests += list(y_test)
    # cmap = plt.get_cmap('spring', 10)
    # Q = ax.quiver(isr['X'], isr['Y'], isr['U'], isr['V'],
    #               isr['cm_per_year'], cmap=cmap)
    # fig.colorbar(Q, extend='max')
# ypreds += list(y_pred)
# true_vals = np.array(ytests)
# predicted = np.array(ypreds)
# r2 = metrics.r2_score(ytests, ypreds)
# ms_error = metrics.mean_squared_error(ytests, ypreds)
# print("R^2: {:.5f}%, MSE: {:.5f}".format(r2*100, ms_error))
# cv_results = cross_validate(gp, X, y, cv=cv, scoring='mean_squared_error',
# return_train_score=True, n_jobs=-1)
# test = xr.DataArray(cv_results['test_score'], dims=['kfold'])
# train = xr.DataArray(cv_results['train_score'], dims=['kfold'])
# train.name = 'train'
# cds = test.to_dataset(name='test')
# cds['train'] = train
# cds['kfold'] = np.arange(len(cv_results['test_score'])) + 1
# cds['mean_train'] = cds.train.mean('kfold')
# cds['mean_test'] = cds.test.mean('kfold')
# interpolated=griddata(X, y, (rr, cc), method='nearest')
model.fit(X, y)
interpolated = model.predict(rr_cc_as_cols).reshape(da.values.shape)
da_inter = da.copy(data=interpolated)
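    # undo the earlier lapse-rate neutralization: subtract lapse_rate times the
    # DEM altitude (awd, in meters) to recover an altitude-dependent field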
if lapse_rate is not None and var == 'TD':
da_inter -= lapse_rate * awd / 1000.0
if (rms is not None and cv is None): # or (rms is not None and not u):
predicted = []
true_vals = []
for i, row in geo_snap.iterrows():
lat = da.sel(lat=row['lat'], method='nearest').lat.values
lon = da.sel(lon=row['lon'], method='nearest').lon.values
pred = da_inter.loc[{'lat': lat, 'lon': lon}].values.item()
true = row[var]
predicted.append(pred)
true_vals.append(true)
predicted = np.array(predicted)
true_vals = np.array(true_vals)
ms_error = mean_squared_error(true_vals, predicted)
print("MSE: {:.5f}".format(ms_error))
if plot:
import salem
from salem import DataLevels, Map
import cartopy.crs as ccrs
# import cartopy.io.shapereader as shpreader
import matplotlib.pyplot as plt
# fname = gis_path / 'ne_10m_admin_0_sovereignty.shp'
# fname = gis_path / 'gadm36_ISR_0.shp'
# ax = plt.axes(projection=ccrs.PlateCarree())
f, ax = plt.subplots(figsize=(6, 10))
# shdf = salem.read_shapefile(salem.get_demo_file('world_borders.shp'))
shdf = salem.read_shapefile(gis_path / 'Israel_and_Yosh.shp')
# shdf = shdf.loc[shdf['CNTRY_NAME'] == 'Israel'] # remove other countries
shdf.crs = {'init': 'epsg:4326'}
dsr = da_inter.salem.roi(shape=shdf)
grid = dsr.salem.grid
grid = da_inter.salem.grid
sm = Map(grid)
# sm.set_shapefile(gis_path / 'Israel_and_Yosh.shp')
# sm = dsr.salem.quick_map(ax=ax)
# sm2 = salem.Map(grid, factor=1)
# sm2.set_shapefile(gis_path/'gis_osm_water_a_free_1.shp',
# edgecolor='k')
sm.set_data(dsr)
# sm.set_nlevels(7)
# sm.visualize(ax=ax, title='Israel {} interpolated temperature from IMS'.format(method),
# cbar_title='degC')
sm.set_shapefile(gis_path/'gis_osm_water_a_free_1.shp',
edgecolor='k') # , facecolor='aqua')
# sm.set_topography(awd.values, crs=awd.crs)
# sm.set_rgb(crs=shdf.crs, natural_earth='hr') # ad
# lakes = salem.read_shapefile(gis_path/'gis_osm_water_a_free_1.shp')
sm.set_cmap(cm='rainbow')
sm.visualize(ax=ax, title='Israel {} interpolated temperature from IMS'.format(method),
cbar_title='degC')
dl = DataLevels(geo_snap[var], levels=sm.levels)
dl.set_cmap(sm.cmap)
x, y = sm.grid.transform(geo_snap.lon.values, geo_snap.lat.values)
ax.scatter(x, y, color=dl.to_rgb(), s=20,
edgecolors='k', linewidths=0.5)
suptitle = time.replace('T', ' ')
f.suptitle(suptitle, fontsize=14, fontweight='bold')
if (rms is not None or cv is not None) and (not gridsearch):
import seaborn as sns
f, ax = plt.subplots(1, 2, figsize=(12, 6))
sns.scatterplot(x=true_vals, y=predicted, ax=ax[0], marker='.',
s=100)
resid = predicted - true_vals
sns.distplot(resid, bins=5, color='c', label='residuals',
ax=ax[1])
rmean = np.mean(resid)
rstd = np.std(resid)
rmedian = np.median(resid)
rmse = np.sqrt(mean_squared_error(true_vals, predicted))
plt.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
_, max_ = plt.ylim()
plt.text(rmean + rmean / 10, max_ - max_ / 10,
'Mean: {:.2f}, RMSE: {:.2f}'.format(rmean, rmse))
f.tight_layout()
# lakes.plot(ax=ax, color='b', edgecolor='k')
# lake_borders = gpd.overlay(countries, capitals, how='difference')
# adm1_shapes = list(shpreader.Reader(fname).geometries())
# ax = plt.axes(projection=ccrs.PlateCarree())
# ax.coastlines(resolution='10m')
# ax.add_geometries(adm1_shapes, ccrs.PlateCarree(),
# edgecolor='black', facecolor='gray', alpha=0.5)
# da_inter.plot.pcolormesh('lon', 'lat', ax=ax)
# geo_snap.plot(ax=ax, column=var, cmap='viridis', edgecolor='black',
# legend=False)
return da_inter
def create_lat_lon_mesh(lats=[29.5, 33.5], lons=[34, 36],
points_per_degree=1000):
import xarray as xr
import numpy as np
lat = np.arange(lats[0], lats[1], 1.0 / points_per_degree)
lon = np.arange(lons[0], lons[1], 1.0 / points_per_degree)
nans = np.nan * np.ones((len(lat), len(lon)))
da = xr.DataArray(nans, dims=['lat', 'lon'])
da['lat'] = lat
da['lon'] = lon
return da
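# note: at points_per_degree=1000 the grid spacing is 0.001 degrees, roughly
# 111 m in latitude (and ~95 m in longitude at ~31 N); the default call in
# Interpolating_models_ims uses 250 points per degree, i.e. ~0.45 km spacing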
# def read_save_ims_10mins(path=ims_10mins_path, var='TD'):
# import xarray as xr
# search_str = '*' + var + '_10mins.nc'
# da_list = []
# for file_and_path in path.glob(search_str):
# da = xr.load_dataarray(file_and_path)
# print('reading ims 10mins {} data for {} station'.format(var, da.name))
# da_list.append(da)
# print('merging...')
# ds = xr.merge(da_list)
# comp = dict(zlib=True, complevel=9) # best compression
# encoding = {var: comp for var in ds.data_vars}
# filename = 'ims_' + var + '_10mins.nc'
# print('saving...')
# ds.to_netcdf(path / filename, 'w', encoding=encoding)
# print('{} was saved to {}.'.format(filename, path))
# return ds
def analyse_10mins_ims_field(path=ims_10mins_path, var='TD',
gis_path=gis_path, dem_path=work_yuval/'AW3D30',
ds=None):
import xarray as xr
import collections
import numpy as np
# TODO: make 2d histogram of stations by altitude and time...
awd = xr.open_rasterio(dem_path / 'israel_dem.tif')
awd = awd.squeeze(drop=True)
if ds is None:
filename = 'ims_' + var + '_10mins.nc'
ds = xr.open_dataset(path / filename)
meta = read_ims_metadata_from_files(path=gis_path,
freq='10mins_csv')
meta.index = meta.ID.astype('int')
meta.drop('ID', axis=1, inplace=True)
meta.sort_index(inplace=True)
    # some stations share the same altitude; map them to unique values:
duplicate_alts = [item for item, count in collections.Counter(
meta['alt']).items() if count > 1]
print(duplicate_alts)
    # then replace them with 1-meter separations:
for dup in duplicate_alts:
dup_size = len(meta.loc[meta['alt'] == dup, 'alt'])
start_value = meta.loc[meta['alt'] == dup, 'alt'].values[0]
replace_values = np.arange(start_value, start_value + dup_size)
print(
'duplicate {} has {} values, replacing with {}'.format(
dup,
dup_size,
replace_values))
meta.loc[meta['alt'] == dup, 'alt'] = replace_values
for da in ds.data_vars.keys():
id_ = ds[da].attrs['station_id']
try:
lat = meta.loc[id_, 'lat']
lon = meta.loc[id_, 'lon']
alt = meta.loc[id_, 'alt']
except KeyError:
lat = ds[da].attrs['station_lat']
lon = ds[da].attrs['station_lon']
print('station {} keyerror.'.format(da))
alt = 'None'
try:
alt = awd.sel(x=float(lon), y=float(lat),
method='nearest').values.item()
except ValueError:
            print('station {} has no known lat or lon...'.format(
                ds[da].attrs['station_name']))
ds[da].attrs['station_lat'] = lat
ds[da].attrs['station_lon'] = lon
ds[da].attrs['station_alt'] = alt
return ds
def geo_pandas_time_snapshot(path=ims_path, var='TD', freq='10mins',
datetime='2013-10-19T10:00:00',
gis_path=gis_path, plot=True):
import xarray as xr
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
from aux_gps import path_glob
# TODO: add simple df support
# first, read ims_10mins data for choice var:
# file should be : 'IMS_TD_israeli_10mins_filled.nc'
glob_str = 'IMS_{}_israeli_{}*.nc'.format(var, freq)
file = path_glob(path, glob_str=glob_str)[0]
ds = xr.open_dataset(file)
ds = ds.sel(time=datetime)
# meta = read_ims_metadata_from_files(path=gis_path, option='10mins')
# meta.index = meta.ID.astype('int')
# meta.drop('ID', axis=1, inplace=True)
# meta.sort_index(inplace=True)
cols_list = []
for dvar in ds.data_vars.values():
value = dvar.values.item()
id_ = dvar.attrs['station_id']
lat = dvar.attrs['station_lat']
lon = dvar.attrs['station_lon']
alt = dvar.attrs['station_alt']
name = dvar.name
var_ = dvar.attrs['channel_name']
cols = [pd.to_datetime(datetime), name, id_, lat, lon, alt,
var_, value]
cols_list.append(cols)
df = pd.DataFrame(cols_list)
df.columns = ['time', 'name', 'id', 'lat', 'lon', 'alt', 'var_name', var_]
df.dropna(inplace=True)
df = df.astype({'lat': 'float64', 'lon': 'float64'})
# geopandas part:
isr = gpd.read_file(gis_path / 'Israel_demog_yosh.shp')
isr.crs = {'init': 'epsg:4326'}
geo_snap = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=isr.crs)
if plot:
ax = isr.plot()
geo_snap.plot(ax=ax, column=var_, cmap='viridis', edgecolor='black',
legend=True)
plt.title(var_ + ' in ' + datetime)
return geo_snap
def get_meta_data_hourly_ims_climate_database(ds):
import pandas as pd
name_list = []
for name, da in ds.data_vars.items():
data = [name.split('_')[0], da.attrs['station_id'], da.attrs['lat'],
da.attrs['lon'], da.attrs['height']]
name_list.append(data)
df = pd.DataFrame(name_list)
df.columns = ['name', 'id', 'lat', 'lon', 'height']
return df
def proccess_hourly_ims_climate_database(path=ims_path, var='tas',
times=('1996', '2019')):
import xarray as xr
import numpy as np
ds = xr.open_dataset(path / 'hourly_ims.nc')
if var is not None:
ds = ds.sel({'var': var})
print('selecting {} variables'.format(var))
if times is not None:
print('selecting times from {} to {}'.format(times[0], times[1]))
ds = ds.sel(time=slice(times[0], times[1]))
to_drop_list = []
for name, da in ds.data_vars.items():
        if np.isnan(da).all().item():
to_drop_list.append(name)
ds = ds.drop(to_drop_list)
return ds
def read_all_hourly_ims_climate_database(path=ims_path / 'hourly', freq='03',
savepath=None):
"""downloaded from tau...ds is a dataset of all stations,
times is a time period"""
import xarray as xr
from aux_gps import save_ncfile
ds_list = []
for file in sorted(path.glob('*_{}hr_*.csv'.format(freq))):
ds = read_one_ims_hourly_station_csv(file)
ds_list.append(ds)
dss = xr.merge(ds_list)
print('Done!')
if savepath is not None:
save_ncfile(dss, savepath, filename='IMS_hourly_{}hr.nc'.format(freq))
return dss
def read_one_ims_hourly_station_csv(file):
import pandas as pd
from aux_gps import xr_reindex_with_date_range
from aux_gps import rename_data_vars
name = file.as_posix().split('/')[-1].split('_')[0]
sid = file.as_posix().split('/')[-1].split('_')[1]
freq = file.as_posix().split('/')[-1].split('_')[2]
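    # keep only the digits of the frequency token and append 'H', e.g. a filename
    # part like '03hr' becomes the pandas offset string '03H'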
freq = ''.join([x for x in freq if x.isdigit()]) + 'H'
array_name = '_'.join([name, sid])
print('reading {} station...'.format(array_name))
df = pd.read_csv(file, index_col='time')
df.index = pd.to_datetime(df.index)
df.drop(labels=['Unnamed: 0', 'name'], axis=1, inplace=True)
lat = df.loc[:, 'lat'][0]
lon = df.loc[:, 'lon'][0]
height = df.loc[:, 'height'][0]
df.drop(labels=['lat', 'lon', 'height'], axis=1, inplace=True)
ds = df.to_xarray()
station_attrs = {
'station_id': sid,
'lat': lat,
'lon': lon,
'height': height}
names_units_attrs = {
'ps': {
'long_name': 'surface_pressure', 'units': 'hPa'},
'tas': {
'long_name': 'surface_temperature', 'units': 'degC'},
'rh': {
'long_name': 'relative_humidity', 'units': '%'},
'wind_dir': {
'long_name': 'wind_direction', 'units': 'deg'},
'wind_spd': {
'long_name': 'wind_speed', 'units': 'm/s'}}
to_drop = []
for da in ds:
# add var names and units:
attr = names_units_attrs.get(da, {})
ds[da].attrs = attr
# add station attrs for each var:
ds[da].attrs.update(station_attrs)
# # rename var to include station name:
# ds[da].name = array_name + '_' + da
# last, drop all NaN vars:
try:
ds[da] = xr_reindex_with_date_range(ds[da], freq=freq)
except ValueError:
to_drop.append(da)
continue
# if ds[da].size == ds[da].isnull().sum().item():
# to_drop.append(da)
ds = ds[[x for x in ds if x not in to_drop]]
ds = rename_data_vars(
ds, suffix=None, prefix=array_name + '_', verbose=False)
ds = ds.sortby('time')
# ds = xr_reindex_with_date_range(ds, freq=freq)
return ds
def interpolate_hourly_IMS(path=ims_path, freq='03', field='ps', max_gap='6H',
station='JERUSALEM-CENTRE-MAN_6770', k_iqr=2,
times=['1996', '2019'],
plot=True):
from aux_gps import path_glob
from aux_gps import xr_reindex_with_date_range
from aux_gps import keep_iqr
import xarray as xr
import matplotlib.pyplot as plt
file = path_glob(path, 'IMS_hourly_{}hr.nc'.format(freq))[0]
ds = xr.open_dataset(file)
name = '{}_{}'.format(station, field)
da = ds[name]
da = xr_reindex_with_date_range(da, freq='1H')
da_inter = da.interpolate_na('time', max_gap=max_gap, method='cubic')
if times is not None:
da = da.sel(time=slice(*times))
da_inter = da_inter.sel(time=slice(*times))
if k_iqr is not None:
da_inter = keep_iqr(da_inter, k=k_iqr)
if plot:
fig, ax = plt.subplots(figsize=(18, 5))
df = da.to_dataframe()
df_inter = da_inter.to_dataframe()
df_inter.plot(style='b--', ax=ax)
df.plot(style='b-', marker='o', ax=ax, ms=5)
        ax.legend(*[ax.get_lines()],
                  ['{} {} max-gap interpolation'.format(field, max_gap), field],
                  loc='best')
return da_inter
def read_ims_metadata_from_files(path=gis_path, freq='10mins'):
# for longer climate archive data use filename = IMS_climate_archive_meta_data.xls
import pandas as pd
"""parse ims stations meta-data"""
if freq == '10mins':
filename = 'IMS_10mins_meta_data.xlsx'
ims = pd.read_excel(path / filename,
sheet_name='מטה-דטה', skiprows=1)
# drop two last cols and two last rows:
ims = ims.drop(ims.columns[[-1, -2]], axis=1)
ims = ims.drop(ims.tail(2).index)
cols = ['#', 'ID', 'name_hebrew', 'name_english', 'east', 'west',
'lon', 'lat', 'alt', 'starting_date', 'variables', 'model',
'eq_position', 'wind_meter_height', 'notes']
ims.columns = cols
ims.index = ims['#'].astype(int)
ims = ims.drop('#', axis=1)
# fix lat, lon cols:
ims['lat'] = ims['lat'].str.replace(u'\xba', '').astype(float)
ims['lon'] = ims['lon'].str.replace(u'\xba', '').astype(float)
# fix alt col:
ims['alt'] = ims['alt'].replace('~', '', regex=True).astype(float)
# fix starting date col:
ims['starting_date'] = pd.to_datetime(ims['starting_date'])
elif freq == '10mins_csv':
filename = 'IMS_10mins_meta_data.csv'
ims = pd.read_csv(path / filename, skiprows=1)
# drop two last cols and two last rows:
ims = ims.drop(ims.columns[[-1, -2]], axis=1)
ims = ims.drop(ims.tail(2).index)
cols = ['#', 'ID', 'name_hebrew', 'name_english', 'east', 'west',
'lon', 'lat', 'alt', 'starting_date', 'variables', 'model',
'eq_position', 'wind_meter_height', 'notes']
ims.columns = cols
ims.index = ims['#'].astype(int)
ims = ims.drop('#', axis=1)
# fix lat, lon cols:
ims['lat'] = ims['lat'].str.replace(u'\xba', '').astype(float)
ims['lon'] = ims['lon'].str.replace(u'\xba', '').astype(float)
# fix alt col:
ims['alt'] = ims['alt'].replace('~', '', regex=True).astype(float)
# fix starting date col:
        ims['starting_date'] = pd.to_datetime(ims['starting_date'])
import pandas as pd
import pandas.testing as tm
import pytest
from tableauhyperapi import TableName
import pantab
import pantab._compat as compat
def test_read_doesnt_modify_existing_file(df, tmp_hyper):
pantab.frame_to_hyper(df, tmp_hyper, table="test")
last_modified = tmp_hyper.stat().st_mtime
# Try out our read methods
pantab.frame_from_hyper(tmp_hyper, table="test")
pantab.frames_from_hyper(tmp_hyper)
    # Neither should update the file stats
assert last_modified == tmp_hyper.stat().st_mtime
def test_reports_unsupported_type(datapath):
"""
Test that we report an error if we encounter an unsupported column type.
Previously, we did not do so but instead assumed that all unsupported columns
would be string columns. This led to very fascinating failures.
"""
db_path = datapath / "geography.hyper"
with pytest.raises(
TypeError, match=r"Column \"x\" has unsupported datatype GEOGRAPHY"
):
pantab.frame_from_hyper(db_path, table="test")
def test_months_in_interval_raises(df, tmp_hyper, monkeypatch):
# Monkeypatch a new constructor that hard codes months
def __init__(self, months: int, days: int, microseconds: int):
self.months = 1
self.days = days
self.microseconds = microseconds
monkeypatch.setattr(pantab._writer.tab_api.Interval, "__init__", __init__)
pantab.frame_to_hyper(df, tmp_hyper, table="test")
with pytest.raises(
ValueError, match=r"Cannot read Intervals with month components\."
):
pantab.frame_from_hyper(tmp_hyper, table="test")
with pytest.raises(
ValueError, match=r"Cannot read Intervals with month components\."
):
pantab.frames_from_hyper(tmp_hyper)
def test_error_on_first_column(df, tmp_hyper, monkeypatch):
"""
    We had a defect due to which pantab segfaulted when an error occurred in one of
the first two columns. This test case is a regression test against that.
"""
# Monkeypatch a new constructor that hard codes months
def __init__(self, months: int, days: int, microseconds: int):
self.months = 1
self.days = days
self.microseconds = microseconds
monkeypatch.setattr(pantab._writer.tab_api.Interval, "__init__", __init__)
df = pd.DataFrame(
        [[pd.Timedelta("1 days 2 hours 3 minutes 4 seconds")
import random
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Explanation "Where": Plot for explanation in fundamentals chapter
# 1. Generate data with gaussian, uniform and mixed distribution
n = 3000
var = 0.12
# Dimension 1
dim1_sequence_100percent_gaussian = np.random.normal(0.5, var, n)
dim1_sequence_100percent_uniform = np.random.uniform(-0.5, 1.5, n)
dim1_sequence_mixed = np.append(np.random.normal(0.5, var, int(n/2)), np.random.uniform(-0.5, 1.5, n))
# Dimension 2
dim2_sequence_100percent_gaussian = np.random.normal(0.5, var, n)
dim2_sequence_100percent_uniform = np.random.uniform(-0.5, 1.5, n)
dim2_sequence_mixed = np.append(np.random.normal(0.5, var, int(n/2)), np.random.uniform(-0.5, 1.5, n))
# Shuffle data
random.shuffle(dim1_sequence_100percent_gaussian)
random.shuffle(dim1_sequence_100percent_uniform)
random.shuffle(dim1_sequence_mixed)
random.shuffle(dim2_sequence_100percent_gaussian)
random.shuffle(dim2_sequence_100percent_uniform)
random.shuffle(dim2_sequence_mixed)
# 2. Generate 2-dimensional dataset
# Gaussian-uniform
df_gaussian_uniform = pd.DataFrame()
df_gaussian_uniform['Dim 1']=pd.Series(dim1_sequence_100percent_gaussian)
df_gaussian_uniform['Dim 2']=pd.Series(dim2_sequence_100percent_uniform)
# ignore this file
# for dsw use only
import os
import pandas as pd
root_path = r'/Volumes/MSC/Databases/2018 Bridging Databases'
# paths to bridging_n_n+5000.csv datasets - these have the metals
paths = os.listdir(root_path)
paths = [item for item in paths if item[0:8] == 'bridging' and os.path.splitext(item)[1] == '.csv']
# paths to .csvs in bridging_proc
paths_to_bridging = os.path.join(root_path, 'bridging_proc')
paths_to_bridging = os.listdir(paths_to_bridging)
# get all metals
df_METALS = pd.DataFrame()
for p in paths:
if p[0:2] == '._': # pass over DS store settings storage files
pass
else:
df_m = pd.read_csv(os.path.join(root_path, p))
df_m = df_m[df_m['Z-Metal'].notnull()]
df_METALS = df_METALS.append(df_m)
df_METALS = df_METALS.reset_index(drop=True)
# get all bridges
df_BRIDGES = pd.DataFrame()
import io
import os
import time
import pandas as pd
import subprocess
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
try:
api
except NameError:
class api:
class Message:
def __init__(self,body = None,attributes = ""):
self.body = body
self.attributes = attributes
def send(port,msg) :
if port == outports[1]['name'] :
print(msg.body)
elif port == outports[2]['name'] :
print('Limit reached - Exit')
#exit(0)
class config:
## Meta data
config_params = dict()
tags = {'sdi_utils':'', 'pandas':''}
version = "0.1.0"
operator_description = "Dispatch Tables"
operator_name = 'repl_dispatch_tables'
operator_description_long = "Send next table to process."
add_readme = dict()
debug_mode = True
config_params['debug_mode'] = {'title': 'Debug mode',
'description': 'Sending debug level information to log port',
'type': 'boolean'}
periodicity = 0
config_params['periodicity'] = {'title': 'Periodicity (s)',
'description': 'Periodicity (s).',
'type': 'integer'}
round_trips_to_stop = 10000000
        config_params['round_trips_to_stop'] = {'title': 'Roundtrips to stop',
                                                'description': 'Number of roundtrips after which to stop.',
'type': 'integer'}
count_all_roundtrips = False
config_params['count_all_roundtrips'] = {'title': 'Count All Roundtrips',
'description': 'Count all roundtrips irrespective to changes.',
'type': 'boolean'}
df_tables = pd.DataFrame()
pointer = 0
no_changes_counter = 0
num_roundtrips = 0
num_batch = 1
first_call = True
def set_replication_tables(msg) :
global df_tables
global first_call
global num_batch
header = [c["name"] for c in msg.attributes['table']['columns']]
    df_tables = pd.DataFrame(msg.body,columns=header)
from typing import Union, Iterable, List
import pandas as pd
from sklearn.base import BaseEstimator
from vivid.backends.experiments import ExperimentBackend
from vivid.core import BaseBlock
from .engine import BinCountEncoder, OneHotEncoder, CountEncoder, BaseEngine
def get_target_columns(source_df: pd.DataFrame,
column: Union[str, List] = '__all__',
excludes: Union[None, List] = None
) -> Iterable:
"""
select the target columns
Args:
source_df:
input dataframe.
column:
            Explicitly used columns. If set to `"__all__"`, return all columns.
            column and excludes cannot be set at the same time (only one of them may be given).
excludes:
Explicitly remove columns
Returns:
"""
use_all = column == '__all__'
if not use_all and excludes is not None:
        raise ValueError(
            'Setting both specific columns and excludes is not a valid combination. '
            'columns: {}, excludes: {}'.format(
                ','.join(map(str, column)), ','.join(map(str, excludes))))
    if column == '__all__':
        column = source_df.columns.tolist()
        if excludes is not None:
            column = [c for c in column if c not in excludes]
    if isinstance(column, str):
        return [column]
    if isinstance(column, Iterable):
        return [x for x in column]
    return []
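# illustrative behaviour, assuming a DataFrame df with columns ['a', 'b', 'c']:
#   get_target_columns(df)                    -> ['a', 'b', 'c']
#   get_target_columns(df, column=['a', 'b']) -> ['a', 'b']
#   get_target_columns(df, column='a')        -> ['a']
#   get_target_columns(df, excludes=['c'])    -> ['a', 'b']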
class ColumnWiseBlock(BaseBlock):
"""
apply feature engineering for each columns.
"""
engine = None
def __init__(self,
name,
column: Union[str, List] = '__all__',
excludes: Union[None, List] = None,
**kwargs):
"""
Args:
name:
this block name.
column:
                columns to use. If set to `"__all__"`, use all columns obtained from the parent blocks.
            excludes:
                if set, exclude these columns from `column`.
            [NOTE]
                when specific columns are set (e.g. ['foo', 'bar']), excludes must be None.
**kwargs:
"""
super(ColumnWiseBlock, self).__init__(name=name, **kwargs)
self.column = column
self.excludes = excludes
def create_new_engine(self, column_name: str) -> BaseEngine:
return self.engine()
def unzip(self, experiment: ExperimentBackend):
self.fitted_models_ = experiment.load_object('mapping')
return self
def frozen(self, experiment: ExperimentBackend):
experiment.save_as_python_object('mapping', self.fitted_models_)
return self
def get_output_colname(self, column):
return self.name + '_' + column
def fit(self, source_df, y, experiment) -> pd.DataFrame:
columns = get_target_columns(source_df=source_df,
column=self.column,
excludes=self.excludes)
mappings = {}
for c in sorted(columns):
clf = self.create_new_engine(c)
clf.fit(source_df[c], y=y)
mappings[c] = clf
self.fitted_models_ = mappings
return self.transform(source_df)
def transform(self, source_df):
out_df = pd.DataFrame()
for column, clf in self.fitted_models_.items():
out = clf.transform(source_df[column])
out_df[self.get_output_colname(column)] = out
return out_df
class FilterBlock(ColumnWiseBlock):
def fit(self, source_df, y, experiment) -> pd.DataFrame:
return self.transform(source_df)
def transform(self, source_df):
cols = get_target_columns(source_df, column=self.column, excludes=self.excludes)
return source_df[cols].copy()
def frozen(self, experiment: ExperimentBackend):
return self
def unzip(self, experiment: ExperimentBackend):
return self
class BinningCountBlock(ColumnWiseBlock):
engine = BinCountEncoder
def __init__(self, name, column='__all__', bins=25, **kwargs):
super(BinningCountBlock, self).__init__(name=name, column=column, **kwargs)
self.bins = bins
def create_new_engine(self, column_name: str):
return self.engine(bins=self.bins)
def get_output_colname(self, column):
return '{}_bins={}'.format(column, self.bins)
class OneHotEncodingBlock(ColumnWiseBlock):
engine = OneHotEncoder
def create_new_engine(self, column_name: str) -> BaseEstimator:
return self.engine(min_freq=0, max_columns=20)
def transform(self, source_df):
        out_df = pd.DataFrame()
"""Data visualization functions"""
from fastapi import APIRouter, HTTPException, Depends
from pydantic import BaseModel
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from app.ml import City, validate_city
from app.data.files.state_abbr import us_state_abbrev as abbr
router = APIRouter()
MODEL_CSV = 'https://media.githubusercontent.com/media/CityScape-Datasets/Workspace_Datasets/main/Models/nn_model/nn_model.csv'
class CityData():
"""
Locates specific city data
- Demographics
    - Employment -> industry, employment
- Crime -> violent crime, property crime
- Air Quality Index
"""
def __init__(self, current_city):
self.current_city = current_city
self.dataframe = pd.read_csv(MODEL_CSV)
self.subset = self.dataframe[self.dataframe['City'] == self.current_city.city]
def demographics(self):
return ['Hispanic', 'White', 'Black', 'Native', 'Asian', 'Pacific']
def industry(self):
return ['PrivateWork', 'PublicWork', 'SelfEmployed', 'FamilyWork']
def employment(self):
return ['Professional', 'Service', 'Office', 'Construction', 'Production']
def crime(self):
return ['Violent crime', 'Property crime', 'Arson']
def violent_crime(self):
return ['Murder and nonnegligent manslaughter','Rape', 'Robbery', 'Aggravated assault']
def property_crime(self):
return ['Burglary','Larceny- theft', 'Motor vehicle theft']
def air_quality_index(self):
return ['Days with AQI', 'Good Days', 'Moderate Days','Unhealthy for Sensitive Groups Days', 'Unhealthy Days','Very Unhealthy Days', 'Hazardous Days', 'Max AQI', '90th Percentile AQI', 'Median AQI', 'Days CO', 'Days NO2', 'Days Ozone', 'Days SO2', 'Days PM2.5', 'Days PM10']
@router.post("/api/demographics_graph")
async def demographics_plot(current_city:City):
"""
Visualize demographic information for city
args:
- city
returns:
JSON string to render with react-plotly.js
"""
city = validate_city(current_city)
city_data = CityData(city)
# Demographics
city_demographics = city_data.subset[city_data.demographics()]
city_demographics['Not Specified'] = 100 - city_demographics.sum(axis=1) # Accounting for people that did not respond
melt = pd.melt(city_demographics)
melt.columns = ['demographic', 'percentage']
fig = px.pie(melt, values ='percentage', names ='demographic')
fig.update_layout(
title={
'text': f'Demographics in {city}',
'y':0.98,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'})
fig.show()
return fig.to_json()
@router.post("/api/employment_graph")
async def employment_plot(current_city:City):
"""
Visualize employment information for city
- see industry breakdown and employment type
args:
- city
returns:
JSON string to render with react-plotly.js
"""
city = validate_city(current_city)
city_data = CityData(city)
# Industry
industry_type = city_data.subset[city_data.industry()]
industry_melt = pd.melt(industry_type)
industry_melt.columns = ['industry', 'percentage']
# Employment Type
employment_type = city_data.subset[city_data.employment()]
type_melt = pd.melt(employment_type)
type_melt.columns = ['employment type', 'percentage']
#Create subplots
fig = make_subplots(rows=1, cols=2, subplot_titles = (f'Industry in {city}', f'Employment Types in {city}'))
fig.add_trace(go.Bar(x = industry_melt['industry'], y = industry_melt['percentage'],
marker = dict(color = industry_melt['percentage'], coloraxis = "coloraxis")),
row = 1, col = 1)
fig.add_trace(go.Bar(x =type_melt['employment type'], y =type_melt['percentage'],
marker = dict(color = type_melt['percentage'], coloraxis = "coloraxis")),
row = 1, col = 2)
fig.update_layout(
coloraxis=dict(colorscale = 'Bluered_r'),
coloraxis_showscale = False,
showlegend = False)
fig.show()
return fig.to_json()
@router.post("/api/crime_graph")
async def crime_plot(current_city:City):
"""
Visualize crime information for city
- see overall crime breakdown
- visualize breakdown of violent crime and property crime
args:
- city
returns:
JSON string to render with react-plotly.js
"""
city = validate_city(current_city)
city_data = CityData(city)
# Crime Categories
crime_type = city_data.subset[city_data.crime()]
crime_melt = pd.melt(crime_type)
crime_melt.columns = ['categories', 'total']
# Violent Crime
violent_crime_type = city_data.subset[city_data.violent_crime()]
violent_crime_type_melt = pd.melt(violent_crime_type)
violent_crime_type_melt.columns = ['violent crime type', 'total']
# Property Crime
property_crime_type = city_data.subset[city_data.property_crime()]
property_crime_melt = pd.melt(property_crime_type)
property_crime_melt.columns = ['property crime type', 'total']
#Create subplots
fig = make_subplots(
rows=2, cols=2,
subplot_titles = (f"Crime Breakdown in {city}", f"Violent Crime Breakdown in {city}", f"Property Crime Breakdown in {city}"),
specs = [[{"type":"xy", 'rowspan':2}, {"type": "pie"}],
[None, {"type": "pie"}]],
)
fig.add_trace(go.Bar(name = 'Crime Types', x = crime_melt['categories'], y = crime_melt['total']),
row = 1, col = 1)
fig.add_trace(go.Pie(values = violent_crime_type_melt['total'],
labels = violent_crime_type_melt['violent crime type']),
row = 1, col = 2)
fig.add_trace(go.Pie(values = property_crime_melt['total'],
labels = property_crime_melt['property crime type']),
row = 2, col = 2)
fig.show()
return fig.to_json()
@router.post("/api/aqi_graph")
async def air_quality_plot(current_city:City):
"""
Visualize air quality information for city
args:
- city
returns:
JSON string to render with react-plotly.js
"""
city = validate_city(current_city)
city_data = CityData(city)
# Air Quality
air_quality_details = city_data.subset[city_data.air_quality_index()]
air_quality_melt = pd.melt(air_quality_details)
air_quality_melt.columns = ['air quality indicators', 'days']
fig = make_subplots(rows = 1, cols = 1)
fig.add_trace(go.Bar(x = air_quality_melt['days'], y = air_quality_melt['air quality indicators'],
marker = dict(color = air_quality_melt['days'], coloraxis = "coloraxis"), orientation = 'h'))
fig.update_layout(
coloraxis=dict(colorscale = 'Viridis'),
coloraxis_showscale = False,
xaxis_range = [0, 360],
title={
'text': f'Air Quality in {city}',
'y':0.95,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'})
fig.show()
return fig.to_json()
POPULATION_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_cleaned.csv'
FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/population2010-2019/csv/population_prediction.csv'
@router.post('/api/population_forecast_graph')
async def population_forecast_graph(city:City):
"""
Create visualization of historical and forecasted population
args:
- city: str -> The target city
    returns:
    JSON string to render with react-plotly.js
    - 10 years of historical data
    - forecasted population
"""
city = validate_city(city)
location = [city.city + ', ' + city.state]
# Historical population data
population = pd.read_csv(POPULATION_CSV)
population = population[population['City,State'].isin(location)]
population = population[['City,State', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']]
population_melt = population.melt(id_vars=['City,State'], var_name='ds', value_name='y')
population_melt['ds'] = (population_melt['ds']).astype(int)
# Predictions
forecast = pd.read_csv(FORECAST_CSV)
predictions = forecast[forecast['City,State'].isin(location)][9:]
predictions['year'] = (predictions['year']).astype(int)
# Graph Data
ax = population_melt.plot(x = 'ds', y = 'y', label='Observed', figsize= (10, 8))
predictions[['year', 'yhat']].plot(ax = ax, x = 'year', y = 'yhat', label = "Forecast")
# Fill to show upper and lower bounds
# Graph predictions including the upper and lower bounds
fig = go.Figure()
fig.add_trace(go.Scatter(
name = 'Original',
x = population_melt['ds'],
y = population_melt['y'],
fill = None,
mode = 'lines',
line_color = 'black',
showlegend = True
))
fig.add_trace(go.Scatter(
name = 'Forecast',
x = predictions['year'],
y = predictions['yhat'],
fill = None,
mode = 'lines',
line_color = 'red',
showlegend = True
))
fig.add_trace(go.Scatter(
name = 'Lower Bound',
x = predictions['year'],
y = predictions['yhat_lower'],
fill = None,
mode = 'lines',
line_color = 'gray'
))
fig.add_trace(go.Scatter(
name = 'Upper Bound',
x = predictions['year'],
y = predictions['yhat_upper'],
fill='tonexty',
mode='lines',
line_color = 'gray'
))
# Edit the layout
fig.update_layout({
'autosize':True,
'title': f'{location[0]} Population Forecast',
'title_x': 0.5,
'xaxis_title': 'Year',
'yaxis_title': 'Population'
})
fig.update_yaxes(automargin = True)
fig.update_xaxes(automargin = True, nticks=20)
fig.show()
return fig.to_json()
FMR_0 = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr0.csv'
FMR_0_FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr0_predictions.csv'
FMR_1 = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr1.csv'
FMR_1_FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr1_predictions.csv'
FMR_2 = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr2.csv'
FMR_2_FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr2_predictions.csv'
FMR_3 = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr3.csv'
FMR_3_FORECAST_CSV = 'https://raw.githubusercontent.com/jiobu1/labspt15-cityspire-g-ds/main/notebooks/model/rental/csv/fmr3_predictions.csv'
@router.post('/api/rental_forecast_graph')
def rental_forecast_graph(city:City, bed):
"""
    Create visualization of historical and forecasted
    Fair Market Rents for studios - 3 bedrooms
    args:
    - city: str -> The target city
    - bed: str -> number of bedrooms ("0", "1", "2", or "3")
    returns:
    Visualization of Rental forecast
    - 5 years of historical data
    - 10 years of forecasted data
"""
city = validate_city(city)
location = [city.city + ', ' + city.state]
if bed == "0":
RENTAL_CSV = FMR_0
RENTAL_FORECAST_CSV = FMR_0_FORECAST_CSV
elif bed == "1":
RENTAL_CSV = FMR_1
RENTAL_FORECAST_CSV = FMR_1_FORECAST_CSV
elif bed == "2":
RENTAL_CSV = FMR_2
RENTAL_FORECAST_CSV = FMR_2_FORECAST_CSV
else:
RENTAL_CSV = FMR_3
RENTAL_FORECAST_CSV = FMR_3_FORECAST_CSV
# Historical Rental data
rental =
|
pd.read_csv(RENTAL_CSV)
|
pandas.read_csv
|
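A side note on the endpoint above: the if/elif ladder that picks RENTAL_CSV and RENTAL_FORECAST_CSV by bedroom count can also be written as a lookup table. A minimal sketch reusing the FMR_* constants from the snippet (a hypothetical refactor, not the author's code):
# Hypothetical bed -> (history CSV, forecast CSV) lookup; falls back to the
# 3-bedroom pair, matching the original else branch.
RENTAL_SOURCES = {
    "0": (FMR_0, FMR_0_FORECAST_CSV),
    "1": (FMR_1, FMR_1_FORECAST_CSV),
    "2": (FMR_2, FMR_2_FORECAST_CSV),
    "3": (FMR_3, FMR_3_FORECAST_CSV),
}
RENTAL_CSV, RENTAL_FORECAST_CSV = RENTAL_SOURCES.get(bed, (FMR_3, FMR_3_FORECAST_CSV))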
import pandas as pd
import os, glob
def get_negative_cols(pais,hh_df):
try: negative_dict = pd.read_csv('output/hh_survey_negative_values.csv').set_index('pais')
except: negative_dict = pd.DataFrame(columns=['negative_values'])
negative_cols = [_c for _c in hh_df.columns if ((hh_df[_c].dtype == 'float32' or hh_df[_c].dtype == 'float64')
and ('ict' not in _c) and ('ing' not in _c or 'ct' in _c or 'trsgob' in _c)
and (hh_df[_c].min() < 0))]
out_str = ''
if len(negative_cols) == 0: out_str = '--, '
else:
for i in negative_cols: out_str += i+', '
negative_dict.loc[pais,'negative_values'] = out_str[:-2]
negative_dict.index.name = 'pais'
negative_dict.sort_index().to_csv('output/hh_survey_negative_values.csv')
if len(negative_cols)==0: return None
return negative_cols
def get_hh_survey(pais):
hh_survey = None
if pais == 'chl': pais = 'chi'
try:
file_name = 'consumption_and_household_surveys/2017-10-13/Household_survey_with_new_file_name/'+pais+'_household_expenditure_survey.dta'
hh_survey = pd.read_stata(file_name).set_index('cod_hogar')
except:
file_name = 'consumption_and_household_surveys/Expansion_Countries/'
for f in glob.glob(file_name+pais.upper()+'*'):
if 'PERSONA' not in f:
hh_survey =
|
pd.read_stata(f)
|
pandas.read_stata
|
# -*- coding: utf-8 -*-
'''
Created on Mon Sep 28 16:26:09 2015
@author: r4dat
'''
# ICD9 procs from NHSN definition.
# Diabetes diagnoses from AHRQ version 5 SAS program, CMBFQI32.TXT
# sample string generator print((','.join(map(str, [str(x) for x in range(25040,25094)]))).replace(',','","'))
#
# "25000"-"25033",
# "64800"-"64804" = "DM" /* Diabetes w/o chronic complications*/
# "25000","25001","25002","25003","25004","25005","25006","25007","25008","25009","25010","25011","25012","25013","25014","25015","25016","25017","25018","25019","25020","25021","25022","25023","25024","25025","25026","25027","25028","25029","25030","25031","25032","25033",
# "64800","64801","64802","64803","64804"
#
# "25040"-"25093",
# "7751 " = "DMCX" /* Diabetes w/ chronic complications */
# "25040","25041","25042","25043","25044","25045","25046","25047","25048","25049","25050","25051","25052","25053","25054","25055","25056","25057","25058","25059","25060","25061","25062","25063","25064","25065","25066","25067","25068","25069","25070","25071","25072","25073","25074","25075","25076","25077","25078","25079","25080","25081","25082","25083","25084","25085","25086","25087","25088","25089","25090","25091","25092","25093"
# "7751"
#
import pypyodbc
import pandas as pd
import numpy as np
pd.set_option('expand_frame_repr', False)
inpdb12 = pypyodbc.connect('Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\\Users\\db12.accdb')
inpdb13 = pypyodbc.connect('Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\\Users\\db13.accdb')
inpdb14 = pypyodbc.connect('Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\\Users\\db14.accdb')
inpdb15 = pypyodbc.connect('Driver={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=C:\\Users\\db15.accdb')
conn_dict = {2012: inpdb12,
2013: inpdb13,
2014: inpdb14,
2015: inpdb15}
# Dictionary: each year has a tuple of names for the needed tables
# tables can be named differently each year
tablenames_dict = {2008: ['[ST08IP-DS1]', '[ST08IP-DS1DIAG]', '[ST08IP-DS1PROC]', '[ST08IP-DS1REV'],
2009: ['[ST09IP-4Q-DS1MAIN]', '[ST09IP-4Q-DS1DIAG]', '[ST09IP-4Q-DS1PROC]', '[ST09IP-4Q-DS1REV'],
2010: ['[ST2010IPDS1MAIN]', '[ST2010IPDS1DIAG]', '[ST2010IPDS1PROC]', '[ST2010IPDS1REV'],
2011: ['[ST2011Q4IPDS1MAIN]', '[ST2011Q4IPDS1DIAG]', '[ST2011Q4IPDS1PROC]', '[ST2011Q4IPDS1REV'],
2012: ['[ST2012Q4IPDS1]', '[ST2012Q4IPDS1DIAG]', '[ST2012Q4IPDS1PROC]', '[ST2012Q4IPDS1REV'],
2013: ['[ST2013Q4IPDS1MAIN]', '[ST2013Q4IPDS1DIAG]', '[ST2013Q4IPDS1PROC]', '[ST2013Q4IPDS1REV'],
2014: ['[ST2014Q4IPDS1]', '[ST2014Q4IPDS1DIAG]', '[ST2014Q4IPDS1PROC]', '[ST2014Q4IPDS1REV'],
2015: ['[ST2015Q1IPDS1]', '[ST2015Q1IPDS1DIAG]', '[ST2015Q1IPDS1PROC]', '[ST2015Q1IPDS1REV']}
###############################################################################
# DF processing
###############################################################################
cols_to_keep = ['CNTRL', 'HOSP', 'ZIP', 'DOB', 'SEX', 'ADATE','adate']
cols_to_keep = [x.lower() for x in cols_to_keep]
# Function to stack datasets according to discharge year
def make_main(iyear):
for iteryear in conn_dict.keys():
if iteryear == iyear:
base_ds = pd.read_sql(
' '.join(['select * from', tablenames_dict[iteryear][0], 'where year(adate) =', str(iyear), ';']),
conn_dict[iteryear]) # where year(adate) =',str(iyear)
base_ds = base_ds[cols_to_keep]
base_ds['orig_table'] = tablenames_dict[iteryear][0]
base_ds['dbyear'] = iteryear
record_count = len(base_ds)
print(' '.join(
['file', tablenames_dict[iteryear][0], 'has', str(record_count), 'records with admit dates in',
'CY' + str(iyear)]))
if iteryear > iyear:
add_ds = pd.read_sql(
' '.join(['select * from', tablenames_dict[iteryear][0], 'where year(adate) =', str(iyear), ';']),
conn_dict[iteryear])
add_ds = add_ds[cols_to_keep]
add_ds['orig_table'] = tablenames_dict[iteryear][0]
add_ds['dbyear'] = iteryear
record_count = len(add_ds)
print(' '.join(
['file', tablenames_dict[iteryear][0], 'has', str(record_count), 'records with admit dates in',
'CY' + str(iyear)]))
base_ds = pd.concat([base_ds, add_ds])
return base_ds
def make_colo(iyear):
for iteryear in conn_dict.keys():
if iteryear == iyear:
base_ds = pd.read_sql(' '.join([
'select b.cntrl,proc,procdate,hosp,dob,sex,adate,ddate,ethn,race FROM (select distinct cntrl,proc,procdate from',
tablenames_dict[iteryear][2], 'where year(procdate) =', str(iyear),
" and proc IN('1731','1732','1734','1735','1736','1739', '4503', '4526', '4541','4549', '4552', '4571','4572','4573','4574','4575','4576', '4579', '4581','4582','4583', '4592','4593','4594','4595', '4603', '4604', '4610','4611', '4613', '4614', '4643', '4652', '4675','4676', '4694')",
') as a left join', tablenames_dict[iteryear][0],
'as b ON a.cntrl=b.cntrl;']), conn_dict[iteryear])
base_ds['orig_table'] = tablenames_dict[iteryear][2]
base_ds['dbyear'] = iteryear
record_count = len(base_ds)
print(' '.join(
['file', tablenames_dict[iteryear][2], 'has', str(record_count), 'records with admit dates in',
'CY' + str(iyear)]))
if iteryear > iyear:
add_ds = pd.read_sql(' '.join([
'select b.cntrl,proc,procdate,hosp,dob,sex,adate,ddate,ethn,race FROM (select distinct cntrl,proc,procdate from',
tablenames_dict[iteryear][2], 'where year(procdate) =', str(iyear),
" and proc IN('1731','1732','1734','1735','1736','1739', '4503', '4526', '4541','4549', '4552', '4571','4572','4573','4574','4575','4576', '4579', '4581','4582','4583', '4592','4593','4594','4595', '4603', '4604', '4610','4611', '4613', '4614', '4643', '4652', '4675','4676', '4694')",
') as a left join', tablenames_dict[iteryear][0],
'as b ON a.cntrl=b.cntrl;']), conn_dict[iteryear])
add_ds['orig_table'] = tablenames_dict[iteryear][2]
add_ds['dbyear'] = iteryear
record_count = len(add_ds)
print(' '.join(
['file', tablenames_dict[iteryear][2], 'has', str(record_count), 'records with admit dates in',
'CY' + str(iyear)]))
base_ds = pd.concat([base_ds, add_ds])
return base_ds
def make_diab(iyear):
for iteryear in conn_dict.keys():
if iteryear == iyear:
base_ds = pd.read_sql(' '.join(
['select a.cntrl,diag,adate,ddate FROM (select distinct cntrl,diag from', tablenames_dict[iteryear][1],
"WHERE diag IN('25000','25001','25002','25003','25004','25005','25006','25007','25008','25009','25010','25011','25012','25013','25014','25015','25016','25017','25018','25019','25020','25021','25022','25023','25024','25025','25026','25027','25028','25029','25030','25031','25032','25033','64800','64801','64802','64803','64804')",
') as a LEFT JOIN', tablenames_dict[iteryear][0], 'as b ON a.cntrl=b.cntrl WHERE year(adate)=',
str(iyear), ';']), conn_dict[iteryear])
base_ds['orig_table'] = tablenames_dict[iteryear][1]
base_ds['dbyear'] = iteryear
record_count = len(base_ds)
print(' '.join(['file', tablenames_dict[iteryear][1], 'has', str(record_count), 'records with Diab in',
'CY' + str(iyear)]))
if iteryear > iyear:
add_ds = pd.read_sql(' '.join(
['select a.cntrl,diag,adate,ddate FROM (select distinct cntrl,diag from', tablenames_dict[iteryear][1],
"WHERE diag IN('25000','25001','25002','25003','25004','25005','25006','25007','25008','25009','25010','25011','25012','25013','25014','25015','25016','25017','25018','25019','25020','25021','25022','25023','25024','25025','25026','25027','25028','25029','25030','25031','25032','25033','64800','64801','64802','64803','64804')",
') as a LEFT JOIN', tablenames_dict[iteryear][0], 'as b ON a.cntrl=b.cntrl WHERE year(adate)=',
str(iyear), ';']), conn_dict[iteryear])
add_ds['orig_table'] = tablenames_dict[iteryear][1]
add_ds['dbyear'] = iteryear
record_count = len(add_ds)
print(' '.join(
['file', tablenames_dict[iteryear][1], 'has', str(record_count), 'records with Diab dates in',
'CY' + str(iyear)]))
base_ds = pd.concat([base_ds, add_ds])
return base_ds
def make_diabx(iyear):
for iteryear in conn_dict.keys():
if iteryear == iyear:
base_ds = pd.read_sql(' '.join(
['select a.cntrl,diag,adate,ddate FROM (select distinct cntrl,diag from', tablenames_dict[iteryear][1],
"WHERE diag IN('25040','25041','25042','25043','25044','25045','25046','25047','25048','25049','25050','25051','25052','25053','25054','25055','25056','25057','25058','25059','25060','25061','25062','25063','25064','25065','25066','25067','25068','25069','25070','25071','25072','25073','25074','25075','25076','25077','25078','25079','25080','25081','25082','25083','25084','25085','25086','25087','25088','25089','25090','25091','25092','25093','7751')",
') as a LEFT JOIN', tablenames_dict[iteryear][0], 'as b ON a.cntrl=b.cntrl WHERE year(adate)=',
str(iyear), ';']), conn_dict[iteryear])
base_ds['orig_table'] = tablenames_dict[iteryear][1]
base_ds['dbyear'] = iteryear
record_count = len(base_ds)
print(' '.join(['file', tablenames_dict[iteryear][1], 'has', str(record_count), 'records with DiabX in',
'CY' + str(iyear)]))
if iteryear > iyear:
add_ds = pd.read_sql(' '.join(
['select a.cntrl,diag,adate,ddate FROM (select distinct cntrl,diag from', tablenames_dict[iteryear][1],
"WHERE diag IN('25040','25041','25042','25043','25044','25045','25046','25047','25048','25049','25050','25051','25052','25053','25054','25055','25056','25057','25058','25059','25060','25061','25062','25063','25064','25065','25066','25067','25068','25069','25070','25071','25072','25073','25074','25075','25076','25077','25078','25079','25080','25081','25082','25083','25084','25085','25086','25087','25088','25089','25090','25091','25092','25093','7751')",
') as a LEFT JOIN', tablenames_dict[iteryear][0], 'as b ON a.cntrl=b.cntrl WHERE year(adate)=',
str(iyear), ';']), conn_dict[iteryear])
add_ds['orig_table'] = tablenames_dict[iteryear][1]
add_ds['dbyear'] = iteryear
record_count = len(add_ds)
print(' '.join(
['file', tablenames_dict[iteryear][1], 'has', str(record_count), 'records with DiabX dates in',
'CY' + str(iyear)]))
base_ds = pd.concat([base_ds, add_ds])
return base_ds
# Interactive Test Frame
# test=pd.read_sql(' '.join(['select TOP 200 * from',tablenames_dict[2014][0]]),conn_dict[2014])
# print('Creating Main dataset')
# main13=make_main(2013)
## 2013
col13 = make_colo(2013)
diab = make_diab(2013)
diabx = make_diabx(2013)
col13['key'] = col13['cntrl'].map(int).map(str) + col13['dbyear'].map(str)
diab['key'] = diab['cntrl'].map(str) + diab['dbyear'].map(str)
diabx['key'] = diabx['cntrl'].map(str) + diabx['dbyear'].map(str)
col13['dm'] = col13['key'].isin(diab['key'])
col13['dmx'] = col13['key'].isin(diabx['key'])
col13 = col13.rename(columns=
{
'hosp': 'ccn',
'sex': 'gender',
'adate': 'admitdate'
})
col13.drop_duplicates(subset=['key'], keep='first', inplace=True)
col13['dob'] = pd.to_datetime(col13['dob'])
col13['procdate'] = pd.to_datetime(col13['procdate'])
col13['ccn'] = col13['ccn'].map(int)
col13['admitdate'] = pd.to_datetime(col13['admitdate'])
## 2014
col14 = make_colo(2014)
diab = make_diab(2014)
diabx = make_diabx(2014)
col14['key'] = col14['cntrl'].map(int).map(str) + col14['dbyear'].map(str)
diab['key'] = diab['cntrl'].map(str) + diab['dbyear'].map(str)
diabx['key'] = diabx['cntrl'].map(str) + diabx['dbyear'].map(str)
col14['dm'] = col14['key'].isin(diab['key'])
col14['dmx'] = col14['key'].isin(diabx['key'])
col14 = col14.rename(columns=
{
'hosp': 'ccn',
'sex': 'gender',
'adate': 'admitdate'
})
col14.drop_duplicates(subset=['key'], keep='first', inplace=True)
col14['dob'] = pd.to_datetime(col14['dob'])
col14['procdate'] = pd.to_datetime(col14['procdate'])
col14['ccn'] = col14['ccn'].map(int)
col14['admitdate'] = pd.to_datetime(col14['admitdate'])
colo_discharges = col13.append(col14, ignore_index=True)
all_proc = pd.read_csv(r"C:\All_Procedures_2013_2015.csv", header=0)
all_inf = pd.read_csv(r"C:\Line List - All Infection Events 2013-2015.csv", header=0)
colo_nhsn = all_proc[all_proc['procCode'] == 'COLO']
colo_nhsn.columns = map(str.lower, colo_nhsn.columns)
colo_nhsn['dob'] =
|
pd.to_datetime(colo_nhsn['dob'])
|
pandas.to_datetime
|
"""Implementation of the workflow for (single and batches of) reads.
Each read is aligned to both reference sequences. The best match is taken and haplotyping is
performed by considering variants at the informative positions if any match could be found.
The result is a pair of ``blast.BlastMatch`` and ``haplotyping.HaplotypingResult`` (or ``None``)
that can be joined by the read file name. Further, the sample information can be derived
from this with a regexp.
"""
import os
import re
import typing
import attr
from logzero import logger
import pandas as pd
import tempfile
from .blast import run_blast, BlastMatch
from .common import load_fasta
from .haplotyping import run_haplotyping, HaplotypingResultWithMatches
#: Default minimal quality to consider a match as true.
DEFAULT_MIN_IDENTITY = 0.5
#: Default regular expression to use for inferring the sample information.
# TODO: change a bit...?
DEFAULT_PARSE_RE = r"^(?P<sample>[^_]+_[^_]+_[^_]+)_(?P<primer>.*?)\.fasta"
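# Illustrative usage of DEFAULT_PARSE_RE (not part of the original module; the
# file name below is made up). The named groups yield the sample and primer:
#   m = re.match(DEFAULT_PARSE_RE, "S1_plate2_A01_primerX.fasta")
#   sample, primer = m.group("sample"), m.group("primer")  # "S1_plate2_A01", "primerX"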
#: The reference files to use.
REF_FILE = os.path.join(os.path.dirname(__file__), "data", "ref_seqs.fasta")
@attr.s(auto_attribs=True, frozen=True)
class NamedSequence:
"""A named sequence."""
#: the sequence name
name: str
#: the sequence
sequence: str
def only_blast(path_query: str) -> typing.Tuple[BlastMatch]:
"""Run BLAST and haplotyping for the one file at ``path_query``."""
logger.info("Running BLAST on all references for %s...", path_query)
return run_blast(REF_FILE, path_query)
def blast_and_haplotype(path_query: str) -> typing.Dict[str, HaplotypingResultWithMatches]:
return run_haplotyping(only_blast(path_query))
def blast_and_haplotype_many(
paths_query: typing.Iterable[str],
) -> typing.Dict[str, HaplotypingResultWithMatches]:
"""Run BLAST and haplotyping for all files at ``paths_query``.
Return list of dicts with keys "best_match" and "haplo_result".
"""
logger.info("Running BLAST and haplotyping for all queries...")
result = {}
for path_query in paths_query:
path_result = blast_and_haplotype(path_query)
if path_result:
result.update(path_result)
else:
result[path_query] = HaplotypingResultWithMatches.build_empty()
return result
def strip_ext(s: str) -> str:
return s.rsplit(".", 1)[0]
def results_to_data_frames(
results: typing.Dict[str, HaplotypingResultWithMatches], regex: str, column: str = "query"
) -> typing.Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""Convert list of dicts with best_match/haplo_result to triple of Pandas DataFrame.
    The three DataFrames will contain the following information:
1. A summary data frame showing best BLAST match target and identity plus haplotype.
2. A data frame showing BLAST result details.
3. A data frame showing haplotyping result details.
"""
r_summary = []
r_blast = []
r_haplo = []
for path, result in results.items():
haplo_result = result.result
if not haplo_result:
for query, query_seq in load_fasta(path).items():
r_summary.append(
{"query": query, "database": ".", "identity": 0, "orig_sequence": query_seq}
)
r_blast.append({"query": query})
r_haplo.append({"query": query})
else:
query_seq = load_fasta(path)[haplo_result.query]
haplo_matches = result.matches
best_match = list(sorted(haplo_matches, key=lambda m: m.identity, reverse=True))[0]
r_summary.append(
{
"query": best_match.query,
"database": best_match.database,
"identity": 100.0 * best_match.identity,
**{
key: value
for key, value in haplo_result.asdict().items()
if key in ("best_haplotypes", "best_score")
},
"orig_sequence": query_seq,
}
)
r_blast.append(
{
"query": best_match.query,
"database": best_match.database,
"identity": 100.0 * best_match.identity,
"q_start": best_match.query_start,
"q_end": best_match.query_end,
"q_str": best_match.query_strand,
"db_start": best_match.database_start,
"db_end": best_match.database_end,
"db_str": best_match.database_strand,
"alignment": best_match.alignment.wrapped(
best_match.query_start, best_match.database_start
),
"orig_sequence": query_seq,
}
)
r_haplo.append(
{
"query": best_match.query,
**{
key: value
for key, value in haplo_result.asdict().items()
if "_pos" in key
or "_neg" in key
or key in ("best_haplotypes", "best_score")
},
}
)
dfs = pd.DataFrame(r_summary), pd.DataFrame(r_blast),
|
pd.DataFrame(r_haplo)
|
pandas.DataFrame
|
"""Provides entry point main()"""
from midi_lens.Visualizer import Visualizer
from midi_lens.Midi import Midi
import inspect
import warnings
import pandas as pd
import numpy as np
from tqdm import tqdm
from os.path import join, basename, exists, splitext
from glob import glob
def analysis_from_json(file) -> pd.DataFrame:
    """Scan DataFrame from .json file,
and check if it contains valid statistics
Args:
file (os.path): name of .json file
Returns:
pd.DataFrame: DataFrame containing midi file stats
"""
try:
data = pd.read_json(file, orient='table')
except Exception as err:
print("Error reading from json ({})".format(err))
return pd.DataFrame()
# to-do
# check if shape is ok,
# and that columns match
cols = ['tempo_diff_avg', 'tempo_max_diff', 'tempo_min_diff', 'tempo_range',
'avg_tempo', 'tempo_len_range', 'avg_tempo_len', 'weighted_vel',
'avg_vel_range', 'avg_avg_vel', 'std_vel_range', 'std_avg_vel',
'avg_tone_range', 'avg_avg_tone', 'std_tone_range', 'std_avg_tone',
'len_diff_range', 'avg_len_diff', 'avg_poly', 'poly_range',
'total_len']
if len(data.columns) != len(cols) or (data.columns != cols).all() or data.shape[0] < 1:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# @Time : 11/19/20 7:28 PM
# @Author : Saptarshi
# @Email : <EMAIL>
# @File : ConfirmedStudents.py
# @Project: eda-seminar-organization-grading
#def add(a: int, b: int) -> int:
# return a + b
import numpy as np
import pandas as pd
import sys
from openpyxl import load_workbook
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl import *
import sys
import argparse
import os
def create_arg_parser():
"""Creates and returns the ArgumentParser object"""
parser = argparse.ArgumentParser(description='Gives confirmed list of students with review allocation')
parser.add_argument('inputFile',
help='Path to the input xlsx file')
parser.add_argument('-HS', dest='hauptseminar', action='store_true',
help='use this switch to toggle to Hauptseminar')
parser.add_argument('-u', '--update', dest='updatepath', action='append',
help='use this to update an existing master file with new students from moodle')
return parser
def read_srcfile(source_filename):
"""Import the src file from TUMonline into a Pandas dataframe"""
src_wb = load_workbook(source_filename)
print("The available sheets in the xlsx file")
print(src_wb.sheetnames)
src_sheet = src_wb.active
print("selected sheet for data manipulation:")
print(src_sheet)
src_df = pd.DataFrame(src_sheet.values) #return the sheet values as pandas Dataframe
return src_df
def write_masterfile(write_df):
"""Create Xlsx file with current timestamp(to identify latest updated version. Then copying the pandas df in Sheets)"""
writer = pd.ExcelWriter("OutputFiles/master_sheet_" + str(
|
pd.datetime.now()
|
pandas.datetime.now
|
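The completion above relies on `pd.datetime.now()`; `pandas.datetime` is a deprecated alias that has since been removed from pandas. A small sketch of an equivalent timestamp for the output file name, assuming the same write_masterfile context:
from datetime import datetime
import pandas as pd

stamp = datetime.now()         # standard-library replacement for pd.datetime.now()
stamp_pd = pd.Timestamp.now()  # pandas-native equivalent
path = "OutputFiles/master_sheet_" + str(stamp) + ".xlsx"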
"""
Unit tests for Mlxtend compatibility.
"""
# Author: <NAME>
# License: MIT
import unittest
import pandas as pd
from scipy.stats import randint
from sklearn import __version__ as sk_version
from sklearn.base import clone
from sklearn.datasets import load_boston, load_linnerud
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.model_selection import train_test_split
from sklearn.pipeline import FeatureUnion
from physlearn import Regressor
from physlearn.datasets import load_benchmark
from physlearn.supervised import ShapInterpret
class TestMlxtend(unittest.TestCase):
def test_stacking_regressor_without_cv_gridsearchcv(self):
X, y = load_boston(return_X_y=True)
X, y = pd.DataFrame(X), pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42)
stack = dict(regressors=['kneighborsregressor', 'bayesianridge'],
final_regressor='lasso')
reg = Regressor(regressor_choice='mlxtendstackingregressor',
pipeline_transform='standardscaler',
stacking_options=dict(layers=stack))
search_params = dict(reg__kneighborsregressor__n_neighbors=[2, 4, 5],
reg__bayesianridge__alpha_1=[1e-7, 1e-6],
reg__meta_regressor__alpha=[1.0],
tr__with_std=[True, False])
reg.search(X_train, y_train, search_params=search_params)
self.assertLess(reg.best_score_.values, 3.0)
self.assertIn(reg.best_params_['reg__kneighborsregressor__n_neighbors'], [2, 4, 5])
self.assertIn(reg.best_params_['reg__bayesianridge__alpha_1'], [1e-7, 1e-6])
self.assertIn(reg.best_params_['reg__meta_regressor__alpha'], [1.0])
def test_stacking_regressor_with_cv_gridsearchcv(self):
X, y = load_boston(return_X_y=True)
X, y =
|
pd.DataFrame(X)
|
pandas.DataFrame
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import sympy as sp
import sys, os, sympy, shutil, math
# import xmltodict
# import pickle
import json
# import pandas
import pylab
from os import listdir
import pandas as pd
import visuals
import hashlib
import yaml
import os.path
print('postproc')
from matplotlib.ticker import ScalarFormatter
from matplotlib.ticker import FormatStrFormatter
elastic = 'C0'
homogen = 'C1'
localis = 'C2'
unstabl = 'C3'
def load_data(rootdir):
try:
with open(rootdir + '/parameters.pkl', 'r') as f:
params = json.load(f)
#stuff goes here
except IOError:
#do what you want if there is an error with the file opening
print('Could not find pickles')
try:
with open(rootdir + '/parameters.yaml', 'r') as f:
params = yaml.load(f, Loader=yaml.FullLoader)
#stuff goes here
except IOError:
#do what you want if there is an error with the file opening
print('Could not find yaml')
with open(rootdir + '/time_data.json', 'r') as f:
data = json.load(f)
dataf =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
"""Created on Fri Apr 3 11:05:15 2020.
Contains the functions needed for data manipulation
@author: MLechner
-*- coding: utf-8 -*-
"""
import copy
import math
from concurrent import futures
import numpy as np
import pandas as pd
import ray
from mcf import general_purpose as gp
from mcf import general_purpose_estimation as gp_est
from mcf import general_purpose_mcf as gp_mcf
def variable_features(var_x_type, var_x_values):
"""
Show variables and their key features.
Parameters
----------
var_x_type : Dict. Name and type of variable.
var_x_values : Dict. Name and values of variables.
Returns
-------
None.
"""
print('\n')
print(80 * '=')
print('Features used to build causal forests')
print(80 * '-')
for name in var_x_type.keys():
print('{:20} '.format(name), end=' ')
if var_x_type[name] == 0:
print('Ordered ', end='')
if var_x_values[name]:
if isinstance(var_x_values[name][0], float):
for val in var_x_values[name]:
print('{:>6.2f}'.format(val), end=' ')
print(' ')
else:
print(var_x_values[name])
else:
print('Continuous')
else:
print('Unordered ', len(var_x_values[name]), ' different values')
print(80 * '-')
def prepare_data_for_forest(indatei, v_dict, v_x_type, v_x_values, c_dict,
no_y_nn=False, regrf=False):
"""Prepare data for Forest and variable importance estimation.
Parameters
----------
indatei : String. CSV file.
v_dict : DICT. Variables.
v_x_type : List. Type of variables.
v_x_values : List. Values of variables (if not continuous).
c_dict : DICT. Parameters.
Returns
-------
x_name : Dict.
x_type :Dict.
x_values : Dict.
c : Dict. Parameters (updated)
pen_mult : INT. Multiplier for penalty.
data_np : Numpy array. Data for estimation.
y_i : INT. Index of position of y in data_np.
y_nn_i : Numpy array of INT.
x_i : Numpy array of INT.
x_ind : Numpy array of INT.
x_ai_ind : Numpy array of INT.
d_i : INT.
w_i : INT.
cl_i : INT.
"""
x_name, x_type = gp.get_key_values_in_list(v_x_type)
x_type = np.array(x_type)
x_name2, x_values = gp.get_key_values_in_list(v_x_values)
pen_mult = 0
if x_name != x_name2:
raise Exception('Wrong order of names', x_name, x_name2)
p_x = len(x_name) # Number of variables
c_dict = m_n_grid(c_dict, p_x) # Grid for # of var's used for split
x_ind = np.array(range(p_x)) # Indices instead of names of variable
x_ai_ind = [] # Indices of variables used for all splits
if not v_dict['x_name_always_in'] == []:
always_in_set = set(v_dict['x_name_always_in'])
x_ai_ind = np.empty(len(always_in_set), dtype=np.uint32)
j = 0
for i in range(p_x):
if x_name[i] in always_in_set:
x_ai_ind[j] = i
j = j + 1
data =
|
pd.read_csv(indatei)
|
pandas.read_csv
|
import re
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import IntervalIndex, MultiIndex, RangeIndex
import pandas.util.testing as tm
def test_labels_dtypes():
# GH 8456
i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
assert i.codes[0].dtype == "int8"
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(40)])
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(400)])
assert i.codes[1].dtype == "int16"
i = MultiIndex.from_product([["a"], range(40000)])
assert i.codes[1].dtype == "int32"
i = pd.MultiIndex.from_product([["a"], range(1000)])
assert (i.codes[0] >= 0).all()
assert (i.codes[1] >= 0).all()
def test_values_boxed():
tuples = [
(1, pd.Timestamp("2000-01-01")),
(2, pd.NaT),
(3, pd.Timestamp("2000-01-03")),
(1, pd.Timestamp("2000-01-04")),
(2, pd.Timestamp("2000-01-02")),
(3, pd.Timestamp("2000-01-03")),
]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
# TODO(GH-24559): Remove the FutureWarning
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
aware = pd.DatetimeIndex(ints, tz="US/Central")
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
|
tm.assert_index_equal(inner, aware)
|
pandas.util.testing.assert_index_equal
|
# dev_data.py
#In[0]
import os, sys
from datetime import datetime, timedelta, time as dtime
import time
import numpy as np
import pandas as pd
import string
import random
def mock_df():
area = pd.Series({0: 423967, 1: 695662, 2: 141297, 3: 170312, 4: 149995})
population = pd.Series(
{0: 38332521, 1: 26448193, 2: 19651127, 3: 19552860, 4: 12882135})
population = population.astype(float)
states = ['California', 'Texas', 'New York', 'Florida', 'Illinois']
df = pd.DataFrame({'states': states,
'area': area, 'population': population}, index=range(len(states)))
dates = [pd.to_datetime('06-15-2020') +
|
pd.DateOffset(i)
|
pandas.DateOffset
|
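The prompt above is truncated mid-list-comprehension; a complete, self-contained version of the pattern it starts could look like this (the range length is an assumption):
import pandas as pd

# Five consecutive calendar days starting 2020-06-15; pd.DateOffset(i) with only a
# count argument shifts the timestamp by i calendar days.
dates = [pd.to_datetime('06-15-2020') + pd.DateOffset(i) for i in range(5)]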
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 26 22:55:37 2020
@author: <NAME> <EMAIL>
Data and Model from:
A conceptual model for the coronavirus disease 2019 (COVID-19)
outbreak in Wuhan, China with individual reaction and
governmental action
DOI:https://doi.org/10.1016/j.ijid.2020.02.058
https://www.ijidonline.com/article/S1201-9712(20)30117-X/fulltext
"""
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
import numpy as np
import math
import pandas as pd
import os
import time
start = time.time() #Real time when the program starts to run
clear = lambda: os.system('cls')
cwd = os.getcwd()
dir_path = os.path.dirname(os.path.realpath(__file__))
path_fol = "{}\SEIR Model for Spread of Disease".format(dir_path)
try:
os.mkdir(path_fol)
except:
pass
def R0(α, β, μ, γ):
# R_0 = (α/(μ + α))*(β/(μ + λ))
R_0 = (β/γ)*(α/(α + μ))
return R_0
def R0b(β, γ, σ, μ):
return (β*σ)/((γ + μ)*(μ + σ))
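# Illustrative check (not in the original script): with the values assigned further
# down (β = 1.68, γ = 1/14, σ ≈ 1/5.39, μ = 0), σ/(μ + σ) = 1, so R0b reduces to
# β/γ = 1.68 * 14 ≈ 23.5.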
def aplha(day):
if int(day) <= 23:
return 0
elif int(day) > 23 and int(day) <= 29:
return 0.4239
else:
return 0.8478
def Beta(α,β0, D, N, k):
B = β0*(1 - α)*((1 - D/N)**k)
return B
def SEIR(t, y, *args):
σ, β, γ, μ, Λ, F, α, d, κ, λ = args
β_t = Beta(α, β, y[5], y[4], κ)
dsdt = Λ - μ*y[0] - ((β*F*y[0])/y[4]) - (β_t/y[4])*y[2]*y[0]
dedt = ((β*F*y[0])/y[4]) + (β_t/y[4])*y[2]*y[0] - (μ + σ)*y[1]
didt = σ*y[1] - (μ + γ)*y[2]
drdt = γ*y[2] - μ*y[3]
dndt = -μ*y[4]
dDdt = d*γ*y[2] - λ*y[5]
dcdt = σ*y[1]
return [dsdt, dedt, didt, drdt, dndt, dDdt, dcdt]
def jacobian(t, y, *args):
σ, β, γ, μ, Λ, F, α, d, κ, λ = args
β_t = Beta(α, β, y[5], y[4], κ)
return [[-F*β/y[4]- y[2]*β_t/y[4]- μ, 0, -y[0]*β_t/y[4], 0, F*y[0]*β/y[4]**2 + y[2]*y[0]*β_t/y[4]**2, 0, 0],
[ F*β/y[4]+ y[2]*β_t/y[4], -μ - σ, y[0]*β_t/y[4], 0, -F*y[0]*β/y[4]**2 - y[2]*y[0]*β_t/y[4]**2, 0, 0],
[ 0, σ, -γ - μ, 0, 0, 0, 0],
[ 0, 0, γ, -μ, 0, 0, 0],
[ 0, 0, 0, 0, -μ, 0, 0],
[ 0, 0, d*γ, 0, 0, -λ, 0],
[ 0, σ, 0, 0, 0, 0, 0]]
def roundup(x, places):
return int(math.ceil(x / int(places))) * int(places)
Λ = 0.0 # Birth rate
μ = 0.0 # Death rate
# Λ = 0.01 # Birth rate
# μ = 0.0205 # Death rate
Tc = 2.0 # Typical time between contacts
# β = 0.5944 #1.0/Tc
β = 1.68
# Tr = 11.1 # Typical time until recovery
Tinfs = [2.9, 2.3, 2.3, 2.9, 10.0, 1.5]
# Tr = sum(Tinfs)/len(Tinfs) #5.0
# Tr = 11.1
Tr = 14.0
γ = 1.0/Tr
Tincs = [5.2, 5.2, 6.1, 5.5, 4.8, 5.0, 6.5, 4.8]
Tinc = sum(Tincs)/len(Tincs)
σ = Tinc**-1
# σ = 3.0**-1
F = 10
α = 0.0
# α = 0.05
# α = 0.4239
# α = 0.8478
d = 0.05
# k = 1117.3
# k = 200
k = 0
λb = 11.2
λ = λb**-1
Infi = 10 # Initial infected
Daysnn = 150
NP = 329436928 # 1437904257
S0 = NP - Infi
its = 10000
itern = Daysnn/its
Days = [0.0, Daysnn]
Time = [i for i in range(0, int(Daysnn + 1), 1)]
tt = list(range(0,its,1))
Time_f = [i*itern for i in tt]
Y0 = [NP, 0.0, Infi, 0.0, NP, d, Infi]
Ro = R0b(β, γ, σ, μ)
# print(Ro)
# print('Λ')
# print('μ')
# print(α)
# print(β)
# print(Ro, 1.68**-1)
# print(λ)
# print(σ)
answer = solve_ivp(SEIR, Days, Y0, t_eval=Time_f, method = 'Radau', args=(σ, β, γ, μ, Λ, F, α, d, k, λ), jac=jacobian, rtol=1E-10, atol=1E-10)
ts = answer.t
Bs = [Beta(σ, β, i, j, k) for i,j in zip(answer.y[5],answer.y[4])]
Sn = answer.y[0]
En = answer.y[1]
In = answer.y[2]
Rn = answer.y[3]
Nn = answer.y[4]
Dn = answer.y[5]
Cn = answer.y[6]
Spb = answer.y[0]/NP
Epb = answer.y[1]/NP
Ipb = answer.y[2]/NP
Rpb = answer.y[3]/NP
Npb = answer.y[4]/NP
Dpb = answer.y[5]/NP
Cpb = answer.y[6]/NP
Sp = [i*100.0 for i in Spb]
Ep = [i*100.0 for i in Epb]
Ip = [i*100.0 for i in Ipb]
Rp = [i*100.0 for i in Rpb]
Np = [i*100.0 for i in Npb]
Dp = [i*100.0 for i in Dpb]
Cp = [i*100.0 for i in Cpb]
m = max(In)
mi = (In.tolist()).index(max(In))
mip = mi/its
peakn = round(Daysnn*mip)
my = max(Ip)
myi = (Ip).index(max(Ip))
myp = myi/its
peakyn = round(Daysnn*myp)
PEAK = [int(round(Daysnn*(mi/its)))]
nPEAK = np.array(PEAK, ndmin=2)
Tdata = np.array((Time_f, Sn, En, In, Rn))
TTdata = np.array((Time_f, Spb, Epb, Ipb, Rpb, Sp, Ep, Ip, Rp))
Tdatal = Tdata.tolist()
if its <= 16384:
    writer = pd.ExcelWriter(r'{}\SIR Population.xlsx'.format(path_fol), engine='xlsxwriter')
    writerp = pd.ExcelWriter(r'{}\SIR Percent.xlsx'.format(path_fol), engine='xlsxwriter')
indexes = ['Time [Days]', 'Susceptible', 'Exposed', 'Infected', 'Recovered', 'Peak [Day]']
indexest = ['Time [Days]', 'Susceptible', 'Exposed', 'Infected', 'Recovered' , 'Susceptible [%]', 'Exposed [%]', 'Infected [%]', 'Recovered [%]', 'Peak [Day]']
df = pd.DataFrame([Time_f, Sn, En, In, Rn, PEAK], index=[*indexes])
dft = pd.DataFrame([Time_f, Spb, Epb, Ipb, Rpb, Sp, Ep, Ip, Rp, PEAK], index=[*indexest])
df.to_excel(r"{}\SIR Population.xlsx".format(path_fol), sheet_name="SIR Population.xlsx", header=True, startrow=1)
dft.to_excel(r"{}\SIR Percent.xlsx".format(path_fol), sheet_name="SIR Percent.xlsx", header=True, startrow=1)
elif its > 16384 and its <= 1048576:
    writer = pd.ExcelWriter(r'{}\SIR Population.xlsx'.format(path_fol), engine='xlsxwriter')
    writerp = pd.ExcelWriter(r'{}\SIR Percent.xlsx'.format(path_fol), engine='xlsxwriter')
indexesb = ['Time [Days]', 'Susceptible', 'Exposed', 'Infected', 'Recovered']
indexestb = ['Time [Days]', 'Susceptible', 'Exposed', 'Infected', 'Recovered' , 'Susceptible [%]', 'Exposed [%]', 'Infected [%]', 'Recovered [%]']
df = pd.DataFrame(Tdata.T, columns=[*indexesb])
df.T
df.loc[:,'Peak [Day]'] =
|
pd.Series([PEAK])
|
pandas.Series
|
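One pandas detail behind the completion above: assigning a length-1 Series to a whole column aligns on the index, so only row 0 receives the peak value and every other row becomes NaN. A minimal, self-contained illustration with toy data (not the SEIR frame):
import pandas as pd

toy = pd.DataFrame({"t": [0, 1, 2]})
toy.loc[:, "Peak [Day]"] = pd.Series([42])    # index alignment: 42 lands in row 0 only
print(toy["Peak [Day]"].tolist())             # [42.0, nan, nan]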
# SPDX-FileCopyrightText: : 2017-2020 The PyPSA-Eur Authors
#
# SPDX-License-Identifier: MIT
"""
Creates Voronoi shapes for each bus representing both onshore and offshore regions.
Relevant Settings
-----------------
.. code:: yaml
countries:
.. seealso::
Documentation of the configuration file ``config.yaml`` at
:ref:`toplevel_cf`
Inputs
------
- ``resources/country_shapes.geojson``: confer :ref:`shapes`
- ``resources/offshore_shapes.geojson``: confer :ref:`shapes`
- ``networks/base.nc``: confer :ref:`base`
Outputs
-------
- ``resources/regions_onshore.geojson``:
.. image:: ../img/regions_onshore.png
:scale: 33 %
- ``resources/regions_offshore.geojson``:
.. image:: ../img/regions_offshore.png
:scale: 33 %
Description
-----------
"""
import logging
from _helpers import configure_logging
import pypsa
import os
import pandas as pd
import geopandas as gpd
from vresutils.graph import voronoi_partition_pts
logger = logging.getLogger(__name__)
def save_to_geojson(s, fn):
if os.path.exists(fn):
os.unlink(fn)
schema = {**gpd.io.file.infer_schema(s), 'geometry': 'Unknown'}
s.to_file(fn, driver='GeoJSON', schema=schema)
if __name__ == "__main__":
if 'snakemake' not in globals():
from _helpers import mock_snakemake
snakemake = mock_snakemake('build_bus_regions')
configure_logging(snakemake)
countries = snakemake.config['countries']
n = pypsa.Network(snakemake.input.base_network)
country_shapes = gpd.read_file(snakemake.input.country_shapes).set_index('name')['geometry']
offshore_shapes = gpd.read_file(snakemake.input.offshore_shapes).set_index('name')['geometry']
onshore_regions = []
offshore_regions = []
for country in countries:
c_b = n.buses.country == country
onshore_shape = country_shapes[country]
onshore_locs = n.buses.loc[c_b & n.buses.substation_lv, ["x", "y"]]
onshore_regions.append(gpd.GeoDataFrame({
'name': onshore_locs.index,
'x': onshore_locs['x'],
'y': onshore_locs['y'],
'geometry': voronoi_partition_pts(onshore_locs.values, onshore_shape),
'country': country
}))
if country not in offshore_shapes.index: continue
offshore_shape = offshore_shapes[country]
offshore_locs = n.buses.loc[c_b & n.buses.substation_off, ["x", "y"]]
offshore_regions_c = gpd.GeoDataFrame({
'name': offshore_locs.index,
'x': offshore_locs['x'],
'y': offshore_locs['y'],
'geometry': voronoi_partition_pts(offshore_locs.values, offshore_shape),
'country': country
})
offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2]
offshore_regions.append(offshore_regions_c)
save_to_geojson(pd.concat(onshore_regions, ignore_index=True), snakemake.output.regions_onshore)
save_to_geojson(
|
pd.concat(offshore_regions, ignore_index=True)
|
pandas.concat
|
import importlib
import re
import pandas as pd
import numpy as np
import joblib
from tqdm import tqdm
from pprint import pformat, pprint
from misc_utils import np_temp_seed, none_or_nan
ELEMENT_LIST = ['H', 'C', 'O', 'N', 'P', 'S', 'Cl']
TWO_LETTER_TOKEN_NAMES = [
'Al', 'Ce', 'Co', 'Ge', 'Gd', 'Cs', 'Th', 'Cd', 'As', 'Na', 'Nb', 'Li',
'Ni', 'Se', 'Sc', 'Sb', 'Sn', 'Hf', 'Hg', 'Si', 'Be', 'Cl', 'Rb', 'Fe',
'Bi', 'Br', 'Ag', 'Ru', 'Zn', 'Te', 'Mo', 'Pt', 'Mn', 'Os', 'Tl', 'In',
'Cu', 'Mg', 'Ti', 'Pb', 'Re', 'Pd', 'Ir', 'Rh', 'Zr', 'Cr', '@@', 'se',
'si', 'te'
]
LC_TWO_LETTER_MAP = {
"se": "Se", "si": "Si", "te": "Te"
}
def rdkit_import(*module_strs):
RDLogger = importlib.import_module("rdkit.RDLogger")
RDLogger.DisableLog('rdApp.*')
modules = []
for module_str in module_strs:
modules.append(importlib.import_module(module_str))
return tuple(modules)
def normalize_ints(ints):
try:
total_ints = sum(ints)
except:
import pdb; pdb.set_trace()
ints = [ints[i] / total_ints for i in range(len(ints))]
return ints
def randomize_smiles(smiles, rseed, isomeric=False, kekule=False):
"""Perform a randomization of a SMILES string must be RDKit sanitizable"""
if rseed == -1:
return smiles
modules = rdkit_import("rdkit.Chem")
Chem = modules[0]
m = Chem.MolFromSmiles(smiles)
assert not (m is None)
ans = list(range(m.GetNumAtoms()))
with np_temp_seed(rseed):
np.random.shuffle(ans)
nm = Chem.RenumberAtoms(m,ans)
smiles = Chem.MolToSmiles(nm, canonical=False, isomericSmiles=isomeric, kekuleSmiles=kekule)
assert not (smiles is None)
return smiles
def split_smiles(smiles_str):
token_list = []
ptr = 0
while ptr < len(smiles_str):
if smiles_str[ptr:ptr + 2] in TWO_LETTER_TOKEN_NAMES:
smiles_char = smiles_str[ptr:ptr + 2]
if smiles_char in LC_TWO_LETTER_MAP:
smiles_char = LC_TWO_LETTER_MAP[smiles_char]
token_list.append(smiles_char)
ptr += 2
else:
smiles_char = smiles_str[ptr]
token_list.append(smiles_char)
ptr += 1
return token_list
def list_replace(l,d):
return [d[data] for data in l]
def mol_from_inchi(inchi):
modules = rdkit_import("rdkit.Chem")
Chem = modules[0]
try:
mol = Chem.MolFromInchi(inchi)
except:
mol = np.nan
if none_or_nan(mol):
mol = np.nan
return mol
def mol_from_smiles(smiles):
modules = rdkit_import("rdkit.Chem")
Chem = modules[0]
try:
mol = Chem.MolFromSmiles(smiles)
except:
mol = np.nan
if none_or_nan(mol):
mol = np.nan
return mol
def mol_to_smiles(mol,canonical=True,isomericSmiles=False,kekuleSmiles=False):
modules = rdkit_import("rdkit.Chem")
Chem = modules[0]
try:
smiles = Chem.MolToSmiles(mol,canonical=canonical,isomericSmiles=isomericSmiles,kekuleSmiles=kekuleSmiles)
except:
smiles = np.nan
return smiles
def mol_to_formula(mol):
modules = rdkit_import("rdkit.Chem.AllChem")
AllChem = modules[0]
try:
formula = AllChem.CalcMolFormula(mol)
except:
formula = np.nan
return formula
def mol_to_inchikey(mol):
modules = rdkit_import("rdkit.Chem.inchi")
inchi = modules[0]
try:
inchikey = inchi.MolToInchiKey(mol)
except:
inchikey = np.nan
return inchikey
def mol_to_inchikey_s(mol):
modules = rdkit_import("rdkit.Chem.inchi")
inchi = modules[0]
try:
inchikey = inchi.MolToInchiKey(mol)
inchikey_s = inchikey[:14]
except:
inchikey_s = np.nan
return inchikey_s
def mol_to_inchi(mol):
modules = rdkit_import("rdkit.Chem.rdinchi")
rdinchi = modules[0]
try:
inchi = rdinchi.MolToInchi(mol,options='-SNon')
except:
inchi = np.nan
return inchi
def mol_to_mol_weight(mol,exact=True):
modules = rdkit_import("rdkit.Chem.Descriptors")
Desc = modules[0]
if exact:
mol_weight = Desc.ExactMolWt(mol)
else:
mol_weight = Desc.MolWt(mol)
return mol_weight
def inchi_to_smiles(inchi):
try:
mol = mol_from_inchi(inchi)
smiles = mol_to_smiles(mol)
except:
smiles = np.nan
return smiles
def smiles_to_selfies(smiles):
sf, Chem = rdkit_import("selfies","rdkit.Chem")
try:
# canonicalize, strip isomeric information, kekulize
mol = Chem.MolFromSmiles(smiles)
smiles = Chem.MolToSmiles(mol,canonical=False,isomericSmiles=False,kekuleSmiles=True)
selfies = sf.encoder(smiles)
except:
selfies = np.nan
return selfies
def make_morgan_fingerprint(mol, radius=3):
modules = rdkit_import("rdkit.Chem.rdMolDescriptors","rdkit.DataStructs")
rmd = modules[0]
ds = modules[1]
fp = rmd.GetHashedMorganFingerprint(mol,radius)
fp_arr = np.zeros(1)
ds.ConvertToNumpyArray(fp, fp_arr)
return fp_arr
def make_rdkit_fingerprint(mol):
chem, ds = rdkit_import("rdkit.Chem","rdkit.DataStructs")
fp = chem.RDKFingerprint(mol)
fp_arr = np.zeros(1)
ds.ConvertToNumpyArray(fp,fp_arr)
return fp_arr
def make_maccs_fingerprint(mol):
maccs, ds = rdkit_import("rdkit.Chem.MACCSkeys","rdkit.DataStructs")
fp = maccs.GenMACCSKeys(mol)
fp_arr = np.zeros(1)
ds.ConvertToNumpyArray(fp,fp_arr)
return fp_arr
def split_selfies(selfies_str):
selfies = importlib.import_module("selfies")
selfies_tokens = list(selfies.split_selfies(selfies_str))
return selfies_tokens
def seq_apply(iterator,func):
result = []
for i in iterator:
result.append(func(i))
return result
def par_apply(iterator,func):
n_jobs = joblib.cpu_count()
par_func = joblib.delayed(func)
result = joblib.Parallel(n_jobs=n_jobs)(par_func(i) for i in iterator)
return result
def par_apply_series(series,func):
series_iter = tqdm(series.iteritems(),desc=pformat(func),total=series.shape[0])
series_func = lambda tup: func(tup[1])
result_list = par_apply(series_iter,series_func)
result_series =
|
pd.Series(result_list,index=series.index)
|
pandas.Series
|
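The `par_apply_series` helper above fans an element-wise function out over a Series with joblib and re-attaches the original index. A short hypothetical usage sketch (the input data is made up; `mol_from_smiles` is the function defined in the snippet and needs RDKit installed):
import pandas as pd

smiles_series = pd.Series(["CCO", "c1ccccc1", "CC(=O)O"])
mols = par_apply_series(smiles_series, mol_from_smiles)  # result keeps smiles_series' index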
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 16:16:48 2019
@author: logiusti
"""
import os
import numpy as np
import pandas as pd
from Utility import timeit
import pickle
from astropy.convolution import Gaussian1DKernel
from sklearn.preprocessing import MinMaxScaler
class Loader:
r"""
Class which provides all the functionalities to load the data
"""
def __init__(self, ups_data_path=r"C:\Users\logiusti\Lorenzo\Data\coll"):
self.__ups_data_path = ups_data_path
self.__ups_to_commission_date = self.load_commission_dates(r"C:\Users\logiusti\Lorenzo\Data\Grapheable\commission_date_df.csv")
self.__ups_to_temperature = self.load_temperatures()
self.__ups_name_list = self.retrive_ups_name_list()
self.__ups_to_eta = self.get_all_eta()
self.__ups_to_clicks = self.count_clicks()
self.__ups_to_overheat = self.count_overheats()
def set_all_public_variables(self):
self.ups_to_eta = self.__ups_to_eta
self.ups_to_temperature = self.__ups_to_temperature
@timeit
def count_clicks(self):
r"""
Load all the clicks of the ups.
        By "click" we mean a single load/unload process.
Returns the dictionary with the ups as key and the associated clicks as value
in self.__ups_to_clicks there's a copy of the result
Returns
-------
ups_to_clicks : dict
dictionary containing the ups position as key,
            the number of load/unload clicks as value
Examples
--------
>>> from wrapper import count_clicks
>>> count_clicks()
{
'EAS11_SLASH_2HB': 60,
'EAS11_SLASH_8H': 94,
'EAS11_SLASH_A3': 29,
'EAS11_SLASH_A7': 2,
'EAS1_SLASH_2HB': 71,
'EAS1_SLASH_8H': 89,
'EAS212_SLASH_MS1': 75
}
"""
ups_to_clicks = {}
pwd = os.getcwd()
if os.path.isfile(r"./data/ups_to_clicks.pickle"):
with open(r"./data/ups_to_clicks.pickle", "rb") as input_file:
return pickle.load(input_file)
os.chdir(self.__ups_data_path)
for ups in os.listdir():
df_tmp = pd.read_csv(ups+"/"+ups+"_IBat.csv")
df_tmp['Time'] =
|
pd.to_datetime(df_tmp['Time'], format="%Y-%m-%d %H:%M:%S.%f")
|
pandas.to_datetime
|
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
|
tm.assert_series_equal(result, expected)
|
pandas._testing.assert_series_equal
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 12:22:30 2019
@author: nk7g14
This file contains scripts for 'FluxQuery' for the swift telescope.
"""
import re
import os
import logging
import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from astropy.io import fits
import fluxquery.auxil as aux
class SWIFT:
def __init__(self):
super(SWIFT, self).__init__()
self.SWIFT_OBS_LIST = aux.GetObservationList(self.SOURCE_NAME, 'swiftmastr')
self.LIGHTCURVE_SWIFT_UVOT = None
def SWIFT_DownloadEventFiles(self):
'''
Downloads (level 2) Screened event files from UK Swift SSDC
'''
cwd = os.getcwd() #Current Working Directory
for i, obsID in enumerate(self.SWIFT_OBS_LIST['OBSID']):
url_xrt = 'http://www.swift.ac.uk/archive/reproc/{}/xrt/event/sw{}xpcw3po_cl.evt.gz'.format(obsID, obsID)
savedir_xrt = '{}/sources/{}/swift/xrt/sw{}xpcw3po_cl.evt.gz'.format(cwd, self.SOURCE_NAME, obsID)
url_uvot_cat = 'http://www.swift.ac.uk/archive/reproc/{}/uvot/products/sw{}u.cat.gz'.format(obsID, obsID)
savedir_uvot_cat = '{}/sources/{}/swift/uvot/cat/sw{}u.cat.gz'.format(cwd, self.SOURCE_NAME, obsID)
url_uvot_img = 'http://www.swift.ac.uk/archive/reproc/{}/uvot/image/sw{}uuu_sk.img.gz'.format(obsID, obsID)
savedir_uvot_img = '{}/sources/{}/swift/uvot/img/sw{}uuu_sk.img.gz'.format(cwd, self.SOURCE_NAME, obsID)
aux.FetchFile(url_xrt, savedir_xrt)
aux.FetchFile(url_uvot_cat, savedir_uvot_cat)
aux.FetchFile(url_uvot_img, savedir_uvot_img)
# aux.FetchFile('http://www.swift.ac.uk/archive/reproc/%s/uvot/image/sw%suuu_rw.img.gz' % (obsID,obsID),
# '%s/%s/uvot/img/sw%suuu_rw.img.gz' % (cwd, self.SOURCE_NAME, obsID))
def SWIFT_CleanUpgzFiles(self):
aux.UnzipAllgzFiles('sources/{}/swift/xrt'.format(self.SOURCE_NAME))
aux.RemoveAllgzFiles('sources/{}/swift/xrt'.format(self.SOURCE_NAME))
aux.UnzipAllgzFiles('sources/{}/swift/uvot/img'.format(self.SOURCE_NAME))
aux.RemoveAllgzFiles('sources/{}/swift/uvot/img'.format(self.SOURCE_NAME))
aux.UnzipAllgzFiles('sources/{}/swift/uvot/cat'.format(self.SOURCE_NAME))
aux.RemoveAllgzFiles('sources/{}/swift/uvot/cat'.format(self.SOURCE_NAME))
def SWIFT_GetStartTimes(self):
obsID = np.array(self.SWIFT_OBS_LIST['OBSID'], dtype='str')
start_time = np.array(self.SWIFT_OBS_LIST['START_TIME'])
df_starts =
|
pd.DataFrame()
|
pandas.DataFrame
|
import requests
import pandas as pd
# Provided by Quiverquant guys to GST users
API_QUIVERQUANT_KEY = "5cd2a65e96d0486efbe926a7cdbc1e8d8ab6c7b3"
def get_government_trading(gov_type: str, ticker: str = "") -> pd.DataFrame:
"""Returns the most recent transactions by members of government
Parameters
----------
gov_type: str
Type of government data between: Congress, Senate, House and Contracts
ticker : str
Ticker to get congress trading data from
Returns
-------
pd.DataFrame
Most recent transactions by members of U.S. Congress
"""
if gov_type == "congress":
if ticker:
url = (
f"https://api.quiverquant.com/beta/historical/congresstrading/{ticker}"
)
else:
url = "https://api.quiverquant.com/beta/live/congresstrading"
elif gov_type == "senate":
if ticker:
url = f"https://api.quiverquant.com/beta/historical/senatetrading/{ticker}"
else:
url = "https://api.quiverquant.com/beta/live/senatetrading"
elif gov_type == "house":
if ticker:
url = f"https://api.quiverquant.com/beta/historical/housetrading/{ticker}"
else:
url = "https://api.quiverquant.com/beta/live/housetrading"
elif gov_type == "contracts":
if ticker:
url = (
f"https://api.quiverquant.com/beta/historical/govcontractsall/{ticker}"
)
else:
url = "https://api.quiverquant.com/beta/live/govcontractsall"
else:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
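A brief usage sketch for `get_government_trading` (the HTTP request and response handling are not shown in the row above, so only the call signature is illustrated; the ticker is an example):
df_congress = get_government_trading("congress", ticker="AAPL")   # per-ticker history
df_contracts = get_government_trading("contracts")                 # live contracts feed
df_empty = get_government_trading("unknown")                       # falls through to pd.DataFrame()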
# Find rows that are disclosed more than once in the list and keep the most recent date
# -*- coding: utf-8 -*-
from datetime import date, datetime, timedelta
import time as t
import time
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import talib
#Load Tushare
from rqalpha.apis.api_base import history_bars, get_position
from rqalpha.mod.rqalpha_mod_sys_accounts.api.api_stock import order_target_value, order_value
import Utils.configuration_file_service as config_service
import tushare as ts
token = config_service.getProperty(section_name=config_service.TOKEN_SECTION_NAME,
property_name=config_service.TS_TOKEN_NAME)
pro = ts.pro_api(token)
import Utils.numeric_utils as TuRq
#df2 = pro.index_classify(level='L2', src='SW')
# Date format is month/day/year
start_date = '4/1/2020'
end_date = '7/1/2020'
start_date_p = '1/1/2020'
end_date_p = '4/1/2020'
# Date range for the current quarter
datelist = pd.date_range(start=start_date, end=end_date)
date_strings = [datetime.strftime(d, '%Y%m%d') for d in datelist]
# Date range for the prior quarter
datelist_prior = pd.date_range(start=start_date_p, end=end_date_p)
date_strings_prior = [datetime.strftime(d, '%Y%m%d') for d in datelist_prior]
# Read the file of fund company holdings for all quarters
fund_list = pd.read_excel (r'C:\Users\Austin\Desktop\Tushare\Tushare_Fund_data.xlsx', index = False)
# Fund company holdings for the current quarter
fund_list_snapshot = fund_list[fund_list.end_date.isin(date_strings)]
# Create a new column so that every row is unique. Step one: convert end_date to string format
fund_list_snapshot2 = fund_list_snapshot.copy()
fund_list_snapshot2.loc[:, 'end_date2'] = fund_list_snapshot2['end_date'].apply(str)
fund_list_snapshot2.loc[:, 'uniquecode1'] = fund_list_snapshot2.ts_code + "_" + fund_list_snapshot2.end_date2
fund_list_snapshot2.loc[:, 'uniquecode2'] = fund_list_snapshot2.ts_code + "_" + fund_list_snapshot2.symbol + "_" + fund_list_snapshot2.end_date2
# Find rows in the list with duplicate disclosures and keep the most recent date
keep_list = fund_list_snapshot.copy()
keep_list.loc[:, 'uniquecode1'] = keep_list.ts_code + "_" + keep_list['end_date'].apply(str)
keep_list1 = keep_list.drop_duplicates(subset=['ts_code'], keep='first')
keep_list1 = keep_list1['uniquecode1'].to_list()
# Keep only the uniquely disclosed rows in the list
fund_list_snapshot2 = fund_list_snapshot2[fund_list_snapshot2['uniquecode1'].isin(keep_list1)]
# Final list aggregation
fund_list_snapshot2 = pd.pivot_table(fund_list_snapshot2,index=["symbol"],aggfunc={'stk_float_ratio':np.sum,'ts_code':np.count_nonzero,'mkv':np.sum,'amount':np.sum})
fund_list_snapshot2 = fund_list_snapshot2.sort_values(by='stk_float_ratio', ascending=False)
# Fund company holdings for the prior quarter
fund_list_snapshot_prior = fund_list[fund_list.end_date.isin(date_strings_prior)]
# Create a new column so that each row is unique. Step 1: convert end_date to string format
fund_list_snapshot_prior2 = fund_list_snapshot_prior.copy()
fund_list_snapshot_prior2.loc[:, 'end_date2'] = fund_list_snapshot_prior2['end_date'].apply(str)
fund_list_snapshot_prior2.loc[:, 'uniquecode1'] = fund_list_snapshot_prior2.ts_code + "_" + fund_list_snapshot_prior2.end_date2
fund_list_snapshot_prior2.loc[:, 'uniquecode2'] = fund_list_snapshot_prior2.ts_code + "_" + fund_list_snapshot_prior2.symbol + "_" + fund_list_snapshot_prior2.end_date2
# Find rows in the list with duplicate disclosures and keep the most recent date
keep_list_prior = fund_list_snapshot_prior.copy()
keep_list_prior.loc[:, 'uniquecode1'] = keep_list_prior.ts_code + "_" + keep_list_prior['end_date'].apply(str)
keep_list_prior1 = keep_list_prior.drop_duplicates(subset=['ts_code'], keep='first')
keep_list_prior1 = keep_list_prior1['uniquecode1'].to_list()
# Keep only the uniquely disclosed rows in the list
fund_list_snapshot_prior2 = fund_list_snapshot_prior2[fund_list_snapshot_prior2['uniquecode1'].isin(keep_list_prior1)]
# Final list aggregation
fund_list_snapshot_prior2 = pd.pivot_table(fund_list_snapshot_prior2,index=["symbol"],aggfunc={'stk_float_ratio':np.sum,'ts_code':np.count_nonzero,'mkv':np.sum,'amount':np.sum})
fund_list_snapshot_prior2 = fund_list_snapshot_prior2.sort_values(by='stk_float_ratio', ascending=False)
#Export the df to excel
fund_list_snapshot2.to_excel(r'C:\Users\Austin\Desktop\Tushare\fund_list_snapshot.xlsx', index = True)
fund_list_snapshot_prior2.to_excel(r'C:\Users\Austin\Desktop\Tushare\fund_list_snapshot_prior.xlsx', index = True)
# Read the current-quarter and prior-quarter fund company holdings files
fund_list = pd.read_excel(r'C:\Users\Austin\Desktop\Tushare\fund_list_snapshot.xlsx')
fund_list_prior = pd.read_excel(r'C:\Users\Austin\Desktop\Tushare\fund_list_snapshot_prior.xlsx')
# Rename the prior-quarter columns to avoid duplicate names
fund_list_prior = fund_list_prior.rename(columns={'amount': 'amount_p', 'mkv': 'mkv_p', 'stk_float_ratio': 'stk_float_ratio_p', 'ts_code': 'ts_code_p'})
# Merge the two fund tables
#summary_fund_list = pd.concat([fund_list, fund_list_prior], axis=1, join='outer', sort=False)
summary_fund_list =
|
pd.merge(fund_list, fund_list_prior, on='symbol', how='outer')
|
pandas.merge
|
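A toy illustration of the outer merge used to build `summary_fund_list`; the symbols and values are invented, and the column names mirror the renamed prior-quarter table.
import pandas as pd

cur = pd.DataFrame({'symbol': ['600519', '000001'], 'mkv': [120.0, 80.0]})
prior = pd.DataFrame({'symbol': ['600519', '600036'], 'mkv_p': [100.0, 50.0]})

# how='outer' keeps symbols that appear in only one quarter (their missing side becomes NaN)
summary = pd.merge(cur, prior, on='symbol', how='outer')
print(summary)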
import numpy as np
import pandas as pd
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
df_train = pd.read_csv('train.csv')
df=pd.DataFrame(df_train)
m1 = pd.crosstab(index=df_train["Type"],columns="count")
m2 = pd.crosstab(index=df_train["Gender"],columns="count")
m3 = pd.crosstab(index=df_train["Color1"],columns="count")
m33 = pd.crosstab(index=df_train["Color2"],columns="count")
m4 = pd.crosstab(index=df_train["MaturitySize"],columns="count")
m5 = pd.crosstab(index=df_train["FurLength"],columns="count")
m6 = pd.crosstab(index=df_train["Vaccinated"],columns="count")
m7 = pd.crosstab(index=df_train["State"],columns="count")
m8 =
|
pd.crosstab(index=df_train["VideoAmt"],columns="count")
|
pandas.crosstab
|
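A small, self-contained example of the pd.crosstab(index=..., columns="count") counting pattern repeated above, using a made-up column.
import pandas as pd

df_toy = pd.DataFrame({'Type': [1, 2, 1, 1, 2]})
# One row per category, a single "count" column holding the frequency of each value
m_toy = pd.crosstab(index=df_toy['Type'], columns='count')
print(m_toy)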
import itertools
import operator
from os.path import dirname, join
import numpy as np
import pandas as pd
import pytest
from pandas.core import ops
from pandas.tests.extension import base
from pandas.tests.extension.conftest import ( # noqa: F401
as_array,
as_frame,
as_series,
fillna_method,
groupby_apply_op,
use_numpy,
)
from pint.errors import DimensionalityError
from pint.testsuite import QuantityTestCase, helpers
import pint_pandas as ppi
from pint_pandas import PintArray
ureg = ppi.PintType.ureg
@pytest.fixture(params=[True, False])
def box_in_series(request):
"""Whether to box the data in a Series"""
return request.param
@pytest.fixture
def dtype():
return ppi.PintType("pint[meter]")
@pytest.fixture
def data():
return ppi.PintArray.from_1darray_quantity(
np.arange(start=1.0, stop=101.0) * ureg.nm
)
@pytest.fixture
def data_missing():
return ppi.PintArray.from_1darray_quantity([np.nan, 1] * ureg.meter)
@pytest.fixture
def data_for_twos():
x = [
2.0,
] * 100
return ppi.PintArray.from_1darray_quantity(x * ureg.meter)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
@pytest.fixture
def data_repeated(data):
"""Return different versions of data for count times"""
# no idea what I'm meant to put here, try just copying from https://github.com/pandas-dev/pandas/blob/master/pandas/tests/extension/integer/test_integer.py
def gen(count):
for _ in range(count):
yield data
yield gen
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Simple fixture for testing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
@pytest.fixture
def data_for_sorting():
return ppi.PintArray.from_1darray_quantity([0.3, 10, -50] * ureg.centimeter)
# should probably get more sophisticated and do something like
# [1 * ureg.meter, 3 * ureg.meter, 10 * ureg.centimeter]
@pytest.fixture
def data_missing_for_sorting():
return ppi.PintArray.from_1darray_quantity([4, np.nan, -5] * ureg.centimeter)
# should probably get more sophisticated and do something like
# [4 * ureg.meter, np.nan, 10 * ureg.centimeter]
@pytest.fixture
def na_cmp():
"""Binary operator for comparing NA values."""
return lambda x, y: bool(np.isnan(x.magnitude)) & bool(np.isnan(y.magnitude))
@pytest.fixture
def na_value():
return ppi.PintType("meter").na_value
@pytest.fixture
def data_for_grouping():
# should probably get more sophisticated here and use units on all these
# quantities
a = 1.0
b = 2.0 ** 32 + 1
c = 2.0 ** 32 + 10
return ppi.PintArray.from_1darray_quantity(
[b, b, np.nan, np.nan, a, a, b, c] * ureg.m
)
# === missing from pandas extension docs about what has to be included in tests ===
# copied from pandas/pandas/conftest.py
_all_arithmetic_operators = [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__mod__",
"__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations
"""
return request.param
@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
* >=
* >
* ==
* !=
* <
* <=
"""
return request.param
# commented functions aren't implemented
_all_numeric_reductions = [
"sum",
"max",
"min",
"mean",
# "prod",
# "std",
# "var",
"median",
# "kurt",
# "skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names.
"""
return request.param
_all_boolean_reductions = ["all", "any"]
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
"""
Fixture for boolean reduction names.
"""
return request.param
# =================================================================
class TestCasting(base.BaseCastingTests):
pass
class TestConstructors(base.BaseConstructorsTests):
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
result = pd.Series(index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
# GH 33559 - empty index
result = pd.Series(index=[], dtype=dtype)
expected = pd.Series([], index=pd.Index([], dtype="object"), dtype=dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
result = pd.Series(na_value, index=[1, 2, 3], dtype=dtype)
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_series_constructor_scalar_with_index(self, data, dtype):
scalar = data[0]
result = pd.Series(scalar, index=[1, 2, 3], dtype=dtype)
expected = pd.Series([scalar] * 3, index=[1, 2, 3], dtype=dtype)
self.assert_series_equal(result, expected)
result = pd.Series(scalar, index=["foo"], dtype=dtype)
expected = pd.Series([scalar], index=["foo"], dtype=dtype)
self.assert_series_equal(result, expected)
class TestDtype(base.BaseDtypeTests):
pass
class TestGetitem(base.BaseGetitemTests):
def test_getitem_mask_raises(self, data):
mask = np.array([True, False])
msg = f"Boolean index has wrong length: 2 instead of {len(data)}"
with pytest.raises(IndexError, match=msg):
data[mask]
mask = pd.array(mask, dtype="boolean")
with pytest.raises(IndexError, match=msg):
data[mask]
class TestGroupby(base.BaseGroupbyTests):
@pytest.mark.xfail(run=True, reason="__iter__ / __len__ issue")
def test_groupby_apply_identity(self, data_for_grouping):
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
result = df.groupby("A").B.apply(lambda x: x.array)
expected = pd.Series(
[
df.B.iloc[[0, 1, 6]].array,
df.B.iloc[[2, 3]].array,
df.B.iloc[[4, 5]].array,
df.B.iloc[[7]].array,
],
index=pd.Index([1, 2, 3, 4], name="A"),
name="B",
)
self.assert_series_equal(result, expected)
class TestInterface(base.BaseInterfaceTests):
pass
class TestMethods(base.BaseMethodsTests):
@pytest.mark.filterwarnings("ignore::pint.UnitStrippedWarning")
# See test_setitem_mask_broadcast note
@pytest.mark.parametrize("dropna", [True, False])
def test_value_counts(self, all_data, dropna):
all_data = all_data[:10]
if dropna:
other = all_data[~all_data.isna()]
else:
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
expected =
|
pd.Series(other)
|
pandas.Series
|
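The truncated test compares a value_counts result against a hand-built expectation; below is a plain-pandas sketch of that pattern (the real fixture data is a PintArray-backed Series, omitted here so the example runs without pint_pandas).
import pandas as pd

data = pd.Series([1.0, 1.0, 2.0])
result = data.value_counts().sort_index()
# Hand-built expected counts: 1.0 occurs twice, 2.0 once
expected = pd.Series([2, 1], index=pd.Index([1.0, 2.0]))
pd.testing.assert_series_equal(result, expected, check_names=False)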
"""Script where the final pipeline is trained from scratch (preprocessing, encoding, decoding) with optimal
hyperparameters on all the training data and applied to the test set"""
import argparse
import os
from typing import Dict
import joblib
from sklearn.metrics import precision_recall_fscore_support
from pk_classifier.bootstrap import simple_tokenizer, TextSelector, str2bool
from pk_classifier.utils import read_crossval, ConcatenizerEmb, Embedder, EmbeddingsJoiner
from pk_classifier.utils import make_preprocessing_pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline, FeatureUnion
import pandas as pd
import time
import xgboost as xgb
def train_final_pipeline(train_data: pd.DataFrame, train_labels: pd.DataFrame, best_params_cv: Dict) -> Pipeline:
assert train_data.pmid.to_list() == train_labels.pmid.to_list()
# ============ 1. Preprocessing and BERT embeddings pipeline =================================== #
preprocess_and_bert_pipe = Pipeline([
('tokens', FeatureUnion(transformer_list=make_preprocessing_pipeline(field_list=["title", "abstract",
"mesh_terms",
"publication_types"], ngram=1),
n_jobs=-1)),
('tokens_conc', ConcatenizerEmb(" ;; ")),
('embedder', Embedder(fields=['abstract', 'title', 'BoW_Ready'], maxmin=False)),
('embeddings_joiner', EmbeddingsJoiner(out_colname='embedding'))
])
# ============ 2. BoW encoding and classification pipeline =================================== #
rd_seed = 10042006
train_labels = train_labels['label']
balancing_factor = train_labels.value_counts()["Not Relevant"] / train_labels.value_counts()["Relevant"]
encoder = CountVectorizer(tokenizer=simple_tokenizer, ngram_range=(1, 1), lowercase=False, preprocessor=None,
min_df=best_params_cv['encoder__bow__vect__min_df'])
normalizer = TfidfTransformer(norm="l1", use_idf=False)
decoder = xgb.XGBClassifier(random_state=rd_seed,
n_estimators=148,
objective='binary:logistic',
learning_rate=0.1,
colsample_bytree=best_params_cv['clf__colsample_bytree'],
max_depth=best_params_cv['clf__max_depth'],
scale_pos_weight=balancing_factor, nthread=-1, n_jobs=-1)
enc_dec_pipe = Pipeline([
('encoder', FeatureUnion(transformer_list=[
('bow', Pipeline([
('selector', TextSelector('BoW_Ready', emb=False)),
('vect', encoder),
('norm', normalizer)
])
),
('abs', Pipeline([
('selector', TextSelector('embedding', emb=True))
]))
])),
('clf', decoder)
])
final_pipe = Pipeline([
('preprocess_and_embedder', preprocess_and_bert_pipe),
('bow_encoder_decoder', enc_dec_pipe)
])
t1 = time.time()
final_pipe.fit(train_data, train_labels)
t2 = time.time()
print("Overall time was: {}s, "
"which approximates {}s per instance".format(round(t2 - t1, 2), round((t2 - t1) / len(train_data), 2)))
return final_pipe
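# Illustrative usage only (a sketch, not part of the original script): fit the final
# pipeline and persist it with joblib for later prediction. The output file name and
# helper name are hypothetical.
def _example_persist_pipeline(train_data, train_labels, best_params_cv, out_path="final_pipeline.joblib"):
    pipeline = train_final_pipeline(train_data, train_labels, best_params_cv)
    joblib.dump(pipeline, out_path)
    return pipeline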
def predict_on_test(test_data: pd.DataFrame, test_labels: pd.DataFrame, optimal_pipeline: Pipeline) -> pd.DataFrame:
assert test_data.pmid.to_list() == test_labels.pmid.to_list()
test_labels = test_labels['label']
print("Predicting test instances, this might take a few minutes...")
pred_test = optimal_pipeline.predict(test_data)
test_results = pd.DataFrame(pred_test == test_labels.values, columns=['Result'])
accuracy = sum(test_results['Result'].values) / len(test_results)
test_results['pmid'] = test_data['pmid']
test_results['Correct label'] = test_labels
precision_test, recall_test, f1_test, _ = precision_recall_fscore_support(test_labels, pred_test,
average='binary',
pos_label="Relevant")
print("===== Final results on the test set ==== ")
print("Precision: {}\nRecall: {}\nF1: {}\nAccuracy: {}".format(precision_test, recall_test, f1_test, accuracy))
test_results.sort_values(by=['Result'])
return test_results
def run(path_train: str, train_labels: str, path_test: str, test_labels: str, cv_dir: str, output_dir: str,
train_pipeline: bool):
train_data = pd.read_parquet(path_train).sort_values(by=['pmid']).reset_index(drop=True)
test_data = pd.read_parquet(path_test).sort_values(by=['pmid']).reset_index(drop=True)
train_labels =
|
pd.read_csv(train_labels)
|
pandas.read_csv
|
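A sketch of the truncated label-loading step: read the CSV with pd.read_csv and sort by 'pmid' so the labels align row-for-row with the parquet frames sorted above; the helper name is made up.
import pandas as pd

def load_sorted_labels(labels_path: str) -> pd.DataFrame:
    # Sort by pmid and reset the index so labels line up with the sorted data frames
    labels = pd.read_csv(labels_path)
    return labels.sort_values(by=['pmid']).reset_index(drop=True)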
"""
This module contains a collection of functions which make plots (saved as png files) using matplotlib, generated from
some model fits and cross-validation evaluation within a MAST-ML run.
This module also contains a method to create python notebooks containing plotted data and the relevant source code from
this module, to enable the user to make their own modifications to the created plots in a straightforward way (useful for
tweaking plots for a presentation or publication).
"""
import math
import statistics
import os
import copy
import pandas as pd
import itertools
import warnings
import logging
from collections.abc import Iterable
from os.path import join
from collections import OrderedDict
from math import log, floor, ceil
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.ensemble._forest import _generate_sample_indices, _get_n_samples_bootstrap
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Ignore the harmless warning about the gelsd driver on mac.
warnings.filterwarnings(action="ignore", module="scipy",
message="^internal gelsd")
# Ignore matplotlib deprecation warning (set as all warnings for now)
warnings.filterwarnings(action="ignore")
import numpy as np
from sklearn.metrics import confusion_matrix, roc_curve, auc, precision_recall_curve
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure, figaspect
from matplotlib.animation import FuncAnimation
from matplotlib.font_manager import FontProperties
from scipy.stats import gaussian_kde, norm
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
# Needed imports for ipynb_maker
#from mastml.utils import nice_range
#from mastml.metrics import nice_names
import inspect
import textwrap
from pandas import DataFrame, Series
import nbformat
from functools import wraps
import forestci as fci
from forestci.calibration import calibrateEB
import copy
matplotlib.rc('font', size=18, family='sans-serif') # set all font to bigger
matplotlib.rc('figure', autolayout=True) # turn on autolayout
# adding dpi as a constant global so it can be changed later
DPI = 250
#logger = logging.getLogger() # only used inside ipynb_maker I guess
# HEADERENDER don't delete this line, it's used by ipynb maker
logger = logging.getLogger('mastml') # the real logger
def ipynb_maker(plot_func):
"""
This method creates Jupyter Notebooks so user can modify and regenerate the plots produced by MAST-ML.
Args:
plot_func: (plot_helper method), a plotting method contained in plot_helper.py which contains the
@ipynb_maker decorator
Returns:
(plot_helper method), the same plot_func as used as input, but after having written the Jupyter notebook with source code to create plot
"""
from mastml import plot_helper # Strange self-import but it works, as had cyclic import issues with ipynb_maker as its own module
@wraps(plot_func)
def wrapper(*args, **kwargs):
# convert everything to kwargs for easier display
# from geniuses at https://stackoverflow.com/a/831164
#kwargs.update(dict(zip(plot_func.func_code.co_varnames, args)))
sig = inspect.signature(plot_func)
binding = sig.bind(*args, **kwargs)
all_args = binding.arguments
# if this is an outdir style function, fill in savepath and delete outdir
if 'savepath' in all_args:
ipynb_savepath = all_args['savepath']
knows_savepath = True
basename = os.path.basename(ipynb_savepath) # fix absolute path problem
elif 'outdir' in all_args:
knows_savepath = False
basename = plot_func.__name__
ipynb_savepath = os.path.join(all_args['outdir'], basename)
else:
raise Exception('you must have an "outdir" or "savepath" argument to use ipynb_maker')
readme = textwrap.dedent(f"""\
This notebook was automatically generated from your MAST-ML run so you can recreate the
plots. Some things are a bit different from the usual way of creating plots - we are
using the [object oriented
interface](https://matplotlib.org/tutorials/introductory/lifecycle.html) instead of
pyplot to create the `fig` and `ax` instances.
""")
# get source of the top of plot_helper.py
header = ""
with open(plot_helper.__file__) as f:
for line in f.readlines():
if 'HEADERENDER' in line:
break
header += line
core_funcs = [plot_helper.stat_to_string, plot_helper.plot_stats, plot_helper.make_fig_ax,
plot_helper.get_histogram_bins, plot_helper.nice_names, plot_helper.nice_range,
plot_helper.nice_mean, plot_helper.nice_std, plot_helper.rounder, plot_helper._set_tick_labels,
plot_helper._set_tick_labels_different, plot_helper._nice_range_helper, plot_helper._nearest_pow_ten,
plot_helper._three_sigfigs, plot_helper._n_sigfigs, plot_helper._int_if_int, plot_helper._round_up,
plot_helper.prediction_intervals]
func_strings = '\n\n'.join(inspect.getsource(func) for func in core_funcs)
plot_func_string = inspect.getsource(plot_func)
# remove first line that has this decorator on it (!!!)
plot_func_string = '\n'.join(plot_func_string.split('\n')[1:])
# put the arguments and their values in the code
arg_assignments = []
arg_names = []
for key, var in all_args.items():
if isinstance(var, DataFrame):
# this is amazing
arg_assignments.append(f"{key} = pd.read_csv(StringIO('''\n{var.to_csv(index=False)}'''))")
elif isinstance(var, Series):
arg_assignments.append(f"{key} = pd.Series(pd.read_csv(StringIO('''\n{var.to_csv(index=False)}''')).iloc[:,0])")
else:
arg_assignments.append(f'{key} = {repr(var)}')
arg_names.append(key)
args_block = ("from numpy import array\n" +
"from collections import OrderedDict\n" +
"from io import StringIO\n" +
"from sklearn.gaussian_process import GaussianProcessRegressor # Need for error plots\n" +
"from sklearn.gaussian_process.kernels import * # Need for error plots\n" +
"from sklearn.ensemble import RandomForestRegressor # Need for error plots\n" +
'\n'.join(arg_assignments))
arg_names = ', '.join(arg_names)
if knows_savepath:
if '.png' not in basename:
basename += '.png'
main = textwrap.dedent(f"""\
import pandas as pd
from IPython.display import Image, display
{plot_func.__name__}({arg_names})
display(Image(filename='{basename}'))
""")
else:
main = textwrap.dedent(f"""\
import pandas as pd
from IPython.display import Image, display
plot_paths = plot_predicted_vs_true(train_quad, test_quad, outdir, label)
for plot_path in plot_paths:
display(Image(filename=plot_path))
""")
nb = nbformat.v4.new_notebook()
readme_cell = nbformat.v4.new_markdown_cell(readme)
text_cells = [header, func_strings, plot_func_string, args_block, main]
cells = [readme_cell] + [nbformat.v4.new_code_cell(cell_text) for cell_text in text_cells]
nb['cells'] = cells
nbformat.write(nb, ipynb_savepath + '.ipynb')
return plot_func(*args, **kwargs)
return wrapper
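# Illustrative usage only (not part of MAST-ML): any plotting helper that accepts a
# 'savepath' keyword can be wrapped with @ipynb_maker so a companion .ipynb is written
# next to the saved .png. The helper below is a hypothetical example, not a real
# MAST-ML plot type.
@ipynb_maker
def _example_savepath_plot(y_values, savepath):
    fig, ax = make_fig_ax()
    ax.plot(y_values)
    fig.savefig(savepath, dpi=DPI, bbox_inches='tight')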
def make_train_test_plots(run, path, is_classification, label, model, train_X, test_X, groups=None):
"""
General plotting method used to execute sequence of specific plots of train-test data analysis
Args:
run: (dict), a particular split_result from masml_driver
path: (str), path to save the generated plots and analysis of split_result designated in 'run'
is_classification: (bool), whether or not the analysis is a classification task
label: (str), name of the y data variable being fit
model: (scikit-learn model object), a scikit-learn model/estimator
train_X: (numpy array), array of X features used in training
test_X: (numpy array), array of X features used in testing
groups: (numpy array), array of group names
Returns:
None
"""
y_train_true, y_train_pred, y_test_true = \
run['y_train_true'], run['y_train_pred'], run['y_test_true']
y_test_pred, train_metrics, test_metrics = \
run['y_test_pred'], run['train_metrics'], run['test_metrics']
train_groups, test_groups = run['train_groups'], run['test_groups']
if is_classification:
# Need these class prediction probabilities for ROC curve analysis
y_train_pred_proba = run['y_train_pred_proba']
y_test_pred_proba = run['y_test_pred_proba']
title = 'train_confusion_matrix'
plot_confusion_matrix(y_train_true, y_train_pred,
join(path, title+'.png'), train_metrics,
title=title)
title = 'test_confusion_matrix'
plot_confusion_matrix(y_test_true, y_test_pred,
join(path, title+'.png'), test_metrics,
title=title)
title = 'train_roc_curve'
plot_roc_curve(y_train_true, y_train_pred_proba, join(path, title+'.png'))
title = 'test_roc_curve'
plot_roc_curve(y_test_true, y_test_pred_proba, join(path, title+'.png'))
title = 'train_precision_recall_curve'
plot_precision_recall_curve(y_train_true, y_train_pred_proba, join(path, title+'.png'))
title = 'test_precision_recall_curve'
plot_precision_recall_curve(y_test_true, y_test_pred_proba, join(path, title + '.png'))
else: # is_regression
plot_predicted_vs_true((y_train_true, y_train_pred, train_metrics, train_groups),
(y_test_true, y_test_pred, test_metrics, test_groups),
path, label=label)
title = 'train_residuals_histogram'
plot_residuals_histogram(y_train_true, y_train_pred,
join(path, title+'.png'), train_metrics,
title=title, label=label)
title = 'test_residuals_histogram'
plot_residuals_histogram(y_test_true, y_test_pred,
join(path, title+'.png'), test_metrics,
title=title, label=label)
def make_error_plots(run, path, is_classification, label, model, train_X, test_X, rf_error_method, rf_error_percentile,
is_validation, validation_column_name, validation_X, groups=None):
y_train_true, y_train_pred, y_test_true = \
run['y_train_true'], run['y_train_pred'], run['y_test_true']
y_test_pred, train_metrics, test_metrics = \
run['y_test_pred'], run['train_metrics'], run['test_metrics']
train_groups, test_groups = run['train_groups'], run['test_groups']
if is_validation:
y_validation_pred, y_validation_true, prediction_metrics = \
run['y_validation_pred'+'_'+str(validation_column_name)], \
run['y_validation_true'+'_'+str(validation_column_name)], \
run['prediction_metrics']
if is_classification:
logger.debug('There is no error distribution plotting for classification problems, just passing through...')
else: # is_regression
#title = 'train_normalized_error'
#plot_normalized_error(y_train_true, y_train_pred, join(path, title+'.png'), model, error_method, percentile,
# X=train_X, Xtrain=train_X, Xtest=test_X)
title = 'test_normalized_error'
plot_normalized_error(y_test_true, y_test_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=test_X, Xtrain=train_X, Xtest=test_X)
#title = 'train_cumulative_normalized_error'
#plot_cumulative_normalized_error(y_train_true, y_train_pred, join(path, title+'.png'), model, error_method,
# percentile, X=train_X, Xtrain=train_X, Xtest=test_X)
title = 'test_cumulative_normalized_error'
plot_cumulative_normalized_error(y_test_true, y_test_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=test_X, Xtrain=train_X, Xtest=test_X)
# HERE, add your RMS residual vs. error plot function
if model.__class__.__name__ in ['RandomForestRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor',
'GradientBoostingRegressor', 'EnsembleRegressor']:
y_all_data = np.concatenate([y_test_true, y_train_true])
plot_real_vs_predicted_error(y_all_data, path, model, data_test_type='test')
if is_validation:
title = 'validation_cumulative_normalized_error'
plot_cumulative_normalized_error(y_validation_true, y_validation_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=validation_X, Xtrain=train_X, Xtest=test_X)
title = 'validation_normalized_error'
plot_normalized_error(y_validation_true, y_validation_pred, join(path, title + '.png'), model, rf_error_method,
rf_error_percentile, X=validation_X, Xtrain=train_X, Xtest=test_X)
if model.__class__.__name__ in ['RandomForestRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor',
'GradientBoostingRegressor', 'EnsembleRegressor']:
y_all_data = np.concatenate([y_test_true, y_train_true])
plot_real_vs_predicted_error(y_all_data, path, model, data_test_type='validation')
@ipynb_maker
def plot_confusion_matrix(y_true, y_pred, savepath, stats, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
Method used to generate a confusion matrix for a classification run. Additional information can be found
at: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted confusion matrix
stats: (dict), dict of training or testing statistics for a particular run
normalize: (bool), whether to write the matrix cell counts as floats ('.2f') instead of integers
title: (str), title of the confusion matrix plot
cmap: (matplotlib colormap), the color map to use for confusion matrix plotting
Returns:
None
"""
# calculate confusion matrix and labels in correct order
cm = confusion_matrix(y_true, y_pred)
#classes = sorted(list(set(y_true).intersection(set(y_pred))))
classes = sorted(list(set(y_true).union(set(y_pred))))
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
#ax.set_title(title)
# create the colorbar, not really needed but everyone's got 'em
mappable = ax.imshow(cm, interpolation='nearest', cmap=cmap)
#fig.colorbar(mappable)
# set x and y ticks to labels
tick_marks = range(len(classes))
ax.set_xticks(tick_marks)
ax.set_xticklabels(classes, rotation='horizontal', fontsize=18)
ax.set_yticks(tick_marks)
ax.set_yticklabels(classes, rotation='horizontal', fontsize=18)
# draw number in the boxes
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
# plots the stats
plot_stats(fig, stats, x_align=0.60, y_align=0.90)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_roc_curve(y_true, y_pred, savepath):
"""
Method to calculate and plot the receiver-operator characteristic curve for classification model results
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted ROC curve
Returns:
None
"""
#TODO: make this work when probability=False in model params. Suggest user set probability=True!!
#classes = sorted(list(set(y_true).union(set(y_pred))))
#n_classes = y_pred.shape[1]
classes = list(np.unique(y_true))
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(len(classes)):
fpr[i], tpr[i], _ = roc_curve(y_true, y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
x_align = 0.95
fig, ax = make_fig_ax(aspect_ratio=0.66, x_align=x_align, left=0.15)
colors = ['blue', 'red']
#for i in range(len(classes)):
# ax.plot(fpr[i], tpr[i], color=colors[i], lw=2, label='ROC curve class '+str(i)+' (area = %0.2f)' % roc_auc[i])
ax.plot(fpr[1], tpr[1], color=colors[0], lw=2, label='ROC curve' + ' (area = %0.2f)' % roc_auc[1])
ax.plot([0, 1], [0, 1], color='black', label='Random guess', lw=2, linestyle='--')
ax.set_xticks(np.linspace(0, 1, 5))
ax.set_yticks(np.linspace(0, 1, 5))
_set_tick_labels(ax, maxx=1, minn=0)
ax.set_xlabel('False Positive Rate', fontsize='16')
ax.set_ylabel('True Positive Rate', fontsize='16')
ax.legend(loc="lower right", fontsize=12)
#plot_stats(fig, stats, x_align=0.60, y_align=0.90)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_precision_recall_curve(y_true, y_pred, savepath):
"""
Method to calculate and plot the precision-recall curve for classification model results
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted precision-recall curve
Returns:
None
"""
# Note this only works with probability predictions of the classifier labels.
classes = list(np.unique(y_true))
precision = dict()
recall = dict()
#roc_auc = dict()
for i in range(len(classes)):
precision[i], recall[i], _ = precision_recall_curve(y_true, y_pred[:, i])
x_align = 0.95
fig, ax = make_fig_ax(aspect_ratio=0.66, x_align=x_align, left=0.15)
colors = ['blue', 'red']
#for i in range(len(classes)):
# ax.plot(fpr[i], tpr[i], color=colors[i], lw=2, label='ROC curve class '+str(i)+' (area = %0.2f)' % roc_auc[i])
ax.step(recall[1], precision[1], color=colors[0], lw=2, label='Precision-recall curve')
#ax.fill_between(recall[1], precision[1], alpha=0.4, color=colors[0])
ax.set_xticks(np.linspace(0, 1, 5))
ax.set_yticks(np.linspace(0, 1, 5))
_set_tick_labels(ax, maxx=1, minn=0)
ax.set_xlabel('Recall', fontsize='16')
ax.set_ylabel('Precision', fontsize='16')
ax.legend(loc="upper right", fontsize=12)
#plot_stats(fig, stats, x_align=0.60, y_align=0.90)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
@ipynb_maker
def plot_residuals_histogram(y_true, y_pred, savepath,
stats, title='residuals histogram', label='residuals'):
"""
Method to calculate and plot the histogram of residuals from regression model
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted residuals histogram
stats: (dict), dict of training or testing statistics for a particular run
title: (str), title of residuals histogram
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
#ax.set_title(title)
# do the actual plotting
residuals = y_true - y_pred
#Output residuals data and stats to spreadsheet
path = os.path.dirname(savepath)
pd.DataFrame(residuals).describe().to_csv(os.path.join(path,'residual_statistics.csv'))
pd.DataFrame(residuals).to_csv(path+'/'+'residuals.csv')
#Get num_bins using smarter method
num_bins = get_histogram_bins(y_df=residuals)
ax.hist(residuals, bins=num_bins, color='b', edgecolor='k')
# normal text stuff
ax.set_xlabel('Value of '+label, fontsize=16)
ax.set_ylabel('Number of occurrences', fontsize=16)
# make y axis ints, because it is discrete
#ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plot_stats(fig, stats, x_align=x_align, y_align=0.90)
plot_stats(fig, pd.DataFrame(residuals).describe().to_dict()[0], x_align=x_align, y_align=0.60)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_target_histogram(y_df, savepath, title='target histogram', label='target values'):
"""
Method to plot the histogram of true y values
Args:
y_df: (pandas dataframe), dataframe of true y data values
savepath: (str), path to save the plotted target histogram
title: (str), title of residuals histogram
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.70
fig, ax = make_fig_ax(aspect_ratio=0.5, x_align=x_align)
#ax.set_title(title)
#Get num_bins using smarter method
num_bins = get_histogram_bins(y_df=y_df)
# do the actual plotting
try:
ax.hist(y_df, bins=num_bins, color='b', edgecolor='k')#, histtype='stepfilled')
except:
print('Could not plot target histogram')
return
# normal text stuff
ax.set_xlabel('Value of '+label, fontsize=16)
ax.set_ylabel('Number of occurrences', fontsize=16)
# make y axis ints, because it is discrete
#ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plot_stats(fig, dict(y_df.describe()), x_align=x_align, y_align=0.90, fontsize=14)
# Save input data stats to csv
savepath_parse = savepath.split('target_histogram.png')[0]
y_df.describe().to_csv(savepath_parse+'/input_data_statistics.csv')
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_predicted_vs_true(train_quad, test_quad, outdir, label):
"""
Method to create a parity plot (predicted vs. true values)
Args:
train_quad: (tuple), tuple containing 4 numpy arrays: true training y data, predicted training y data,
training metric data, and groups used in training
test_quad: (tuple), tuple containing 4 numpy arrays: true test y data, predicted test y data,
testing metric data, and groups used in testing
outdir: (str), path to save plots to
label: (str), label used for axis labeling
Returns:
None
"""
filenames = list()
y_train_true, y_train_pred, train_metrics, train_groups = train_quad
y_test_true, y_test_pred, test_metrics, test_groups = test_quad
# make diagonal line from absolute min to absolute max of any data point
# using round because Ryan did - but won't that ruin small numbers??? TODO this
#max1 = max(y_train_true.max(), y_train_pred.max(),
# y_test_true.max(), y_test_pred.max())
max1 = max(y_train_true.max(), y_test_true.max())
#min1 = min(y_train_true.min(), y_train_pred.min(),
# y_test_true.min(), y_test_pred.min())
min1 = min(y_train_true.min(), y_test_true.min())
max1 = round(float(max1), rounder(max1-min1))
min1 = round(float(min1), rounder(max1-min1))
for y_true, y_pred, stats, groups, title_addon in \
(train_quad+('train',), test_quad+('test',)):
# make fig and ax, use x_align when placing text so things don't overlap
x_align=0.64
fig, ax = make_fig_ax(x_align=x_align)
# set tick labels
# notice that we use the same max and min for all three. Don't
# calculate those inside the loop, because all three should be on the same scale and axis
_set_tick_labels(ax, max1, min1)
# plot diagonal line
ax.plot([min1, max1], [min1, max1], 'k--', lw=2, zorder=1)
# do the actual plotting
if groups is None:
ax.scatter(y_true, y_pred, color='blue', edgecolors='black', s=100, zorder=2, alpha=0.7)
else:
handles = dict()
unique_groups = np.unique(np.concatenate((train_groups, test_groups), axis=0))
unique_groups_train = np.unique(train_groups)
unique_groups_test = np.unique(test_groups)
#logger.debug(' '*12 + 'unique groups: ' +str(list(unique_groups)))
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
for groupcount, group in enumerate(unique_groups):
mask = groups == group
#logger.debug(' '*12 + f'{group} group_percent = {np.count_nonzero(mask) / len(groups)}')
handles[group] = ax.scatter(y_true[mask], y_pred[mask], label=group, color=colors[colorcount],
marker=markers[markercount], s=100, alpha=0.7)
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
if title_addon == 'train':
to_delete = [k for k in handles.keys() if k not in unique_groups_train]
for k in to_delete:
del handles[k]
elif title_addon == 'test':
to_delete = [k for k in handles.keys() if k not in unique_groups_test]
for k in to_delete:
del handles[k]
ax.legend(handles.values(), handles.keys(), loc='lower right', fontsize=12)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
plot_stats(fig, stats, x_align=x_align, y_align=0.90)
filename = 'predicted_vs_true_'+ title_addon + '.png'
filenames.append(filename)
fig.savefig(join(outdir, filename), dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y_pred': y_pred, 'y_true': y_true})
df.to_csv(join(outdir, 'predicted_vs_true_' + title_addon + '.csv'))
return filenames
def plot_scatter(x, y, savepath, groups=None, xlabel='x', label='target data'):
"""
Method to create a general scatter plot
Args:
x: (numpy array), array of x data
y: (numpy array), array of y data
savepath: (str), path to save plots to
groups: (list), list of group labels
xlabel: (str), label used for x-axis labeling
label: (str), label used for y-axis labeling
Returns:
None
"""
# Set image aspect ratio:
fig, ax = make_fig_ax()
# set tick labels
max_tick_x = max(x)
min_tick_x = min(x)
max_tick_y = max(y)
min_tick_y = min(y)
max_tick_x = round(float(max_tick_x), rounder(max_tick_x-min_tick_x))
min_tick_x = round(float(min_tick_x), rounder(max_tick_x-min_tick_x))
max_tick_y = round(float(max_tick_y), rounder(max_tick_y-min_tick_y))
min_tick_y = round(float(min_tick_y), rounder(max_tick_y-min_tick_y))
#divisor_y = get_divisor(max(y), min(y))
#max_tick_y = round_up(max(y), divisor_y)
#min_tick_y = round_down(min(y), divisor_y)
_set_tick_labels_different(ax, max_tick_x, min_tick_x, max_tick_y, min_tick_y)
if groups is None:
ax.scatter(x, y, c='b', edgecolor='darkblue', zorder=2, s=100, alpha=0.7)
else:
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
for groupcount, group in enumerate(np.unique(groups)):
mask = groups == group
ax.scatter(x[mask], y[mask], label=group, color=colors[colorcount], marker=markers[markercount], s=100, alpha=0.7)
ax.legend(loc='lower right', fontsize=12)
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
ax.set_xlabel(xlabel, fontsize=16)
ax.set_ylabel('Value of '+label, fontsize=16)
#ax.set_xticklabels(rotation=45)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
def plot_keras_history(model_history, savepath, plot_type):
# Set image aspect ratio:
fig, ax = make_fig_ax()
keys = model_history.history.keys()
for k in keys:
if 'loss' not in k and 'val' not in k:
metric = k
accuracy = model_history.history[str(metric)]
loss = model_history.history['loss']
if plot_type == 'accuracy':
ax.plot(accuracy, label='training '+str(metric))
ax.set_ylabel(str(metric)+' (Accuracy)', fontsize=16)
try:
validation_accuracy = model_history.history['val_'+str(metric)]
ax.plot(validation_accuracy, label='validation '+str(metric))
except:
pass
if plot_type == 'loss':
ax.plot(loss, label='training loss')
ax.set_ylabel(str(metric)+' (Loss)', fontsize=16)
try:
validation_loss = model_history.history['val_loss']
ax.plot(validation_loss, label='validation loss')
except:
pass
ax.legend(loc='upper right', fontsize=12)
#_set_tick_labels_different(ax, max_tick_x, min_tick_x, max_tick_y, min_tick_y)
ax.set_xlabel('Epochs', fontsize=16)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
@ipynb_maker
def plot_best_worst_split(y_true, best_run, worst_run, savepath,
title='Best Worst Overlay', label='target_value'):
"""
Method to create a parity plot (predicted vs. true values) of just the best scoring and worst scoring CV splits
Args:
y_true: (numpy array), array of true y data
best_run: (dict), the best scoring split_result from mastml_driver
worst_run: (dict), the worst scoring split_result from mastml_driver
savepath: (str), path to save plots to
title: (str), title of the best_worst_split plot
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
maxx = max(y_true) # TODO is round the right thing here?
minn = min(y_true)
maxx = round(float(maxx), rounder(maxx-minn))
minn = round(float(minn), rounder(maxx-minn))
ax.plot([minn, maxx], [minn, maxx], 'k--', lw=2, zorder=1)
# set tick labels
_set_tick_labels(ax, maxx, minn)
# do the actual plotting
ax.scatter(best_run['y_test_true'], best_run['y_test_pred'], c='red',
alpha=0.7, label='best test', edgecolor='darkred', zorder=2, s=100)
ax.scatter(worst_run['y_test_true'], worst_run['y_test_pred'], c='blue',
alpha=0.7, label='worst test', edgecolor='darkblue', zorder=3, s=60)
ax.legend(loc='lower right', fontsize=12)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
#font_dict = {'size' : 10, 'family' : 'sans-serif'}
# Duplicate the stats dicts with an additional label
best_stats = OrderedDict([('Best Run', None)])
best_stats.update(best_run['test_metrics'])
worst_stats = OrderedDict([('Worst Run', None)])
worst_stats.update(worst_run['test_metrics'])
plot_stats(fig, best_stats, x_align=x_align, y_align=0.90)
plot_stats(fig, worst_stats, x_align=x_align, y_align=0.60)
fig.savefig(savepath + '.png', dpi=DPI, bbox_inches='tight')
df_best = pd.DataFrame({'best run pred': best_run['y_test_pred'], 'best run true': best_run['y_test_true']})
df_worst = pd.DataFrame({'worst run pred': worst_run['y_test_pred'], 'worst run true': worst_run['y_test_true']})
df_best.to_csv(savepath + '_best.csv')
df_worst.to_csv(savepath + '_worst.csv')
@ipynb_maker
def plot_best_worst_per_point(y_true, y_pred_list, savepath, metrics_dict,
avg_stats, title='best worst per point', label='target_value'):
"""
Method to create a parity plot (predicted vs. true values) of the set of best and worst CV scores for each
individual data point.
Args:
y_true: (numpy array), array of true y data
y_pred_list: (list), list of numpy arrays containing predicted y data for each CV split
savepath: (str), path to save plots to
metrics_dict: (dict), dict of scikit-learn metric objects to calculate score of predicted vs. true values
avg_stats: (dict), dict of calculated average metrics over all CV splits
title: (str), title of the best_worst_per_point plot
label: (str), label used for axis labeling
Returns:
None
"""
worsts = []
bests = []
new_y_true = []
for yt, y_pred in zip(y_true, y_pred_list):
if len(y_pred) == 0 or np.nan in y_pred_list or np.isnan(yt):
continue
worsts.append(max(y_pred, key=lambda yp: abs(yp-yt)))
bests.append( min(y_pred, key=lambda yp: abs(yp-yt)))
new_y_true.append(yt)
worst_stats = OrderedDict([('Worst combined:', None)])
best_stats = OrderedDict([('Best combined:', None)])
for name, (_, func) in metrics_dict.items():
worst_stats[name] = func(new_y_true, worsts)
best_stats[name] = func(new_y_true, bests)
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 15.5/24 #mmm yum
fig, ax = make_fig_ax(x_align=x_align)
# gather max and min
#all_vals = [val for val in worsts+bests if val is not None]
max1 = max(y_true)
min1 = min(y_true)
# draw dashed horizontal line
ax.plot([min1, max1], [min1, max1], 'k--', lw=2, zorder=1)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
# set tick labels
#maxx = max((max(bests), max(worsts), max(new_y_true)))
#minn = min((min(bests), min(worsts), min(new_y_true)))
#maxx, minn = recursive_max_and_min([bests, worsts, new_y_true])
maxx = round(float(max1), rounder(max1-min1))
minn = round(float(min1), rounder(max1-min1))
_set_tick_labels(ax, maxx, minn)
ax.scatter(new_y_true, bests, c='red', alpha=0.7, label='best test',
edgecolor='darkred', zorder=2, s=100)
ax.scatter(new_y_true, worsts, c='blue', alpha=0.7, label='worst test',
edgecolor='darkblue', zorder=3, s=60)
ax.legend(loc='lower right', fontsize=12)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.51, fontsize=10)
plot_stats(fig, worst_stats, x_align=x_align, y_align=0.73, fontsize=10)
plot_stats(fig, best_stats, x_align=x_align, y_align=0.95, fontsize=10)
fig.savefig(savepath + '.png', dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y true': new_y_true,
'best per point': bests,
'worst per point': worsts})
df.to_csv(savepath + '.csv')
@ipynb_maker
def plot_predicted_vs_true_bars(y_true, y_pred_list, avg_stats,
savepath, title='best worst with bars', label='target_value', groups=None):
"""
Method to calculate parity plot (predicted vs. true) of average predictions, averaged over all CV splits, with error
bars on each point corresponding to the standard deviation of the predicted values over all CV splits.
Args:
y_true: (numpy array), array of true y data
y_pred_list: (list), list of numpy arrays containing predicted y data for each CV split
avg_stats: (dict), dict of calculated average metrics over all CV splits
savepath: (str), path to save plots to
title: (str), title of the best_worst_per_point plot
label: (str), label used for axis labeling
Returns:
None
"""
means = [nice_mean(y_pred) for y_pred in y_pred_list]
standard_error_means = [nice_std(y_pred)/np.sqrt(len(y_pred))
for y_pred in y_pred_list]
standard_errors = [nice_std(y_pred) for y_pred in y_pred_list]
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
# gather max and min
max1 = max(np.nanmax(y_true), np.nanmax(means))
min1 = min(np.nanmin(y_true), np.nanmin(means))
# draw dashed horizontal line
ax.plot([min1, max1], [min1, max1], 'k--', lw=2, zorder=1)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
# set tick labels
#maxx, minn = recursive_max_and_min([means, y_true])
maxx = max(y_true)
minn = min(y_true)
maxx = round(float(maxx), rounder(maxx-minn))
minn = round(float(minn), rounder(maxx-minn))
#print(maxx, minn, rounder(maxx - minn))
_set_tick_labels(ax, maxx, minn)
if groups is None:
ax.errorbar(y_true, means, yerr=standard_errors, fmt='o', markerfacecolor='blue', markeredgecolor='black', markersize=10,
alpha=0.7, capsize=3)
else:
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
handles = dict()
unique_groups = np.unique(groups)
for groupcount, group in enumerate(unique_groups):
mask = groups == group
# logger.debug(' '*12 + f'{group} group_percent = {np.count_nonzero(mask) / len(groups)}')
handles[group] = ax.errorbar(y_true[mask], np.array(means)[mask], yerr=np.array(standard_errors)[mask],
marker=markers[markercount], markerfacecolor=colors[colorcount],
markeredgecolor=colors[colorcount], ecolor=colors[colorcount],
markersize=10, alpha=0.7, capsize=3, fmt='o')
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
ax.legend(handles.values(), handles.keys(), loc='lower right', fontsize=10)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.90)
fig.savefig(savepath + '.png', dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y true': y_true,
'average predicted values': means,
'error bar values': standard_errors})
df.to_csv(savepath + '.csv')
@ipynb_maker
def plot_metric_vs_group(metric, groups, stats, avg_stats, savepath):
"""
Method to plot the value of a particular calculated metric (e.g. RMSE, R^2, etc) for each data group
Args:
metric: (str), name of a calculation metric
groups: (numpy array), array of group names
stats: (dict), dict of training or testing statistics for a particular run
avg_stats: (dict), dict of calculated average metrics over all CV splits
savepath: (str), path to save plots to
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
# do the actual plotting
ax.scatter(groups, stats, c='blue', alpha=0.7, edgecolor='darkblue', zorder=2, s=100)
# set axis labels
ax.set_xlabel('Group', fontsize=16)
ax.set_ylabel(metric, fontsize=16)
ax.set_xticklabels(labels=groups, fontsize=14)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.90)
# Save data stats to csv
savepath_parse = savepath.split(str(metric)+'_vs_group.png')[0]
pd.DataFrame(groups, stats).to_csv(os.path.join(savepath_parse, str(metric)+'_vs_group.csv'))
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
@ipynb_maker
def plot_metric_vs_group_size(metric, groups, stats, avg_stats, savepath):
"""
Method to plot the value of a particular calculated metric (e.g. RMSE, R^2, etc) as a function of the size of each group.
Args:
metric: (str), name of a calculation metric
groups: (numpy array), array of group names
stats: (dict), dict of training or testing statistics for a particular run
avg_stats: (dict), dict of calculated average metrics over all CV splits
savepath: (str), path to save plots to
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
# Get unique groups from full group array
unique_groups = np.unique(groups)
# Get the size of each group
group_lengths = list()
for group in unique_groups:
group_lengths.append(len(np.concatenate(np.where(groups==group)).tolist()))
# do the actual plotting
ax.scatter(group_lengths, stats, c='blue', alpha=0.7, edgecolor='darkblue', zorder=2, s=100)
# set axis labels
ax.set_xlabel('Group size', fontsize=16)
ax.set_ylabel(metric, fontsize=16)
#ax.set_xticklabels(labels=group_lengths, fontsize=14)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.90)
# Save data stats to csv
savepath_parse = savepath.split(str(metric)+'_vs_group_size.png')[0]
pd.DataFrame(group_lengths, stats).to_csv(os.path.join(savepath_parse, str(metric)+'_vs_group_size.csv'))
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
# Credit to: http://contrib.scikit-learn.org/forest-confidence-interval/_modules/forestci/forestci.html
def calc_inbag_modified(n_samples, forest, is_ensemble):
"""
Derive samples used to create trees in scikit-learn RandomForest objects.
Recovers the samples in each tree from the random state of that tree using
:func:`forest._generate_sample_indices`.
Parameters
----------
n_samples : int
The number of samples used to fit the scikit-learn RandomForest object.
forest : RandomForest
Regressor or Classifier object that is already fit by scikit-learn.
Returns
-------
Array that records how many times a data point was placed in a tree.
Columns are individual trees; rows are samples, and each entry counts how
many times that sample was used in that tree.
"""
if not forest.bootstrap:
e_s = "Cannot calculate the inbag from a forest that has "
e_s += "bootstrap=False"
raise ValueError(e_s)
n_trees = forest.n_estimators
inbag = np.zeros((n_samples, n_trees))
sample_idx = []
n_samples_bootstrap = _get_n_samples_bootstrap(
n_samples, n_samples
)
for t_idx in range(n_trees):
if not is_ensemble:
sample_idx.append(
_generate_sample_indices(forest.estimators_[t_idx].random_state,
n_samples, n_samples_bootstrap))
inbag[:, t_idx] = np.bincount(sample_idx[-1], minlength=n_samples)
else:
sample_idx = forest.bootstrapped_idxs[t_idx]
inbag[:, t_idx] = np.bincount(sample_idx, minlength=n_samples)
return inbag
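# Illustrative usage only (not part of MAST-ML): recover the in-bag counts of a small,
# freshly fitted RandomForestRegressor. The toy data below is random and purely for
# demonstration.
def _example_calc_inbag_usage():
    from sklearn.ensemble import RandomForestRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(50, 3)
    y = rng.rand(50)
    forest = RandomForestRegressor(n_estimators=10, bootstrap=True, random_state=0).fit(X, y)
    inbag = calc_inbag_modified(n_samples=X.shape[0], forest=forest, is_ensemble=False)
    # inbag has shape (n_samples, n_trees); entry [i, t] counts how often sample i
    # was drawn into tree t's bootstrap sample
    return inbag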
# Credit to: http://contrib.scikit-learn.org/forest-confidence-interval/_modules/forestci/forestci.html
def random_forest_error_modified(forest, is_ensemble, X_train, X_test, basic_IJ=False,inbag=None,
calibrate=True, memory_constrained=False,
memory_limit=None):
"""
Calculate error bars from scikit-learn RandomForest estimators.
RandomForest is a regressor or classifier object
this variance can be used to plot error bars for RandomForest objects
Parameters
----------
forest : RandomForest
Regressor or Classifier object.
X_train : ndarray
An array with shape (n_train_sample, n_features). The design matrix for
training data.
X_test : ndarray
An array with shape (n_test_sample, n_features). The design matrix
for testing data
basic_IJ : boolean, optional
Return the value of basic infinitesimal jackknife or Monte Carlo
corrected infinitesimal jackknife.
inbag : ndarray, optional
The inbag matrix that fit the data. If set to `None` (default) it
will be inferred from the forest. However, this only works for trees
for which bootstrapping was set to `True`. That is, if sampling was
done with replacement. Otherwise, users need to provide their own
inbag matrix.
calibrate: boolean, optional
Whether to apply calibration to mitigate Monte Carlo noise.
Some variance estimates may be negative due to Monte Carlo effects if
the number of trees in the forest is too small; calibration helps correct for this.
Default: True
memory_constrained: boolean, optional
Whether or not there is a restriction on memory. If False, it is
assumed that an ndarray of shape (n_train_sample, n_test_sample) fits
in main memory. Setting to True can actually provide a speed up if
memory_limit is tuned to the optimal range.
memory_limit: int, optional.
An upper bound for how much memory the intermediate matrices will take
up in Megabytes. This must be provided if memory_constrained=True.
Returns
-------
An array with the unbiased sampling variance (V_IJ_unbiased)
for a RandomForest object.
See Also
----------
:func:`calc_inbag`
Notes
-----
The calculation of error is based on the infinitesimal jackknife variance,
as described in [Wager2014]_ and is a Python implementation of the R code
provided at: https://github.com/swager/randomForestCI
.. [Wager2014] S. Wager, T. Hastie, B. Efron. "Confidence Intervals for
Random Forests: The Jackknife and the Infinitesimal Jackknife", Journal
of Machine Learning Research vol. 15, pp. 1625-1651, 2014.
"""
if inbag is None:
inbag = calc_inbag_modified(X_train.shape[0], forest, is_ensemble)
if not is_ensemble:
pred = np.array([tree.predict(X_test) for tree in forest]).T
else:
pred = np.array([tree.predict(X_test) for tree in forest.model]).T
pred = pred[0]
pred_mean = np.mean(pred, 0)
pred_centered = pred - pred_mean
n_trees = forest.n_estimators
V_IJ = fci._core_computation(X_train, X_test, inbag, pred_centered, n_trees,
memory_constrained, memory_limit)
V_IJ_unbiased = fci._bias_correction(V_IJ, inbag, pred_centered, n_trees)
# Correct for cases where resampling is done without replacement:
if np.max(inbag) == 1:
variance_inflation = 1 / (1 - np.mean(inbag)) ** 2
V_IJ_unbiased *= variance_inflation
if basic_IJ:
return V_IJ
if not calibrate:
return V_IJ_unbiased
if V_IJ_unbiased.shape[0] <= 20:
print("No calibration with n_samples <= 20")
return V_IJ_unbiased
if calibrate:
calibration_ratio = 2
n_sample = np.ceil(n_trees / calibration_ratio)
new_forest = copy.deepcopy(forest)
if not is_ensemble:
new_forest.estimators_ =\
np.random.permutation(new_forest.estimators_)[:int(n_sample)]
else:
new_forest.model =\
np.random.permutation(new_forest.model)[:int(n_sample)]
new_forest.n_estimators = int(n_sample)
results_ss = random_forest_error_modified(new_forest, is_ensemble, X_train, X_test,
calibrate=False,
memory_constrained=memory_constrained,
memory_limit=memory_limit)
# Use this second set of variance estimates
# to estimate scale of Monte Carlo noise
sigma2_ss = np.mean((results_ss - V_IJ_unbiased)**2)
delta = n_sample / n_trees
sigma2 = (delta**2 + (1 - delta)**2) / (2 * (1 - delta)**2) * sigma2_ss
# Use Monte Carlo noise scale estimate for empirical Bayes calibration
V_IJ_calibrated = fci.calibration.calibrateEB(V_IJ_unbiased, sigma2)
return V_IJ_calibrated
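# Illustrative usage only (not part of MAST-ML): turn the infinitesimal-jackknife
# variances into one standard error per test prediction for an already-fitted
# scikit-learn random forest (bootstrap must be True).
def _example_random_forest_error_usage(forest, X_train, X_test):
    variances = random_forest_error_modified(forest, is_ensemble=False,
                                              X_train=X_train, X_test=X_test)
    return np.sqrt(variances)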
def prediction_intervals(model, X, rf_error_method, rf_error_percentile, Xtrain, Xtest):
"""
Method to calculate prediction intervals when using Random Forest and Gaussian Process regression models.
Prediction intervals for random forest adapted from https://blog.datadive.net/prediction-intervals-for-random-forests/
Args:
model: (scikit-learn model/estimator object), a scikit-learn model object
X: (numpy array), array of X features
rf_error_method: (str), type of error bar to formulate (e.g. "stdev" is standard deviation of predicted errors, "confint"
is error bar as confidence interval)
rf_error_percentile: (int), percentile for which to form error bars
Returns:
err_up: (list), list of upper bounds of error bars for each data point
err_down: (list), list of lower bounds of error bars for each data point
"""
err_down = list()
err_up = list()
nan_indices = list()
indices_TF = list()
X_aslist = X.values.tolist()
if model.__class__.__name__ in ['RandomForestRegressor', 'GradientBoostingRegressor', 'ExtraTreesRegressor', 'EnsembleRegressor']:
if rf_error_method == 'jackknife_calibrated':
if 'EnsembleRegressor' in model.__class__.__name__:
rf_variances = random_forest_error_modified(model, True, X_train=Xtrain, X_test=Xtest, basic_IJ=False, calibrate=True)
else:
rf_variances = random_forest_error_modified(model, False, X_train=Xtrain, X_test=Xtest, basic_IJ=False, calibrate=True)
rf_stdevs = np.sqrt(rf_variances)
nan_indices = np.where(np.isnan(rf_stdevs))
nan_indices_sorted = np.array(sorted(nan_indices[0], reverse=True))
for i, val in enumerate(list(rf_stdevs)):
if i in nan_indices_sorted:
indices_TF.append(False)
else:
indices_TF.append(True)
rf_stdevs = rf_stdevs[~np.isnan(rf_stdevs)]
err_up = err_down = rf_stdevs
elif rf_error_method == 'jackknife_uncalibrated':
if 'EnsembleRegressor' in model.__class__.__name__:
rf_variances = random_forest_error_modified(model, True, X_train=Xtrain, X_test=Xtest, basic_IJ=False, calibrate=False)
else:
rf_variances = random_forest_error_modified(model, False, X_train=Xtrain, X_test=Xtest, basic_IJ=False, calibrate=False)
rf_stdevs = np.sqrt(rf_variances)
nan_indices = np.where(np.isnan(rf_stdevs))
nan_indices_sorted = np.array(sorted(nan_indices[0], reverse=True))
for i, val in enumerate(list(rf_stdevs)):
if i in nan_indices_sorted:
indices_TF.append(False)
else:
indices_TF.append(True)
rf_stdevs = rf_stdevs[~np.isnan(rf_stdevs)]
err_up = err_down = rf_stdevs
elif rf_error_method == 'jackknife_basic':
if 'EnsembleRegressor' in model.__class__.__name__:
rf_variances = random_forest_error_modified(model, True, X_train=Xtrain, X_test=Xtest, basic_IJ=True, calibrate=False)
else:
rf_variances = random_forest_error_modified(model, False, X_train=Xtrain, X_test=Xtest, basic_IJ=True, calibrate=False)
rf_stdevs = np.sqrt(rf_variances)
nan_indices = np.where(np.isnan(rf_stdevs))
nan_indices_sorted = np.array(sorted(nan_indices[0], reverse=True))
for i, val in enumerate(list(rf_stdevs)):
if i in nan_indices_sorted:
indices_TF.append(False)
else:
indices_TF.append(True)
rf_stdevs = rf_stdevs[~np.isnan(rf_stdevs)]
err_up = err_down = rf_stdevs
else:
for x in range(len(X_aslist)):
preds = list()
if model.__class__.__name__ == 'RandomForestRegressor':
for pred in model.estimators_:
preds.append(pred.predict(np.array(X_aslist[x]).reshape(1,-1))[0])
elif model.__class__.__name__ == 'GradientBoostingRegressor':
for pred in model.estimators_.tolist():
preds.append(pred[0].predict(np.array(X_aslist[x]).reshape(1,-1))[0])
elif model.__class__.__name__ == 'EnsembleRegressor':
for pred in model.model:
preds.append(pred.predict(np.array(X_aslist[x]).reshape(1,-1))[0])
if rf_error_method == 'confint':
# A symmetric two-sided interval would use the two tail percentiles:
#   e_down = np.percentile(preds, (100 - int(rf_error_percentile)) / 2.)
#   e_up = np.percentile(preds, 100 - (100 - int(rf_error_percentile)) / 2.)
# As written, both bounds are taken at the same requested percentile of the tree predictions:
e_down = np.percentile(preds, float(rf_error_percentile))
e_up = np.percentile(preds, float(rf_error_percentile))
elif rf_error_method == 'stdev':
e_down = np.std(preds)
e_up = np.std(preds)
elif rf_error_method == 'False' or rf_error_method is False:
# basically default to stdev
e_down = np.std(preds)
e_up = np.std(preds)
else:
raise ValueError('rf_error_method must be one of ["stdev", "confint", "jackknife_basic", "jackknife_calibrated", "jackknife_uncalibrated"]')
#if e_up == 0.0:
# e_up = 10 ** 10
#if e_down == 0.0:
# e_down = 10 ** 10
err_down.append(e_down)
err_up.append(e_up)
nan_indices = np.where(np.isnan(err_up))
nan_indices_sorted = np.array(sorted(nan_indices[0], reverse=True))
for i, val in enumerate(list(err_up)):
if i in nan_indices_sorted:
indices_TF.append(False)
else:
indices_TF.append(True)
if model.__class__.__name__=='GaussianProcessRegressor':
preds = model.predict(X, return_std=True)[1] # Get the stdev model error from the predictions of GPR
err_up = preds
err_down = preds
nan_indices = np.where(np.isnan(err_up))
nan_indices_sorted = np.array(sorted(nan_indices[0], reverse=True))
for i, val in enumerate(list(err_up)):
if i in nan_indices_sorted:
indices_TF.append(False)
else:
indices_TF.append(True)
return err_down, err_up, nan_indices, np.array(indices_TF)
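# Hypothetical usage sketch (names are illustrative only):
#   err_down, err_up, nan_idx, mask = prediction_intervals(model, X, rf_error_method='stdev',
#                                                          rf_error_percentile=68, Xtrain=Xtrain, Xtest=Xtest)
#   y_pred_kept = y_pred[mask]  # drop points whose error estimate came back as NaN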
@ipynb_maker
def plot_normalized_error(y_true, y_pred, savepath, model, rf_error_method, rf_error_percentile, X=None, Xtrain=None,
Xtest=None):
"""
Method to plot the normalized residual errors of a model prediction
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted normalized error plot
model: (scikit-learn model/estimator object), a scikit-learn model object
X: (numpy array), array of X features
avg_stats: (dict), dict of calculated average metrics over all CV splits
Returns:
None
"""
path = os.path.dirname(savepath)
# Here: if model is random forest or Gaussian process, get real error bars. Else, just residuals
model_name = model.__class__.__name__
# TODO: also add support for Gradient Boosted Regressor
models_with_error_predictions = ['RandomForestRegressor', 'GaussianProcessRegressor', 'GradientBoostingRegressor', 'EnsembleRegressor']
has_model_errors = False
y_pred_ = y_pred
y_true_ = y_true
if model_name in models_with_error_predictions:
has_model_errors = True
err_down, err_up, nan_indices, indices_TF = prediction_intervals(model, X, rf_error_method=rf_error_method,
rf_error_percentile=rf_error_percentile, Xtrain=Xtrain, Xtest=Xtest)
# Correct for nan indices being present
if has_model_errors:
y_pred_ = y_pred_[indices_TF]
y_true_ = y_true_[indices_TF]
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
mu = 0
sigma = 1
residuals = (y_true_ - y_pred_)
normalized_residuals = (y_true_-y_pred_)/np.std(y_true_-y_pred_)
density_residuals = gaussian_kde(normalized_residuals)
x = np.linspace(mu - 5 * sigma, mu + 5 * sigma, y_true_.shape[0])
ax.plot(x, norm.pdf(x, mu, sigma), linewidth=4, color='blue', label="Analytical Gaussian")
ax.plot(x, density_residuals(x), linewidth=4, color='green', label="Model Residuals")
maxx = 5
minn = -5
if has_model_errors:
err_avg = [(abs(e1)+abs(e2))/2 for e1, e2 in zip(err_up, err_down)]
err_avg = np.asarray(err_avg)
err_avg[err_avg==0.0] = 0.0001
err_avg = err_avg.tolist()
model_errors = (y_true_-y_pred_)/err_avg
density_errors = gaussian_kde(model_errors)
maxy = max(max(density_residuals(x)), max(norm.pdf(x, mu, sigma)), max(density_errors(x)))
miny = min(min(density_residuals(x)), min(norm.pdf(x, mu, sigma)), min(density_errors(x)))
ax.plot(x, density_errors(x), linewidth=4, color='purple', label="Model Errors")
# Save data to csv file
data_dict = {"Y True": y_true_, "Y Pred": y_pred_, "Plotted x values": x, "error_bars_up": err_up,
"error_bars_down": err_down, "error_avg": err_avg,
"analytical gaussian (plotted y blue values)": norm.pdf(x, mu, sigma),
"model residuals": residuals,
"model normalized residuals (plotted y green values)": density_residuals(x),
"model errors (plotted y purple values)": density_errors(x)}
pd.DataFrame(data_dict).to_csv(savepath.split('.png')[0]+'.csv')
else:
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "x values": x, "analytical gaussian": norm.pdf(x, mu, sigma),
"model residuals": density_residuals(x)}
pd.DataFrame(data_dict).to_csv(savepath.split('.png')[0]+'.csv')
maxy = max(max(density_residuals(x)), max(norm.pdf(x, mu, sigma)))
miny = min(min(density_residuals(x)), min(norm.pdf(x, mu, sigma)))
ax.legend(loc=0, fontsize=12, frameon=False)
ax.set_xlabel(r"$\mathrm{x}/\mathit{\sigma}$", fontsize=18)
ax.set_ylabel("Probability density", fontsize=18)
_set_tick_labels_different(ax, maxx, minn, maxy, miny)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
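# Note on the plotted quantities above: the green curve is the kernel density of the
# normalized residuals r = (y_true - y_pred) / std(y_true - y_pred), the purple curve (when
# model errors are available) uses r = (y_true - y_pred) / err_avg with the per-point error
# estimate in the denominator, and both are compared against the standard normal pdf in blue.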
@ipynb_maker
def plot_cumulative_normalized_error(y_true, y_pred, savepath, model, rf_error_method, rf_error_percentile, X=None,
Xtrain=None, Xtest=None):
"""
Method to plot the cumulative normalized residual errors of a model prediction
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted cumulative normalized error plot
model: (scikit-learn model/estimator object), a scikit-learn model object
X: (numpy array), array of X features
avg_stats: (dict), dict of calculated average metrics over all CV splits
Returns:
None
"""
# Here: if model is random forest or Gaussian process, get real error bars. Else, just residuals
model_name = model.__class__.__name__
models_with_error_predictions = ['RandomForestRegressor', 'GaussianProcessRegressor', 'GradientBoostingRegressor', 'EnsembleRegressor']
has_model_errors = False
y_pred_ = y_pred
y_true_ = y_true
if model_name in models_with_error_predictions:
has_model_errors = True
err_down, err_up, nan_indices, indices_TF = prediction_intervals(model, X, rf_error_method=rf_error_method,
rf_error_percentile=rf_error_percentile, Xtrain=Xtrain, Xtest=Xtest)
# Need to remove NaN's before plotting. These will be present when doing validation runs. Note NaN's only show up in y_pred_
# Correct for nan indices being present
if has_model_errors:
y_pred_ = y_pred_[indices_TF]
y_true_ = y_true_[indices_TF]
y_true_ = y_true_[~np.isnan(y_pred_)]
y_pred_ = y_pred_[~np.isnan(y_pred_)]
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
analytic_gau = np.random.normal(0, 1, 10000)
analytic_gau = abs(analytic_gau)
n_analytic = np.arange(1, len(analytic_gau) + 1) / float(len(analytic_gau))
X_analytic = np.sort(analytic_gau)
residuals = y_true_-y_pred_
normalized_residuals = abs((y_true_-y_pred_)/np.std(y_true_-y_pred_))
n_residuals = np.arange(1, len(normalized_residuals) + 1) / float(len(normalized_residuals))
X_residuals = np.sort(normalized_residuals) #r"$\mathrm{Predicted \/ Value}, \mathit{eV}$"
ax.set_xlabel(r"$\mathrm{x}/\mathit{\sigma}$", fontsize=18)
ax.set_ylabel("Fraction", fontsize=18)
ax.step(X_residuals, n_residuals, linewidth=3, color='green', label="Model Residuals")
ax.step(X_analytic, n_analytic, linewidth=3, color='blue', label="Analytical Gaussian")
ax.set_xlim([0, 5])
if has_model_errors:
err_avg = [(abs(e1)+abs(e2))/2 for e1, e2 in zip(err_up, err_down)]
err_avg = np.asarray(err_avg)
err_avg[err_avg==0.0] = 0.0001
err_avg = err_avg.tolist()
model_errors = abs((y_true_-y_pred_)/err_avg)
n_errors = np.arange(1, len(model_errors) + 1) / float(len(model_errors))
X_errors = np.sort(model_errors)
ax.step(X_errors, n_errors, linewidth=3, color='purple', label="Model Errors")
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "Analytical Gaussian values": analytic_gau, "Analytical Gaussian (sorted, blue data)": X_analytic,
"model residuals": residuals,
"Model normalized residuals": normalized_residuals, "Model Residuals (sorted, green data)": X_residuals,
"error_bars_up": err_up, "error_bars_down": err_down,
"Model error values (r value: (ytrue-ypred)/(model error avg))": model_errors,
"Model errors (sorted, purple values)": X_errors}
# Save this way to avoid issue with different array sizes in data_dict
df = pd.DataFrame.from_dict(data_dict, orient='index')
df = df.transpose()
df.to_csv(savepath.split('.png')[0]+'.csv', index=False)
else:
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "x analytical": X_analytic, "analytical gaussian": n_analytic, "x residuals": X_residuals,
"model residuals": n_residuals}
# Save this way to avoid issue with different array sizes in data_dict
df = pd.DataFrame.from_dict(data_dict, orient='index')
df = df.transpose()
df.to_csv(savepath.split('.png')[0]+'.csv', index=False)
ax.legend(loc=0, fontsize=14, frameon=False)
xlabels = np.linspace(2, 3, 3)
ylabels = np.linspace(0.9, 1, 2)
axin = zoomed_inset_axes(ax, 2.5, loc=7)
axin.step(X_residuals, n_residuals, linewidth=3, color='green', label="Model Residuals")
axin.step(X_analytic, n_analytic, linewidth=3, color='blue', label="Analytical Gaussian")
if has_model_errors:
axin.step(X_errors, n_errors, linewidth=3, color='purple', label="Model Errors")
axin.set_xticklabels(xlabels, fontsize=8, rotation=90)
axin.set_yticklabels(ylabels, fontsize=8)
axin.set_xlim([2, 3])
axin.set_ylim([0.9, 1])
maxx = 5
minn = 0
maxy = 1.1
miny = 0
_set_tick_labels_different(ax, maxx, minn, maxy, miny)
mark_inset(ax, axin, loc1=1, loc2=2)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
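# Note on the plotted quantities above: the green and purple step curves are the empirical
# cumulative distributions of |y_true - y_pred| / std(residuals) and |y_true - y_pred| / err_avg,
# respectively, compared against the cumulative distribution of |N(0, 1)| samples in blue; curves
# that track the blue reference suggest error estimates consistent with a unit Gaussian.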
@ipynb_maker
def plot_average_cumulative_normalized_error(y_true, y_pred, savepath, has_model_errors, err_avg=None):
"""
Method to plot the cumulative normalized residual errors of a model prediction
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted cumulative normalized error plot
model: (scikit-learn model/estimator object), a scikit-learn model object
X: (numpy array), array of X features
avg_stats: (dict), dict of calculated average metrics over all CV splits
Returns:
None
"""
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
analytic_gau = np.random.normal(0, 1, 10000)
analytic_gau = abs(analytic_gau)
n_analytic = np.arange(1, len(analytic_gau) + 1) / float(len(analytic_gau))
X_analytic = np.sort(analytic_gau)
residuals = y_true-y_pred
residuals = residuals[~np.isnan(residuals)]
normalized_residuals = abs((y_true-y_pred)/np.std(y_true-y_pred))
n_residuals = np.arange(1, len(normalized_residuals) + 1) / float(len(normalized_residuals))
X_residuals = np.sort(normalized_residuals) #r"$\mathrm{Predicted \/ Value}, \mathit{eV}$"
ax.set_xlabel(r"$\mathrm{x}/\mathit{\sigma}$", fontsize=18)
ax.set_ylabel("Fraction", fontsize=18)
ax.step(X_residuals, n_residuals, linewidth=3, color='green', label="Model Residuals")
ax.step(X_analytic, n_analytic, linewidth=3, color='blue', label="Analytical Gaussian")
ax.set_xlim([0, 5])
if has_model_errors:
err_avg = np.asarray(err_avg)
err_avg[err_avg==0.0] = 0.0001
err_avg = err_avg.tolist()
model_errors = abs((y_true-y_pred)/err_avg)
model_errors = model_errors[~np.isnan(model_errors)]
n_errors = np.arange(1, len(model_errors) + 1) / float(len(model_errors))
X_errors = np.sort(model_errors)
ax.step(X_errors, n_errors, linewidth=3, color='purple', label="Model Errors")
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "Analytical Gaussian values": analytic_gau,
"Analytical Gaussian (sorted, blue data)": X_analytic,
"model residuals": residuals,
"Model normalized residuals": normalized_residuals, "Model Residuals (sorted, green data)": X_residuals,
"Model error values (r value: (ytrue-ypred)/(model error avg))": model_errors,
"Model errors (sorted, purple values)": X_errors}
# Save this way to avoid issue with different array sizes in data_dict
df = pd.DataFrame.from_dict(data_dict, orient='index')
df = df.transpose()
df.to_csv(savepath.split('.png')[0]+'.csv', index=False)
else:
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "x analytical": X_analytic, "analytical gaussian": n_analytic,
"x residuals": X_residuals, "model residuals": n_residuals}
# Save this way to avoid issue with different array sizes in data_dict
df = pd.DataFrame.from_dict(data_dict, orient='index')
df = df.transpose()
df.to_csv(savepath.split('.png')[0]+'.csv', index=False)
ax.legend(loc=0, fontsize=14, frameon=False)
xlabels = np.linspace(2, 3, 3)
ylabels = np.linspace(0.9, 1, 2)
axin = zoomed_inset_axes(ax, 2.5, loc=7)
axin.step(X_residuals, n_residuals, linewidth=3, color='green', label="Model Residuals")
axin.step(X_analytic, n_analytic, linewidth=3, color='blue', label="Analytical Gaussian")
if has_model_errors:
axin.step(X_errors, n_errors, linewidth=3, color='purple', label="Model Errors")
axin.set_xticklabels(xlabels, fontsize=8, rotation=90)
axin.set_yticklabels(ylabels, fontsize=8)
axin.set_xlim([2, 3])
axin.set_ylim([0.9, 1])
maxx = 5
minn = 0
maxy = 1.1
miny = 0
_set_tick_labels_different(ax, maxx, minn, maxy, miny)
mark_inset(ax, axin, loc1=1, loc2=2)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
@ipynb_maker
def plot_average_normalized_error(y_true, y_pred, savepath, has_model_errors, err_avg=None):
"""
Method to plot the normalized residual errors of a model prediction
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted normalized error plot
model: (scikit-learn model/estimator object), a scikit-learn model object
X: (numpy array), array of X features
avg_stats: (dict), dict of calculated average metrics over all CV splits
Returns:
None
"""
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
mu = 0
sigma = 1
residuals = y_true - y_pred
residuals = residuals[~np.isnan(residuals)]
normalized_residuals = (y_true-y_pred)/np.std(y_true-y_pred)
density_residuals = gaussian_kde(normalized_residuals)
x = np.linspace(mu - 5 * sigma, mu + 5 * sigma, y_true.shape[0])
ax.plot(x, norm.pdf(x, mu, sigma), linewidth=4, color='blue', label="Analytical Gaussian")
ax.plot(x, density_residuals(x), linewidth=4, color='green', label="Model Residuals")
maxx = 5
minn = -5
if has_model_errors:
nans = np.argwhere(np.isnan(err_avg)).tolist()
nans = np.squeeze(nans)
if nans.size:
err_avg[nans] = 0.0
err_avg = np.asarray(err_avg)
err_avg[err_avg==0.0] = 0.0001
err_avg = err_avg.tolist()
model_errors = (y_true-y_pred)/err_avg
model_errors = model_errors[~np.isnan(model_errors)]
density_errors = gaussian_kde(model_errors)
maxy = max(max(density_residuals(x)), max(norm.pdf(x, mu, sigma)), max(density_errors(x)))
miny = min(min(density_residuals(x)), min(norm.pdf(x, mu, sigma)), min(density_errors(x)))
ax.plot(x, density_errors(x), linewidth=4, color='purple', label="Model Errors")
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "Plotted x values": x, "Model errors": err_avg,
"analytical gaussian (plotted y blue values)": norm.pdf(x, mu, sigma),
"model residuals": residuals,
"model normalized residuals (plotted y green values)": density_residuals(x),
"model errors (plotted y purple values)": density_errors(x)}
pd.DataFrame(data_dict).to_csv(savepath.split('.png')[0]+'.csv')
else:
# Save data to csv file
data_dict = {"Y True": y_true, "Y Pred": y_pred, "x values": x, "analytical gaussian": norm.pdf(x, mu, sigma),
"model residuals": density_residuals(x)}
pd.DataFrame(data_dict).to_csv(savepath.split('.png')[0]+'.csv')
maxy = max(max(density_residuals(x)), max(norm.pdf(x, mu, sigma)))
miny = min(min(density_residuals(x)), min(norm.pdf(x, mu, sigma)))
ax.legend(loc=0, fontsize=12, frameon=False)
ax.set_xlabel(r"$\mathrm{x}/\mathit{\sigma}$", fontsize=18)
ax.set_ylabel("Probability density", fontsize=18)
_set_tick_labels_different(ax, maxx, minn, maxy, miny)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
def plot_real_vs_predicted_error(y_true, savepath, model, data_test_type):
bin_values, rms_residual_values, num_values_per_bin = parse_error_data(dataset_stdev=np.std(y_true),
path_to_test=savepath,
data_test_type=data_test_type)
model_name = model.__class__.__name__
if model_name == 'RandomForestRegressor':
model_type = 'RF'
elif model_name == 'GradientBoostingRegressor':
model_type = 'GBR'
elif model_name == 'ExtraTreesRegressor':
model_type = 'ET'
elif model_name == 'GaussianProcessRegressor':
model_type = 'GPR'
elif model_name == 'EnsembleRegressor':
model_type = 'ER'
if data_test_type not in ['test', 'validation']:
print('Error: data_test_type must be one of "test" or "validation"')
exit()
# Make RF error plot
fig, ax = make_fig_ax(aspect_ratio=0.5, x_align=0.65)
ax.scatter(bin_values[0:10], rms_residual_values[0:10], s=100, color='blue', alpha=0.7)
ax.scatter(bin_values[10:], rms_residual_values[10:], s=100, color='red', alpha=0.7)
ax.set_xlabel(str(model_type) + ' model errors / dataset stdev', fontsize=12)
ax.set_ylabel('RMS Absolute residuals\n / dataset stdev', fontsize=12)
ax.tick_params(labelsize=10)
linear_int = LinearRegression(fit_intercept=False)
linear = LinearRegression(fit_intercept=True)
# Fit just blue circle data
# Find nan entries
nans = np.argwhere(np.isnan(rms_residual_values)).tolist()
# use nans (which are indices) to delete relevant parts of bin_values and
# rms_residual_values as they can't be used to fit anyway
bin_values_copy = np.empty_like(bin_values)
bin_values_copy[:] = bin_values
rms_residual_values_copy = np.empty_like(rms_residual_values)
rms_residual_values_copy[:] = rms_residual_values
bin_values_copy = np.delete(bin_values_copy, nans)
rms_residual_values_copy = np.delete(rms_residual_values_copy, nans)
# BEGIN OLD CODE
# --------------
#lowval = 0
#if len(nans) > 0:
# if nans[0][0] == 0:
# nans = nans[1:]
# lowval = 1
# if len(nans) > 0:
# if nans[0][0] == 1:
# nans = nans[1:]
# lowval = 2
# if len(nans) > 0:
# if nans[0][0] == 2:
# nans = nans[1:]
# lowval = 3
# if len(nans) > 0:
# if nans[0][0] == 3:
# nans = nans[1:]
# lowval = 4
#try:
# val = min(nans)[0]
#except ValueError:
# val = 10
#if val > 10:
# val = 10
#linear.fit(np.array(bin_values[lowval:val]).reshape(-1, 1), rms_residual_values[lowval:val])
#yfit = linear.predict(np.array(bin_values[lowval:val]).reshape(-1, 1))
#ax.plot(bin_values[lowval:val], yfit, 'k--', linewidth=2)
#slope = linear.coef_
#r2 = r2_score(rms_residual_values[lowval:val], yfit)
# --------------
if not rms_residual_values_copy.size:
print("---WARNING: ALL ERRORS TOO LARGE FOR PLOTTING---")
else:
linear_int.fit(np.array(bin_values_copy).reshape(-1, 1), rms_residual_values_copy)
linear.fit(np.array(bin_values_copy).reshape(-1, 1), rms_residual_values_copy)
yfit_int = linear_int.predict(np.array(bin_values_copy).reshape(-1, 1))
yfit = linear.predict(np.array(bin_values_copy).reshape(-1, 1))
ax.plot(bin_values_copy, yfit_int, 'r--', linewidth=2)
ax.plot(bin_values_copy, yfit, 'k--', linewidth=2)
slope_int = linear_int.coef_
r2_int = r2_score(rms_residual_values_copy, yfit_int)
slope = linear.coef_
r2 = r2_score(rms_residual_values_copy, yfit)
ax.text(0.02, 1.2, 'intercept slope = %3.2f ' % slope_int, fontsize=12, fontdict={'color': 'r'})
ax.text(0.02, 1.1, 'intercept R$^2$ = %3.2f ' % r2_int, fontsize=12, fontdict={'color': 'r'})
ax.text(0.02, 1.0, 'slope = %3.2f ' % slope, fontsize=12, fontdict={'color': 'k'})
ax.text(0.02, 0.9, 'R$^2$ = %3.2f ' % r2, fontsize=12, fontdict={'color': 'k'})
divider = make_axes_locatable(ax)
axbarx = divider.append_axes("top", 1.2, pad=0.12, sharex=ax)
axbarx.bar(x=bin_values, height=num_values_per_bin, width=0.05276488, color='blue', edgecolor='black',
alpha=0.7)
axbarx.tick_params(labelsize=10, axis='y')
axbarx.tick_params(labelsize=0, axis='x')
axbarx.set_ylabel('Counts', fontsize=12)
total_samples = sum(num_values_per_bin)
axbarx.text(0.95, round(0.67 * max(num_values_per_bin)), 'Total counts = ' + str(total_samples), fontsize=12)
ax.set_ylim(bottom=0, top=max(1.3, max(rms_residual_values)))
axbarx.set_ylim(bottom=0, top=max(num_values_per_bin) + 50)
ax.set_xlim(left=0, right=max(max(bin_values_copy) + 0.05, 1.6))
fig.savefig(
os.path.join(savepath.split('.png')[0], str(model_type) + '_residuals_vs_modelerror_' + str(data_test_type) + '.png'),
dpi=300, bbox_inches='tight')
return
def parse_error_data(dataset_stdev, path_to_test, data_test_type):
if data_test_type not in ['test', 'validation']:
print('Error: data_test_type must be one of "test" or "validation"')
exit()
dfs_ytrue = list()
dfs_ypred = list()
dfs_erroravg = list()
dfs_modelresiduals = list()
files_to_parse = list()
splits = list()
for folder, subfolders, files in os.walk(path_to_test):
if 'split' in folder:
splits.append(folder)
for path in splits:
if os.path.exists(os.path.join(path, str(data_test_type)+'_normalized_error.csv')):
files_to_parse.append(os.path.join(path, str(data_test_type)+'_normalized_error.csv'))
for file in files_to_parse:
df = pd.read_csv(file)
dfs_ytrue.append(np.array(df['Y True']))
dfs_ypred.append(np.array(df['Y Pred']))
dfs_erroravg.append(np.array(df['error_avg']))
dfs_modelresiduals.append(np.array(df['model residuals']))
ytrue_all = np.concatenate(dfs_ytrue).ravel()
ypred_all = np.concatenate(dfs_ypred).ravel()
erroravg_all = np.concatenate(dfs_erroravg).ravel().tolist()
modelresiduals_all = np.concatenate(dfs_modelresiduals).ravel().tolist()
absmodelresiduals_all = [abs(i) for i in modelresiduals_all]
squaredmodelresiduals_all = [i**2 for i in absmodelresiduals_all]
erroravg_all_reduced = [i/dataset_stdev for i in erroravg_all]
# Need to square the dataset_stdev here since these are squared residuals
squaredmodelresiduals_all_reduced = [i/dataset_stdev**2 for i in squaredmodelresiduals_all]
erroravg_reduced_sorted, squaredresiduals_reduced_sorted = (list(t) for t in zip(*sorted(zip(erroravg_all_reduced, squaredmodelresiduals_all_reduced))))
bin_values = [0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95, 1.05, 1.15, 1.25, 1.35, 1.45, 1.55]
bin_delta = 0.05
over_count = 0
over_vals = []
for e in erroravg_reduced_sorted:
if e > (max(bin_values) + bin_delta):
over_count += 1
over_vals.append(e)
if len(over_vals):
med_over_val = statistics.median(over_vals)
if med_over_val <= max(bin_values) * 2.0:
# just add another bin and put everything in there
bin_values.append(1.65)
else:
# extend histogram
max_over_val = max(over_vals)
extra_bin_values = np.arange(1.65, max_over_val+1.0, 0.05)
bin_values = np.concatenate([bin_values, extra_bin_values])
rms_residual_values = list()
num_values_per_bin = list()
for bin_value in bin_values:
bin_indices = list()
bin_residuals = list()
for i, val in enumerate(erroravg_reduced_sorted):
if val > bin_value-bin_delta:
if bin_value == bin_values[len(bin_values)-1]:
bin_indices.append(i)
else:
if val < bin_value+bin_delta:
bin_indices.append(i)
for i in bin_indices:
bin_residuals.append(squaredresiduals_reduced_sorted[i])
rms_residual_values.append(np.sqrt(np.mean(bin_residuals)))
num_values_per_bin.append(len(bin_indices))
data_dict = {"Y True": ytrue_all,
"Y Pred": ypred_all,
"Model Residuals": modelresiduals_all,
"Abs Model Residuals": absmodelresiduals_all,
"Squared Model Resiuals": squaredmodelresiduals_all,
"Squared Model Residuals / dataset stdev": squaredmodelresiduals_all_reduced,
"Model errors": erroravg_all,
"Model errors / dataset stdev": erroravg_all_reduced,
"Model errors / dataset stdev (sorted)": erroravg_reduced_sorted,
"Squared Model Residuals / dataset stdev (sorted)": squaredresiduals_reduced_sorted,
"Bin values (Model errors / dataset stdev)": bin_values,
"Model RMS absolute residuals in each bin": rms_residual_values,
"Number of values in each bin": num_values_per_bin}
df = pd.DataFrame().from_dict(data=data_dict, orient='index').transpose()
df.to_excel(os.path.join(path_to_test, 'ModelErrorAnalysis_'+str(data_test_type)+'.xlsx'))
return bin_values, rms_residual_values, num_values_per_bin
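# Reading the output of parse_error_data (a sketch of the intent, inferred from the code above):
# each bin collects points whose (model error estimate / dataset stdev) falls near the bin center,
# and rms_residual_values holds the RMS of (residual / dataset stdev) within that bin, so in
# plot_real_vs_predicted_error a fitted slope near 1 means the predicted error bars are roughly
# the same size as the observed residuals.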
def plot_1d_heatmap(xs, heats, savepath, xlabel='x', heatlabel='heats'):
"""
Method to plot a heatmap for values of a single variable; used for plotting GridSearch results in hyperparameter optimization.
Args:
xs: (numpy array), array of first variable values to plot heatmap against
heats: (numpy array), array of heat values to plot
savepath: (str), path to save the 1D heatmap to
xlabel: (str), the x-axis label
heatlabel: (str), the heat value axis label
"""
#TODO have more general solution
try:
fig, ax = make_fig_ax()
ax.bar(xs, heats)
ax.set_xlabel(xlabel)
ax.set_ylabel(heatlabel)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
# Escape from error of passing tuples when optimizing neural net
except TypeError:
pass
def plot_2d_heatmap(xs, ys, heats, savepath,
xlabel='x', ylabel='y', heatlabel='heat'):
"""
Method to plot a heatmap for values of two variables; used for plotting GridSearch results in hyperparameter optimization.
Args:
xs: (numpy array), array of first variable values to plot heatmap against
ys: (numpy array), array of second variable values to plot heatmap against
heats: (numpy array), array of heat values to plot
savepath: (str), path to save the 2D heatmap to
xlabel: (str), the x-axis label
ylabel: (str), the y-axis label
heatlabel: (str), the heat value axis label
"""
#TODO have more general solution
try:
fig, ax = make_fig_ax()
scat = ax.scatter(xs, ys, c=heats) # marker='o', lw=0, s=20, cmap=cm.plasma
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
cb = fig.colorbar(scat)
cb.set_label(heatlabel)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
# Escape from error of passing tuples when optimizing neural net
except TypeError:
pass
def plot_3d_heatmap(xs, ys, zs, heats, savepath,
xlabel='x', ylabel='y', zlabel='z', heatlabel='heat'):
"""
Method to plot a heatmap for values of three variables; used for plotting GridSearch results in hyperparameter optimization.
Args:
xs: (numpy array), array of first variable values to plot heatmap against
ys: (numpy array), array of second variable values to plot heatmap against
zs: (numpy array), array of third variable values to plot heatmap against
heats: (numpy array), array of heat values to plot
savepath: (str), path to save the 2D heatmap to
xlabel: (str), the x-axis label
ylabel: (str), the y-axis label
zlabel: (str), the z-axis label
heatlabel: (str), the heat value axis label
"""
# Escape from error of passing tuples when optimizing neural net
# TODO have more general solution
try:
# this import has side effects, needed for 3d plots:
from mpl_toolkits.mplot3d import Axes3D
# Set image aspect ratio:
# (needs to be wide enough or the plot will shrink really skinny)
w, h = figaspect(0.6)
fig = Figure(figsize=(w,h))
FigureCanvas(fig) # modifies fig in place
ax = fig.add_subplot(111, projection='3d')
scat = ax.scatter(xs, ys, zs, c=heats)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel)
cb = fig.colorbar(scat)
cb.set_label(heatlabel)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
except TypeError:
pass
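# Note: the animation below reuses `fig` and `ax` created inside the try block above; if the
# TypeError branch was taken, those names are undefined and FuncAnimation will raise a NameError.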
def animate(i):
ax.view_init(elev=10., azim=i)
return [fig]
anim = FuncAnimation(fig, animate, frames=range(0,90,5), blit=True)
#anim.save(savepath+'.mp4', fps=5, extra_args=['-vcodec', 'libx264'])
anim.save(savepath+'.gif', fps=5, dpi=80, writer='imagemagick')
@ipynb_maker
def plot_learning_curve(train_sizes, train_mean, test_mean, train_stdev, test_stdev, score_name, learning_curve_type, savepath='data_learning_curve'):
"""
Method used to plot both data and feature learning curves
Args:
train_sizes: (numpy array), array of x-axis values, such as fraction of data used or number of features
train_mean: (numpy array), array of training data mean values, averaged over some type/number of CV splits
test_mean: (numpy array), array of test data mean values, averaged over some type/number of CV splits
train_stdev: (numpy array), array of training data standard deviation values, from some type/number of CV splits
test_stdev: (numpy array), array of test data standard deviation values, from some type/number of CV splits
score_name: (str), type of score metric for learning curve plotting; used in y-axis label
learning_curve_type: (str), type of learning curve employed: 'sample_learning_curve' or 'feature_learning_curve'
savepath: (str), path to save the plotted learning curve to
Returns:
None
"""
# Set image aspect ratio (do custom for learning curve):
w, h = figaspect(0.75)
fig = Figure(figsize=(w,h))
FigureCanvas(fig)
gs = plt.GridSpec(1, 1)
ax = fig.add_subplot(gs[0:, 0:])
max_x = max(train_sizes)
min_x = min(train_sizes)
max_y, min_y = recursive_max_and_min([
train_mean,
train_mean + train_stdev,
train_mean - train_stdev,
test_mean,
test_mean + test_stdev,
test_mean - test_stdev,
])
max_x = round(float(max_x), rounder(max_x-min_x))
min_x = round(float(min_x), rounder(max_x-min_x))
max_y = round(float(max_y), rounder(max_y-min_y))
min_y = round(float(min_y), rounder(max_y-min_y))
_set_tick_labels_different(ax, max_x, min_x, max_y, min_y)
# plot and collect handles h1 and h2 for making legend
h1 = ax.plot(train_sizes, train_mean, '-o', color='blue', markersize=10, alpha=0.7)[0]
ax.fill_between(train_sizes, train_mean-train_stdev, train_mean+train_stdev,
alpha=0.1, color='blue')
h2 = ax.plot(train_sizes, test_mean, '-o', color='red', markersize=10, alpha=0.7)[0]
ax.fill_between(train_sizes, test_mean-test_stdev, test_mean+test_stdev,
alpha=0.1, color='red')
ax.legend([h1, h2], ['train score', 'validation score'], loc='center right', fontsize=12)
if learning_curve_type == 'sample_learning_curve':
ax.set_xlabel('Number of training data points', fontsize=16)
elif learning_curve_type == 'feature_learning_curve':
ax.set_xlabel('Number of features selected', fontsize=16)
else:
raise ValueError('The param "learning_curve_type" must be either "sample_learning_curve" or "feature_learning_curve"')
ax.set_ylabel(score_name, fontsize=16)
fig.savefig(savepath+'.png', dpi=DPI, bbox_inches='tight')
# Save output data to spreadsheet
df_concat = pd.concat([pd.DataFrame(train_sizes), pd.DataFrame(train_mean), pd.DataFrame(train_stdev),
pd.DataFrame(test_mean), pd.DataFrame(test_stdev)], axis=1)
df_concat.columns = ['train_sizes', 'train_mean', 'train_stdev', 'test_mean', 'test_stdev']
df_concat.to_csv(savepath+'.csv')
try:
plot_learning_curve_convergence(train_sizes, test_mean, score_name, learning_curve_type, savepath)
except IndexError:
logger.error('MASTML encountered an error while trying to plot the learning curve convergences plots, likely due to '
'insufficient data')
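# Hypothetical usage sketch (array values are illustrative only):
#   sizes = np.array([50, 100, 200, 400])
#   plot_learning_curve(sizes, train_mean, test_mean, train_stdev, test_stdev,
#                       score_name='R2', learning_curve_type='sample_learning_curve',
#                       savepath='data_learning_curve')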
@ipynb_maker
def plot_learning_curve_convergence(train_sizes, test_mean, score_name, learning_curve_type, savepath):
"""
Method used to plot both the convergence of data and feature learning curves as a function of amount of data or features
used, respectively.
Args:
train_sizes: (numpy array), array of x-axis values, such as fraction of data used or number of features
test_mean: (numpy array), array of test data mean values, averaged over some type/number of CV splits
score_name: (str), type of score metric for learning curve plotting; used in y-axis label
learning_curve_type: (str), type of learning curve employed: 'sample_learning_curve' or 'feature_learning_curve'
savepath: (str), path to save the plotted convergence learning curve to
Returns:
None
"""
# Function to examine the minimization of error in learning curve CV scores as function of amount of data or number
# of features used.
steps = [x for x in range(len(train_sizes))]
test_mean = test_mean.tolist()
slopes = list()
for step, val in zip(range(len(steps)), range(len(test_mean))):
if step+1 < len(steps):
slopes.append((test_mean[val + 1] - test_mean[val]) / (steps[step + 1] - steps[step]))
# Remove first entry to steps for plotting
del steps[0]
# Get moving average of slopes to generate smoother curve
window_size = round(len(test_mean)/3)
steps_moving_average = pd.DataFrame(steps).rolling(window=window_size).mean()
slopes_moving_average = pd.DataFrame(slopes).rolling(window=window_size).mean()
#### Plotting
# Set image aspect ratio (do custom for learning curve):
w, h = figaspect(0.75)
fig = Figure(figsize=(w,h))
FigureCanvas(fig)
gs = plt.GridSpec(1, 1)
ax = fig.add_subplot(gs[0:, 0:])
max_x = max(steps)
min_x = min(steps)
max_y = max(slopes)
min_y = min(slopes)
_set_tick_labels_different(ax, max_x, min_x, max_y, min_y)
ax.plot(steps, slopes, '-o', color='blue', markersize=10, alpha=0.7)
ax.plot(steps_moving_average, slopes_moving_average, '-o', color='green', markersize=10, alpha=0.7)
ax.legend(['score slope', 'smoothed score slope'], loc='lower right', fontsize=12)
ax.set_xlabel('Learning curve step', fontsize=16)
ax.set_ylabel('Change in '+score_name, fontsize=16)
fig.savefig(savepath+'_convergence'+'.png', dpi=DPI, bbox_inches='tight')
datadict = {"Steps": np.array(steps), "Slopes": np.array(slopes),
"Steps moving average": np.squeeze(np.array(steps_moving_average)),
"Slopes moving average": np.squeeze(np.array(slopes_moving_average))}
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import seaborn as sns
from fbprophet import Prophet
import matplotlib.pyplot as plt
from pyspark.sql import SparkSession
cluster_seeds = ['127.0.0.1']
spark = SparkSession.builder.appName('Forecasting Crime').config('spark.cassandra.connection.host', ','.join(cluster_seeds)).getOrCreate()
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
sc = spark.sparkContext
## Using FB Prophet model to forecast crime data for 2019
def crime_forecasting(KEYSPACE = 'pirates'):
pan = spark.read.format("org.apache.spark.sql.cassandra").options(table='chicagocrime', keyspace=KEYSPACE).load()
pan = pan.toPandas()
## **************************NOTE**************************************
## On some operating systems the toPandas() call above can fail with a Java heap space error; in that case, load the data from CSV instead:
#pan = pd.read_csv('../datafile/mycsv/part-00000-8101e9fe-075a-4eed-b16b-2ca079001c1b-c000.csv', error_bad_lines=False)
plt.figure(figsize=(15, 10))
svm = sns.countplot(y='crimetype', data=pan, order=pan['crimetype'].value_counts().iloc[:15].index)
fig = svm.get_figure()
fig.savefig("../static/images/forecasting/theft_count.png")
plt.figure(figsize=(15, 10))
svm = sns.countplot(y='location_description', data=pan, order=pan['location_description'].value_counts().iloc[:15].index)
fig = svm.get_figure()
fig.savefig("../static/images/forecasting/location_description_count.png")
pan.index =
|
pd.DatetimeIndex(pan.occurrence_date)
|
pandas.DatetimeIndex
|
import pandas as pd
import numpy as np
from .errors import MetricLookupException
class TableLookupError(MetricLookupException): pass
class AAindexEntry():
def __init__(self, accession, data_description, pmid, authors, title, journal_ref, similar, aadata):
self.accession = accession
self.data_description = data_description
self.pmid = pmid
self.authors = authors
self.title = title
self.journal_ref = journal_ref
self.similar = similar
self.aadata = aadata
def __repr__(self):
s = ""
s += 'Accession: {}\n'.format(self.accession)
s += 'data_description: {}\n'.format(self.data_description)
s += 'pmid: {}\n'.format(self.pmid)
s += 'title: {}\n'.format(self.title)
s += 'journal_ref: {}\n'.format(self.journal_ref)
s += 'similar:\n{}\n'.format(self.similar)
s += 'aadata:\n{}\n'.format(self.aadata)
return s
class AAindex():
# Read and store the aaindex data
# Make a big dataframe of results.
def __init__(self, aaindex_file):
self.data = {}
self.index = None
self._read_aaindex(aaindex_file)
self._make_index()
def _read_aaindex(self, aaindex_file):
"""
From the documentation
* Each entry has the following format. *
* *
* H Accession number *
* D Data description *
* R PMID *
* A Author(s) *
* T Title of the article *
* J Journal reference *
* * Comment or missing *
* C Accession numbers of similar entries with the correlation *
* coefficients of 0.8 (-0.8) or more (less). *
* Notice: The correlation coefficient is calculated with zeros *
* filled for missing values. *
* I Amino acid index data in the following order *
* Ala Arg Asn Asp Cys Gln Glu Gly His Ile *
* Leu Lys Met Phe Pro Ser Thr Trp Tyr Val *
"""
res = []
aaline = 0
with open(aaindex_file) as fh:
similar_data = []
similar = False
aa_res = []
aa_line = 0
for line in fh:
if line.startswith('//'):
self.data[desc] = AAindexEntry(accession, desc, pmid, authors, title, journal_ref, similar_data,
aa_res)
aa_line = 0
similar_data = []
aa_res = []
elif aa_line > 0:
aa_res.extend(self._read_aa(line, aa_line))
aa_line += 1
if aa_line == 3:
aa_res = pd.DataFrame(aa_res, columns=['AA', 'Value'])
elif line.startswith('H'):
# Accession number
accession = self._read_line(line)
elif line.startswith('D'):
desc = self._read_line(line)
D = True
elif line.startswith('R'):
pmid = self._read_line(line)
D = False
elif line.startswith('A'):
authors = self._read_line(line)
A = True
elif line.startswith('T'):
title = self._read_line(line)
A = False
T = True
elif line.startswith('J'):
journal_ref = self._read_line(line)
J = True
T = False
elif line.startswith('C'):
J = False
similar = True
similar_data.extend(self._read_similar(line))
elif line.startswith('I'):
aa_line = 1
similar = False
if similar_data:
similar_data = pd.DataFrame(similar_data, columns=['accession', 'corr_coef'])
else:
similar_data = None
elif similar:
similar_data.extend(self._read_similar(line))
elif D:
desc += " " + line.strip()
elif A:
authors += " " + line.strip()
elif T:
title += " " + line.strip()
elif J:
journal_ref += " " + line.strip()
def _read_line(self, line):
try:
res = line.strip().split()
res = " ".join(res[1:])
except IndexError as e: # Empty
res = None
return res
def _read_similar(self, line):
res = []
for i, j in enumerate(line.strip()[2:].split()):
if i % 2 == 0:
acc = j
else:
res.append({'accession': acc, 'corr_coef': self._convert_to_numeric(j)})
return res
def _read_aa(self, line, aa_line):
if aa_line == 1:
amino_acids = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I']
elif aa_line == 2:
amino_acids = ['L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
res = []
for aa, value in zip(amino_acids, line.strip().split()):
res.append({'AA': aa, 'Value': self._convert_to_numeric(value)})
return res
def _convert_to_numeric(self, string):
# Integer values are written as 16.
if string == 'NA':
return np.nan
elif string.endswith('.'):
return int(string[:-1])
else:
return float(string)
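# Illustrative behaviour of _convert_to_numeric (derived from the code above):
#   'NA'   -> np.nan
#   '16.'  -> 16      (a trailing '.' marks an integer value in the AAindex flat file)
#   '0.61' -> 0.61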
def _make_index(self):
columns = ['accession', 'data_description', 'pmid', 'authors', 'title', 'journal_ref']
res = []
for i, aa in self.data.items():
res.append({k: getattr(aa, k) for k in columns})
self.index =
|
pd.DataFrame(res, columns=columns)
|
pandas.DataFrame
|
"""
Plot the kinetic reactions of biomass pyrolysis using the Ranzi 2014 kinetic
scheme.
Reference:
<NAME>, 2014. Chemical Engineering Science, 110, pp 2-12.
"""
import numpy as np
import pandas as pd
# Parameters
# ------------------------------------------------------------------------------
# T = 773 # temperature for rate constants, K
# weight percent (%) cellulose, hemicellulose, lignin for beech wood
# wtcell = 48
# wthemi = 28
# wtlig = 24
# dt = 0.001 # time step, delta t
# tmax = 4 # max time, s
# t = np.linspace(0, tmax, num=int(tmax/dt)) # time vector
# nt = len(t) # total number of time steps
# Functions for Ranzi 2014 Kinetic Scheme
# ------------------------------------------------------------------------------
def ranzicell(wood, wt, T, dt, nt):
"""
Cellulose reactions CELL from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood concentration, kg/m^3
wt = weight percent wood as cellulose, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main = mass concentration of main group, (-)
prod = mass concentration of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
cell = pw*(wt/100) # initial cellulose conc. in wood
g1 = np.zeros(nt) # G1
cella = np.zeros(nt) # CELLA
lvg = np.zeros(nt) # LVG
g4 = np.zeros(nt) # G4
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 4e7 * np.exp(-31000 / (R * T)) # CELL -> G1
K2 = 4e13 * np.exp(-45000 / (R * T)) # CELL -> CELLA
K3 = 1.8 * T * np.exp(-10000 / (R * T)) # CELLA -> LVG
K4 = 0.5e9 * np.exp(-29000 / (R * T)) # CELLA -> G4
# sum of moles in each group, mol
sumg1 = 11 # sum of G1
sumg4 = 4.08 # sum of G4
# calculate concentrations for main groups, kg/m^3
for i in range(1, nt):
r1 = K1 * cell[i-1] # CELL -> G1
r2 = K2 * cell[i-1] # CELL -> CELLA
r3 = K3 * cella[i-1] # CELLA -> LVG
r4 = K4 * cella[i-1] # CELLA -> G4
cell[i] = cell[i-1] - (r1+r2)*dt # CELL
g1[i] = g1[i-1] + r1*dt # G1
cella[i] = cella[i-1] + r2*dt - (r3+r4)*dt # CELLA
lvg[i] = lvg[i-1] + r3*dt # LVG
g4[i] = g4[i-1] + r4*dt # G4
# store main groups in array
main = np.array([cell, g1, cella, lvg, g4])
# total group concentration per total moles in that group, (kg/m^3) / mol
fg1 = g1/sumg1 # fraction of G1
fg4 = g4/sumg4 # fraction of G4
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = 0.16*fg4 # CO
prod[1] = 0.21*fg4 # CO2
prod[2] = 0.4*fg4 # CH2O
prod[3] = 0.02*fg4 # HCOOH
prod[5] = 0.1*fg4 # CH4
prod[6] = 0.2*fg4 # Glyox
prod[8] = 0.1*fg4 # C2H4O
prod[9] = 0.8*fg4 # HAA
prod[11] = 0.3*fg4 # C3H6O
prod[14] = 0.25*fg4 # HMFU
prod[15] = lvg # LVG
prod[18] = 0.1*fg4 # H2
prod[19] = 5*fg1 + 0.83*fg4 # H2O
prod[20] = 6*fg1 + 0.61*fg4 # Char
# return arrays of main groups and products as mass fraction, (-)
return main/wood, prod/wood
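# Worked example (assuming the commented default T = 773 K above): the first-order rate
# constant for CELL -> G1 is K1 = 4e7 * exp(-31000 / (1.987 * 773)) ≈ 0.07 1/s, and each
# concentration is advanced with an explicit (forward) Euler step, e.g.
#   cell[i] = cell[i-1] - (K1 + K2) * cell[i-1] * dt
# so dt should stay small relative to 1/K for the integration to remain accurate and stable.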
def ranzihemi(wood, wt, T, dt, nt):
"""
Hemicellulose reactions HCE from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood density, kg/m^3
wt = weight percent of hemicellulose, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main/wood = mass fraction of main group, (-)
prod/wood = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
hce = pw*(wt/100) # initial hemicellulose conc. in wood
g1 = np.zeros(nt) # G1
g2 = np.zeros(nt) # G2
g3 = np.zeros(nt) # G3
g4 = np.zeros(nt) # G4
xyl = np.zeros(nt) # Xylan
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 0.33e10 * np.exp(-31000 / (R * T)) # HCE -> G1
K2 = 0.33e10 * np.exp(-33000 / (R * T)) # HCE2 -> G2
K3 = 0.05 * T * np.exp(-8000 / (R * T)) # HCE1 -> G3
K4 = 1e9 * np.exp(-32000 / (R * T)) # HCE1 -> G4
K5 = 0.9 * T * np.exp(-11000 / (R * T)) # HCE1 -> Xylan
# sum of moles in each group, mol
sumg2 = 4.625 # sum of G2
sumg3 = 4.875 # sum of G3
sumg4 = 4.775 # sum of G4
# calculate concentrations for main groups, kg/m^3
# where HCE1 as 0.4*g1/(0.4+0.6) and HCE2 as 0.6*g1/(0.4+0.6)
for i in range(1, nt):
r1 = K1 * hce[i-1] # HCE -> G1
r2 = K2 * 0.6*g1[i-1] # HCE2 -> G2
r3 = K3 * 0.4*g1[i-1] # HCE1 -> G3
r4 = K4 * 0.4*g1[i-1] # HCE1 -> G4
r5 = K5 * 0.4*g1[i-1] # HCE1 -> Xylan
hce[i] = hce[i-1] - r1*dt # HCE
g1[i] = g1[i-1] + r1*dt - (r2+r3+r4+r5)*dt # G1
g2[i] = g2[i-1] + r2*dt # G2
g3[i] = g3[i-1] + r3*dt # G3
g4[i] = g4[i-1] + r4*dt # G4
xyl[i] = xyl[i-1] + r5*dt # Xylan
# store main groups in array
main = np.array([hce, g1, g2, g3, g4, xyl])
# total group concentration per total moles in that group, (kg/m^3)/mol
fg2 = g2/sumg2 # fraction of G2
fg3 = g3/sumg3 # fraction of G3
fg4 = g4/sumg4 # fraction of G4
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = 0.175*fg2 + (0.3 + 0.15)*fg3 + 0.5*fg4 # CO
prod[1] = (0.275+0.4)*fg2 + (0.5+0.25)*fg3 + (0.5+0.275)*fg4 # CO2
prod[2] = (0.5+0.925)*fg2 + 1.7*fg3 + (0.8+0.4)*fg4 # CH2O
prod[3] = 0.025*fg2 + 0.05*fg3 + 0.025*fg4 # HCOOH
prod[4] = 0.3*fg2 + (0.1+0.45)*fg4 # CH3OH
prod[5] = 0.25*fg2 + 0.625*fg3 + 0.325*fg4 # CH4
prod[7] = 0.275*fg2 + 0.375*fg3 + 0.25*fg4 # C2H4
prod[9] = 0.2*fg2 # HAA
prod[10] = 0.1*fg2 + 0.125*fg4 # C2H5OH
prod[12] = xyl # Xylan
prod[18] = 0.125*fg4 # H2
prod[19] = 0.2*fg2 + 0.25*fg3 + 0.025*fg4 # H2O
prod[20] = 1*fg2 + 0.675*fg3 + 0.875*fg4 # Char
# return arrays of main groups and products as mass fraction, (-)
return main/wood, prod/wood
def ranziligc(wood, wt, T, dt, nt):
"""
Lignin carbon rich reactions LIG-C from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood density, kg/m^3
wt = weight percent of lignin-c, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main/wood = mass fraction of main group, (-)
prod/wood = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
ligc = pw*(wt/100/3) # initial lignin in wood, assume 1/3 of total lignin
g1 = np.zeros(nt)
g2 = np.zeros(nt)
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 1.33e15 * np.exp(-48500 / (R * T)) # LIG-C -> G1
K2 = 1.6e6 * np.exp(-31500 / (R * T)) # LIG-CC -> G2
# sum of moles in each group, mol
sumg1 = 9.49 # sum of G1
sumg2 = 11.35 # sum of G2
# calculate concentrations for main groups, kg/m^3
for i in range(1, nt):
r1 = K1 * ligc[i-1] # LIG-C -> G1
r2 = K2 * 0.35*g1[i-1]/sumg1 # LIG-CC -> G2
ligc[i] = ligc[i-1] - r1*dt # LIG-C
g1[i] = g1[i-1] + r1*dt - r2*dt # G1
g2[i] = g2[i-1] + r2*dt # G2
# store main groups in array
main = np.array([ligc, g1, g2])
# total group concentration per total moles in that group, (kg/m^3)/mol
fg1 = g1/sumg1 # fraction of G1
fg2 = g2/sumg2 # fraction of G2
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = 0.32*fg1 + (0.4 + 0.4)*fg2 # CO
prod[2] = (0.3 + 0.7)*fg1 + 1*fg2 # CH2O
prod[5] = 0.495*fg1 + 0.65*fg2 # CH4
prod[7] = 0.41*fg1 + 0.6*fg2 # C2H4
prod[9] = 0.35*fg2 # HAA
prod[13] = 0.08*fg1 + 0.2*fg2 # Phenol
prod[16] = 0.1*fg1 + 0.3*fg2 # Coumaryl
prod[19] = 1*fg1 + 0.7*fg2 # H2O
prod[20] = 5.735*fg1 + 6.75*fg2 # Char
# return arrays of main groups and products as mass fractions, (-)
return main/wood, prod/wood
def ranziligh(wood, wt, T, dt, nt):
"""
Lignin hydrogen rich reactions LIG-H from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood density, kg/m^3
wt = weight percent of lignin-h, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main/wood = mass fraction of main group, (-)
prod/wood = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
ligh = pw*(wt/100/3) # initial lignin in wood, assume 1/3 of total lignin
g1 = np.zeros(nt) # G1
g2 = np.zeros(nt) # G2
g3 = np.zeros(nt) # G3
g4 = np.zeros(nt) # G4
g5 = np.zeros(nt) # G5
fe2macr = np.zeros(nt) # FE2MACR
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 0.67e13 * np.exp(-37500 / (R * T)) # LIG-H -> G1
K2 = 33 * np.exp(-15000 / (R * T)) # LIG-OH -> G2
K3 = 0.5e8 * np.exp(-30000 / (R * T)) # LIG-OH -> LIG
K4 = 0.083 * T * np.exp(-8000 / (R * T)) # LIG -> G4
K5 = 0.4e9 * np.exp(-30000 / (R * T)) # LIG -> G5
K6 = 2.4 * T * np.exp(-12000 / (R * T)) # LIG -> FE2MACR
# sum of moles in each group, mol
sumg1 = 2 # sum of G1
sumg2 = 20.7 # sum of G2
sumg3 = 9.85 # sum of G3
sumg4 = 11.1 # sum of G4
sumg5 = 10.7 # sum of G5
# calculate concentrations for main groups, kg/m^3
for i in range(1, nt):
r1 = K1 * ligh[i-1] # LIG-H -> G1
r2 = K2 * 1*g1[i-1]/sumg1 # LIG-OH -> G2
r3 = K3 * 1*g1[i-1]/sumg1 # LIG-OH -> LIG
r4 = K4 * 1*g3[i-1]/sumg3 # LIG -> G4
r5 = K5 * 1*g3[i-1]/sumg3 # LIG -> G5
r6 = K6 * 1*g3[i-1]/sumg3 # LIG -> FE2MACR
ligh[i] = ligh[i-1] - r1*dt # LIG-H
g1[i] = g1[i-1] + r1*dt - (r2+r3)*dt # G1
g2[i] = g2[i-1] + r2*dt # G2
g3[i] = g3[i-1] + r3*dt - (r4+r5+r6)*dt # G3
g4[i] = g4[i-1] + r4*dt # G4
g5[i] = g5[i-1] + r5*dt # G5
fe2macr[i] = fe2macr[i-1] + r6*dt # FE2MACR
# store main groups in array
main = np.array([ligh, g1, g2, g3, g4, g5, fe2macr])
# total group concentration per total moles in that group, (kg/m^3)/mol
fg1 = g1/sumg1 # fraction of G1
fg2 = g2/sumg2 # fraction of G2
fg3 = g3/sumg3 # fraction of G3
fg4 = g4/sumg4 # fraction of G4
fg5 = g5/sumg5 # fraction of G5
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = (0.5 + 1.6)*fg2 + (0.3 + 1)*fg3 + (0.4 + 0.2)*fg4 + (1 + 0.45)*fg5 # CO
prod[1] = 0.05*fg3 # CO2
prod[2] = 3.9*fg2 + 0.6*fg3 + (2 + 0.4)*fg4 + (0.2 + 0.5)*fg5 # CH2O
prod[3] = 0.05*fg3 + 0.05*fg5 # HCOOH
prod[4] = 0.5*fg2 + (0.5 + 0.5)*fg3 + 0.4*fg4 + 0.4*fg5 # CH3OH
prod[5] = (0.1 + 1.65)*fg2 + (0.1 + 0.35)*fg3 + (0.2 + 0.4)*fg4 + (0.2 + 0.4)*fg5 # CH4
prod[6] = 0 # Glyox
prod[7] = 0.3*fg2 + 0.2*fg3 + 0.5*fg4 + 0.65*fg5 # C2H4
prod[8] = 0.2*fg5 # C2H4O
prod[9] = 0 # HAA
prod[10] = 0 # C2H5OH
prod[11] = 1*fg1 + 0.2*fg5 # C3H6O
prod[12] = 0 # Xylan
prod[13] = 0 # Phenol
prod[14] = 0 # HMFU
prod[15] = 0 # LVG
prod[16] = 0 # Coumaryl
prod[17] = fe2macr # FE2MACR
prod[18] = 0.5*fg2 + 0.15*fg3 # H2
prod[19] = 1.5*fg2 + 0.9*fg3 + 0.6*fg4 + 0.95*fg5 # H2O
prod[20] = 10.15*fg2 + 4.15*fg3 + 6*fg4 + 5.5*fg5 # Char
# return arrays of main groups and products as mass fractions, (-)
return main/wood, prod/wood
def ranziligo(wood, wt, T, dt, nt):
"""
Lignin oxygen rich reactions LIG-O from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood density, kg/m^3
wt = weight percent of lignin-o, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main/wood = mass fraction of main group, (-)
prod/wood = mass fraction of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
ligo = pw*(wt/100/3) # initial lignin in wood, assume 1/3 of total lignin
g1 = np.zeros(nt) # G1
g2 = np.zeros(nt) # G2
g3 = np.zeros(nt) # G3
g4 = np.zeros(nt) # G4
g5 = np.zeros(nt) # G5
fe2macr = np.zeros(nt) # FE2MACR
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
K1 = 0.33e9 * np.exp(-25500 / (R * T)) # LIG-O -> G1
K2 = 33 * np.exp(-15000 / (R * T)) # LIG-OH -> G2
K3 = 0.5e8 * np.exp(-30000 / (R * T)) # LIG-OH -> LIG
K4 = 0.083 * T * np.exp(-8000 / (R * T)) # LIG -> G4
K5 = 0.4e9 * np.exp(-30000 / (R * T)) # LIG -> G5
K6 = 2.4 * T * np.exp(-12000 / (R * T)) # LIG -> FE2MACR
# sum of moles in each group, mol
sumg1 = 2 # sum of G1
sumg2 = 20.7 # sum of G2
sumg3 = 9.85 # sum of G3
sumg4 = 11.1 # sum of G4
sumg5 = 10.7 # sum of G5
# calculate concentrations for main groups, kg/m^3
for i in range(1, nt):
r1 = K1 * ligo[i-1] # LIG-O -> G1
r2 = K2 * 1*g1[i-1]/sumg1 # LIG-OH -> G2
r3 = K3 * 1*g1[i-1]/sumg1 # LIG-OH -> LIG
r4 = K4 * 1*g3[i-1]/sumg3 # LIG -> G4
r5 = K5 * 1*g3[i-1]/sumg3 # LIG -> G5
r6 = K6 * 1*g3[i-1]/sumg3 # LIG -> FE2MACR
ligo[i] = ligo[i-1] - r1*dt # LIG-O
g1[i] = g1[i-1] + r1*dt - (r2+r3)*dt # G1
g2[i] = g2[i-1] + r2*dt # G2
g3[i] = g3[i-1] + r3*dt - (r4+r5+r6)*dt # G3
g4[i] = g4[i-1] + r4*dt # G4
g5[i] = g5[i-1] + r5*dt # G5
fe2macr[i] = fe2macr[i-1] + r6*dt # FE2MACR
# store main groups in array
main = np.array([ligo, g1, g2, g3, g4, g5, fe2macr])
# total group concentration per total moles in that group, (kg/m^3)/mol
fg1 = g1/sumg1 # fraction of G1
fg2 = g2/sumg2 # fraction of G2
fg3 = g3/sumg3 # fraction of G3
fg4 = g4/sumg4 # fraction of G4
fg5 = g5/sumg5 # fraction of G5
# array to store product concentrations as a density, kg/m^3
prod = np.zeros([21, nt])
prod[0] = (0.5 + 1.6)*fg2 + (0.3 + 1)*fg3 + (0.4 + 0.2)*fg4 + (1 + 0.45)*fg5 # CO
prod[1] = 1*fg1 + 0.05*fg3 # CO2
prod[2] = 3.9*fg2 + 0.6*fg3 + (2 + 0.4)*fg4 + (0.2 + 0.5)*fg5 # CH2O
prod[3] = 0.05*fg3 + 0.05*fg5 # HCOOH
prod[4] = 0.5*fg2 + (0.5 + 0.5)*fg3 + 0.4*fg4 + 0.4*fg5 # CH3OH
prod[5] = (0.1 + 1.65)*fg2 + (0.1 + 0.35)*fg3 + (0.2 + 0.4)*fg4 + (0.2 + 0.4)*fg5 # CH4
prod[6] = 0 # Glyox
prod[7] = 0.3*fg2 + 0.2*fg3 + 0.5*fg4 + 0.65*fg5 # C2H4
prod[8] = 0.2*fg5 # C2H4O
prod[9] = 0 # HAA
prod[10] = 0 # C2H5OH
prod[11] = 0.2*fg5 # C3H6O
prod[12] = 0 # Xylan
prod[13] = 0 # Phenol
prod[14] = 0 # HMFU
prod[15] = 0 # LVG
prod[16] = 0 # Coumaryl
prod[17] = fe2macr # FE2MACR
prod[18] = 0.5*fg2 + 0.15*fg3 # H2
prod[19] = 1.5*fg2 + 0.9*fg3 + 0.6*fg4 + 0.95*fg5 # H2O
prod[20] = 10.15*fg2 + 4.15*fg3 + 6*fg4 + 5.5*fg5 # Char
# return arrays of main groups and products as mass fractions, (-)
return main/wood, prod/wood
# Products from Kinetic Scheme
# ------------------------------------------------------------------------------
def run_ranzi_2014(wtcell, wthemi, wtlig, temp, tmax):
step = 0.001 # time step, delta t
# tmax = 4 # max time, s
t = np.linspace(0, tmax, num=int(tmax/step)) # time vector
tot_step = len(t) # total number of time steps
# arrays for Ranzi main groups and products as mass fractions, (-)
pmcell, pcell = ranzicell(1, wt=wtcell, T=temp, dt=step, nt=tot_step) # cellulose
pmhemi, phemi = ranzihemi(1, wt=wthemi, T=temp, dt=step, nt=tot_step) # hemicellulose
pmligc, pligc = ranziligc(1, wt=wtlig, T=temp, dt=step, nt=tot_step) # lignin-c
pmligh, pligh = ranziligh(1, wt=wtlig, T=temp, dt=step, nt=tot_step) # lignin-h
pmligo, pligo = ranziligo(1, wt=wtlig, T=temp, dt=step, nt=tot_step) # lignin-o
# main cellulose groups as mass fraction, (-)
cell = pmcell[0]
g1cell = pmcell[1]
cella = pmcell[2]
lvg = pmcell[3]
g4cell = pmcell[4]
tcell = cell + g1cell + cella + lvg + g4cell # total cellulose
cell_main = {'Time (s)': t, 'cell': cell, 'g1cell': g1cell, 'cella': cella, 'lvg': lvg, 'g4cell': g4cell, 'tcell': tcell}
df_cell=pd.DataFrame(data=cell_main).set_index('Time (s)')
# main hemicellulose groups as mass fraction, (-)
hemi = pmhemi[0]
g1hemi = pmhemi[1]
g2hemi = pmhemi[2]
g3hemi = pmhemi[3]
g4hemi = pmhemi[4]
xyl = pmhemi[5]
themi = hemi + g1hemi + g2hemi + g3hemi + g4hemi + xyl # total hemicellulose
hemi_main = {'Time (s)': t, 'hemi': hemi, 'g1hemi': g1hemi, 'g2hemi': g2hemi, 'g3hemi': g3hemi, 'g4hemi': g4hemi, 'xyl': xyl, 'themi': themi}
df_hemi=pd.DataFrame(data=hemi_main).set_index('Time (s)')
# main lignin-c groups as mass fraction, (-)
ligc = pmligc[0]
g1ligc = pmligc[1]
g2ligc = pmligc[2]
tligc = ligc + g1ligc + g2ligc # total lignin-c
ligc_main = {'Time (s)': t, 'ligc': ligc, 'g1ligc': g1ligc, 'g2ligc': g2ligc, 'tligc': tligc}
df_ligc=
|
pd.DataFrame(data=ligc_main)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import os, sys, json, asyncio, pymongo, datetime, time, pandas as pd, math, re
sys.path.append(os.path.abspath(os.getcwd()))
from flask import jsonify, request, Blueprint
from bson.json_util import dumps, ObjectId
from api_server.enviroment.enviroment import db
from api_server.robots.get_status_code import get_status_code
from api_server.utils.gets import get_user
from api_server.routes.auth import login_required
dashboard = Blueprint("dashboard", __name__, url_prefix="/dashboard")
def clear_text(text):
text = text.lower().replace(' ', '_').replace('&', 'e').replace('á','a').replace('ã','a').replace('ç','c').replace('õ','o')
return text
def build_evocucao(tipo, lists):
build_categories = {}
list_dates = []
df =
|
pd.read_json(lists)
|
pandas.read_json
|
import csv
import libsbml
import rr_cache
import tempfile
import numpy as np
import pandas as pd
from copy import deepcopy
from logging import (
Logger,
getLogger
)
from os import (
path as os_path,
remove
)
from tempfile import NamedTemporaryFile
from typing import (
Dict,
Iterable,
)
from cobra import io as cobra_io
from cobra.io.sbml import _f_reaction
from cobra.medium.annotations import (
excludes,
sbo_terms
)
from rptools.rplibs.rpCompound import rpCompound
from rptools.rplibs.rpSBML import rpSBML
from rptools.rpfba import medium
from rptools.rpfba.medium import (
build_minimal_medium,
is_df_medium_defined,
load_medium_file,
read_medium_ids,
load_compounds,
create_rp_compound,
crossref_medium_id,
merge_medium,
merge_medium_exchange,
df_to_medium,
add_missing_specie
)
from main_rpfba import Main_rpfba
class Test_medium(Main_rpfba):
# TODO: import directly from module
__MEDIUM_DEFAULT_ID = 'not_predefined_model'
__MEDIUM_HEADER_NAME = 'medium_name'
__MEDIUM_HEADER_COMPOUND_ID = 'compound_id'
__MEDIUM_HEADER_BOUND = 'upper_bound'
__MEDIUM_HEADER_OPTIONAL = ['compound_annotation', 'compound_group']
__MEDIUM_HEADER = __MEDIUM_HEADER_OPTIONAL + [__MEDIUM_HEADER_BOUND, __MEDIUM_HEADER_COMPOUND_ID, __MEDIUM_HEADER_NAME]
def load_medium_file(self, filename):
medium = pd.read_csv(filename)
return create_rp_compound(
df=medium,
logger=self.logger
)
def setUp(self):
super().setUp()
# self.logger.setLevel('DEBUG')
# objects below have to be created for each test instance
# since some tests can modify them
def test_is_df_medium_defined(self):
# Return type
self.assertTrue(isinstance(is_df_medium_defined(None), bool))
# Values
self.assertFalse(is_df_medium_defined(None))
self.assertFalse(is_df_medium_defined(np.nan))
self.assertFalse(is_df_medium_defined(pd.DataFrame()))
self.assertFalse(is_df_medium_defined(pd.DataFrame(columns=['a'])))
self.assertFalse(is_df_medium_defined(pd.DataFrame(index=[0])))
self.assertTrue(is_df_medium_defined(pd.DataFrame(data=[1], columns=['a'], index=[0])))
def test_load_medium_file(self):
df = load_medium_file(os_path.join(self.medium_path, 'medium.io.a.tsv'))
# Return type
self.assertTrue(isinstance(df, pd.DataFrame))
# Basic io profile
self.assertTrue(is_df_medium_defined(df))
df = load_medium_file(os_path.join(self.medium_path, 'medium.io.d.csv'))
self.assertFalse(is_df_medium_defined(df))
df = load_medium_file(os_path.join(self.medium_path, 'medium.io.a.xlsx'))
self.assertFalse(is_df_medium_defined(df))
df = load_medium_file(os_path.join(self.medium_path, 'medium.io.a.csv'))
self.assertTrue(is_df_medium_defined(df))
# Type expected
self.assertTrue(pd.api.types.is_float_dtype(df[self.__MEDIUM_HEADER_BOUND]))
self.assertEqual(
sum(
df['rp_compound'].apply(lambda x: isinstance(x, rpCompound) or pd.isna(x))
),
len(df['rp_compound'])
)
# Challenge on column labels
df_columns = df.columns.tolist()
df_columns.remove('rp_compound')
self.assertEqual(
sorted(df_columns),
sorted(self.__MEDIUM_HEADER)
)
tmp_file = tempfile.NamedTemporaryFile(
suffix='.csv',
dir=self.temp_d,
delete=False
)
for ix in range(len(self.__MEDIUM_HEADER)):
tmp_header = deepcopy(self.__MEDIUM_HEADER)
tmp_header.pop(ix)
df_tmp = df[tmp_header]
df_tmp.to_csv(
tmp_file.name,
index=False
)
df_tmp = load_medium_file(tmp_file.name)
self.assertFalse(is_df_medium_defined(df_tmp))
tmp_file.close()
remove(tmp_file.name)
def test_read_medium_ids(self):
ids = read_medium_ids(os_path.join(self.medium_path, 'medium.io.b.csv'))
# Return type.
self.assertTrue(isinstance(ids, Iterable))
# Values.
self.assertEqual(
sorted([x for x in ids if not
|
pd.isna(x)
|
pandas.isna
|
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.core.indexes import PandasIndex, PandasMultiIndex, _asarray_tuplesafe
from xarray.core.variable import IndexVariable
def test_asarray_tuplesafe() -> None:
res = _asarray_tuplesafe(("a", 1))
assert isinstance(res, np.ndarray)
assert res.ndim == 0
assert res.item() == ("a", 1)
res = _asarray_tuplesafe([(0,), (1,)])
assert res.shape == (2,)
assert res[0] == (0,)
assert res[1] == (1,)
class TestPandasIndex:
def test_constructor(self) -> None:
pd_idx = pd.Index([1, 2, 3])
index = PandasIndex(pd_idx, "x")
assert index.index is pd_idx
assert index.dim == "x"
def test_from_variables(self) -> None:
var = xr.Variable(
"x", [1, 2, 3], attrs={"unit": "m"}, encoding={"dtype": np.int32}
)
index, index_vars = PandasIndex.from_variables({"x": var})
xr.testing.assert_identical(var.to_index_variable(), index_vars["x"])
assert index.dim == "x"
assert index.index.equals(index_vars["x"].to_index())
var2 = xr.Variable(("x", "y"), [[1, 2, 3], [4, 5, 6]])
with pytest.raises(ValueError, match=r".*only accepts one variable.*"):
PandasIndex.from_variables({"x": var, "foo": var2})
with pytest.raises(
ValueError, match=r".*only accepts a 1-dimensional variable.*"
):
PandasIndex.from_variables({"foo": var2})
def test_from_pandas_index(self) -> None:
pd_idx = pd.Index([1, 2, 3], name="foo")
index, index_vars = PandasIndex.from_pandas_index(pd_idx, "x")
assert index.dim == "x"
assert index.index is pd_idx
assert index.index.name == "foo"
xr.testing.assert_identical(index_vars["foo"], IndexVariable("x", [1, 2, 3]))
# test no name set for pd.Index
pd_idx.name = None
index, index_vars = PandasIndex.from_pandas_index(pd_idx, "x")
assert "x" in index_vars
assert index.index is not pd_idx
assert index.index.name == "x"
def test_to_pandas_index(self) -> None:
pd_idx = pd.Index([1, 2, 3], name="foo")
index = PandasIndex(pd_idx, "x")
assert index.to_pandas_index() is pd_idx
def test_query(self) -> None:
# TODO: add tests that aren't just for edge cases
index = PandasIndex(pd.Index([1, 2, 3]), "x")
with pytest.raises(KeyError, match=r"not all values found"):
index.query({"x": [0]})
with pytest.raises(KeyError):
index.query({"x": 0})
with pytest.raises(ValueError, match=r"does not have a MultiIndex"):
index.query({"x": {"one": 0}})
def test_query_datetime(self) -> None:
index = PandasIndex(
pd.to_datetime(["2000-01-01", "2001-01-01", "2002-01-01"]), "x"
)
actual = index.query({"x": "2001-01-01"})
expected = (1, None)
assert actual == expected
actual = index.query({"x": index.to_pandas_index().to_numpy()[1]})
assert actual == expected
def test_query_unsorted_datetime_index_raises(self) -> None:
index = PandasIndex(pd.to_datetime(["2001", "2000", "2002"]), "x")
with pytest.raises(KeyError):
# pandas will try to convert this into an array indexer. We should
# raise instead, so we can be sure the result of indexing with a
# slice is always a view.
index.query({"x": slice("2001", "2002")})
def test_equals(self) -> None:
index1 = PandasIndex([1, 2, 3], "x")
index2 = PandasIndex([1, 2, 3], "x")
assert index1.equals(index2) is True
def test_union(self) -> None:
index1 = PandasIndex([1, 2, 3], "x")
index2 = PandasIndex([4, 5, 6], "y")
actual = index1.union(index2)
assert actual.index.equals(pd.Index([1, 2, 3, 4, 5, 6]))
assert actual.dim == "x"
def test_intersection(self) -> None:
index1 = PandasIndex([1, 2, 3], "x")
index2 = PandasIndex([2, 3, 4], "y")
actual = index1.intersection(index2)
assert actual.index.equals(
|
pd.Index([2, 3])
|
pandas.Index
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import itertools
import warnings
from warnings import catch_warnings
from datetime import datetime
from pandas.types.common import (is_integer_dtype,
is_float_dtype,
is_scalar)
from pandas.compat import range, lrange, lzip, StringIO, lmap
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Timestamp, Timedelta, UInt64Index)
from pandas.formats.printing import pprint_thing
from pandas import concat
from pandas.core.common import PerformanceWarning
from pandas.tests.indexing.common import _mklbl
import pandas.util.testing as tm
from pandas import date_range
_verbose = False
# ------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [lrange(len(a)) for a in axes]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
# v = f
# for a in reversed(i):
# v = v.__getitem__(a)
# return v
with catch_warnings(record=True):
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj, key, axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
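# Small illustrative check (added; not part of the original test suite): _axify places the
# key on the requested axis and fills the remaining axes with full slices, so the same key
# can drive indexing on any axis. The toy frame below is an assumption for demonstration.
_axify_demo = DataFrame(np.arange(12).reshape(3, 4))
assert _axify(_axify_demo, [0, 2], 1) == (slice(None), [0, 2])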
class TestIndexing(tm.TestCase):
_objs = set(['series', 'frame', 'panel'])
_typs = set(['ints', 'uints', 'labels', 'mixed',
'ts', 'floats', 'empty', 'ts_rev'])
def setUp(self):
self.series_ints = Series(np.random.rand(4), index=lrange(0, 8, 2))
self.frame_ints = DataFrame(np.random.randn(4, 4),
index=lrange(0, 8, 2),
columns=lrange(0, 12, 3))
self.panel_ints = Panel(np.random.rand(4, 4, 4),
items=lrange(0, 8, 2),
major_axis=lrange(0, 12, 3),
minor_axis=lrange(0, 16, 4))
self.series_uints = Series(np.random.rand(4),
index=UInt64Index(lrange(0, 8, 2)))
self.frame_uints = DataFrame(np.random.randn(4, 4),
index=UInt64Index(lrange(0, 8, 2)),
columns=UInt64Index(lrange(0, 12, 3)))
self.panel_uints = Panel(np.random.rand(4, 4, 4),
items=UInt64Index(lrange(0, 8, 2)),
major_axis=UInt64Index(lrange(0, 12, 3)),
minor_axis=UInt64Index(lrange(0, 16, 4)))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4),
index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4, 4, 4),
items=list('abcd'),
major_axis=list('ABCD'),
minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4),
index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4, 4, 4),
items=[2, 4, 'null', 8])
self.series_ts = Series(np.random.randn(4),
index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4),
index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4),
items=date_range('20130101', periods=4))
dates_rev = (date_range('20130101', periods=4)
.sort_values(ascending=False))
self.series_ts_rev = Series(np.random.randn(4),
index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4),
index=dates_rev)
self.panel_ts_rev = Panel(np.random.randn(4, 4, 4),
items=dates_rev)
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self, '%s_%s' % (o, t), None)
setattr(self, o, d)
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs=None,
objs=None, axes=None, fails=None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim - 1:
return
def _print(result, error=None):
if error is not None:
error = str(error)
v = ("%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,"
"key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" %
(name, result, t, o, method1, method2, a, error or ''))
if _verbose:
pprint_thing(v)
try:
rs = getattr(obj, method1).__getitem__(_axify(obj, k1, a))
try:
xp = _get_result(obj, method2, k2, a)
except:
result = 'no comp'
_print(result)
return
detail = None
try:
if is_scalar(rs) and is_scalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
tm.assert_series_equal(rs, xp)
elif xp.ndim == 2:
tm.assert_frame_equal(rs, xp)
elif xp.ndim == 3:
tm.assert_panel_equal(rs, xp)
result = 'ok'
except AssertionError as e:
detail = str(e)
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
_print(result)
if not result.startswith('ok'):
raise AssertionError(detail)
except AssertionError:
raise
except Exception as detail:
# if the exception type is in fails, that's ok; otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error=detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes, (tuple, list)):
axes = [axes]
else:
axes = list(axes)
else:
axes = [0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self, o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_ix_deprecation(self):
# GH 15114
df = DataFrame({'A': [1, 2, 3]})
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
df.ix[1, 'A']
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s == 0] = 1
tm.assert_series_equal(s, expected)
def test_at_and_iat_get(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f, func)[i]
expected = _get_value(f, i, values)
tm.assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self, o)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_and_iat_set(self):
def _check(f, func, values=False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f, func)[i] = 1
expected = _get_value(f, i, values)
tm.assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self, t)
# iat
for f in [d['ints'], d['uints']]:
_check(f, 'iat', values=True)
for f in [d['labels'], d['ts'], d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
for f in [d['ints'], d['uints'], d['labels'],
d['ts'], d['floats']]:
_check(f, 'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days', '2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [lambda: s.iat[1], lambda: s.iloc[1]]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1, 1, 2, 2, 3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result, 2)
result = s.iat[2]
self.assertEqual(result, 2)
self.assertRaises(IndexError, lambda: s.iat[10])
self.assertRaises(IndexError, lambda: s.iat[-10])
result = s.iloc[[2, 3]]
expected = Series([2, 3], [2, 2], dtype='int64')
tm.assert_series_equal(result, expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
tm.assert_series_equal(result, expected)
result = df.iat[2, 0]
expected = 2
self.assertEqual(result, 2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(
np.random.random_sample((20, 5)),
index=['ABCDE' [x % 5] for x in range(20)])
expected = df.loc['A', 0]
result = df.loc[:, 0].loc['A']
tm.assert_series_equal(result, expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError,
'positional indexers are out-of-bounds'):
df.iloc[:, [0, 1, 2, 3, 4, 5]]
self.assertRaises(IndexError, lambda: df.iloc[[1, 30]])
self.assertRaises(IndexError, lambda: df.iloc[[1, -30]])
self.assertRaises(IndexError, lambda: df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda: s.iloc[[100]])
self.assertRaises(IndexError, lambda: s.iloc[[-100]])
# still raise on a single indexer
msg = 'single positional indexer is out-of-bounds'
with tm.assertRaisesRegexp(IndexError, msg):
df.iloc[30]
self.assertRaises(IndexError, lambda: df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise
# an IndexError
with tm.assertRaisesRegexp(IndexError, msg):
s.iloc[30]
self.assertRaises(IndexError, lambda: s.iloc[-30])
# slices are ok
result = df.iloc[:, 4:10] # 0 < start < len < stop
expected = df.iloc[:, 4:]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -4:-10] # stop < 0 < start < len
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:, :4:-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:, 4::-1]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:4] # start < 0 < stop < len
expected = df.iloc[:, :4]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:4] # 0 < stop < len < start
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, -10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
result = df.iloc[:, 10:11] # 0 < len < start < stop
expected = df.iloc[:, :0]
tm.assert_frame_equal(result, expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
tm.assert_series_equal(result, expected)
result = s.iloc[30:]
expected = s.iloc[:0]
tm.assert_series_equal(result, expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
tm.assert_series_equal(result, expected)
# doc example
def check(result, expected):
str(result)
result.dtypes
tm.assert_frame_equal(result, expected)
dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
check(dfl.iloc[4:6], dfl.iloc[[4]])
self.assertRaises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
self.assertRaises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
self.assertTrue(isnull(result))
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df =
|
concat([df2, df1], axis=1)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 12:04:06 2020
@author: Feucht
"""
#test github
import numpy as np
import random
import matplotlib.pyplot as plot
from datetime import datetime
import pandas as pd
"-------------------------- CLASSES ------------------------------"
class point6D:
""" First three parameters are for location, next three for the velocities """
def __init__(self,x,y,z,v_x,v_y,v_z):
self.location = np.array([x,y,z])
self.velocity = np.array([v_x,v_y,v_z])
self.velocityMagnitude= np.sqrt(self.velocity[0]**2+self.velocity[1]**2+self.velocity[2]**2)
class body:
""" Body contains the 6D locational parameters, the mass, and an optional name """
def __init__(self,point6D,mass=0,name=""):
self.point6D = point6D
self.mass = mass
self.name = name
"-------------------------- FUNCTIONS ------------------------------"
def run_simulation(bodies_List,timeStep,numberOfSteps):
completeListOfSimulatedBodies = [] # Generate List for all bodies which will contain for each body the point6D data for each step
for currentBody in bodies_List: # Init all body entries in the list completeListOfSimulatedBodies
completeListOfSimulatedBodies.append({"name":currentBody.name, "x":[], "y":[], "z":[],"vx":[],"vy":[],"vz":[],"velocityMagnitude":[]}) # Init: Add body to list with point6D data for each step calculated
# same as in the main for-loop below; kept here (commented out) so the initial values could also be stored in the list
# for index, body_location in enumerate(completeListOfSimulatedBodies):
# body_location["x"].append(bodies_List[index].point6D.location[0])
# body_location["y"].append(bodies_List[index].point6D.location[1])
# body_location["z"].append(bodies_List[index].point6D.location[2])
# body_location["vx"].append(bodies_List[index].point6D.velocity[0])
# body_location["vy"].append(bodies_List[index].point6D.velocity[1])
# body_location["vz"].append(bodies_List[index].point6D.velocity[2])
# body_location["velocityMagnitude"].append(bodies_List[index].point6D.velocityMagnitude)
for i in range(0,numberOfSteps): # Loop over steps
computeSingleStepOnAll(bodies_List,timeStep)
#now add the entries in the dictionary
report_freq=7
if i % report_freq == 0:
for index, body_location in enumerate(completeListOfSimulatedBodies):
body_location["x"].append(bodies_List[index].point6D.location[0])
body_location["y"].append(bodies_List[index].point6D.location[1])
body_location["z"].append(bodies_List[index].point6D.location[2])
body_location["vx"].append(bodies_List[index].point6D.velocity[0])
body_location["vy"].append(bodies_List[index].point6D.velocity[1])
body_location["vz"].append(bodies_List[index].point6D.velocity[2])
body_location["velocityMagnitude"].append(np.sqrt(bodies_List[index].point6D.velocity[0]**2+bodies_List[index].point6D.velocity[1]**2+bodies_List[index].point6D.velocity[2]**2))
print(f"step {i+1} {datetime.now()}")
return completeListOfSimulatedBodies
def computeSingleStepOnAll(bodies_List,timeStep):
computeNewVelocitiesOnAll(bodies_List,timeStep)
computeNewLocationsOnAll(bodies_List,timeStep)
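# Note (added comment): updating every velocity first and only then every position makes
# this a semi-implicit (Euler-Cromer) step, which tends to keep the total energy better
# bounded over long runs than a plain explicit Euler update would.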
def computeNewVelocitiesOnAll(bodies_List,timeStep):
for body_index, currentBody in enumerate(bodies_List):
acceleration3D=calculateSingleBodyAcceleration(bodies_List,body_index)
currentBody.point6D.velocity += acceleration3D*timeStep
def computeNewLocationsOnAll(bodies_List,timeStep):
for currentBody in bodies_List:
currentBody.point6D.location[0] += currentBody.point6D.velocity[0] * timeStep
currentBody.point6D.location[1] += currentBody.point6D.velocity[1] * timeStep
currentBody.point6D.location[2] += currentBody.point6D.velocity[2] * timeStep
def calculateSingleBodyAcceleration(bodies_List,body_index):
G_const = 6.67408e-11 #m3 kg-1 s-2
acceleration3D=np.array([0.0,0.0,0.0]) # INIT
i=body_index
currentBody=bodies_List[body_index]
for j, otherBody in enumerate(bodies_List): #
if j != i: # skip self-interaction (i == j); otherwise add the acceleration contribution from every other body
distance=np.linalg.norm(currentBody.point6D.location-otherBody.point6D.location)
softening=0.0**2 # softening parameter; should always be greater than 0
MasterCalculation= (G_const * otherBody.mass*currentBody.mass)/(distance**2+softening**2)**(3/2)
acceleration3D[0]+=(MasterCalculation * (otherBody.point6D.location[0]-currentBody.point6D.location[0]))/currentBody.mass
acceleration3D[1]+=(MasterCalculation * (otherBody.point6D.location[1]-currentBody.point6D.location[1]))/currentBody.mass
acceleration3D[2]+=(MasterCalculation * (otherBody.point6D.location[2]-currentBody.point6D.location[2]))/currentBody.mass
return acceleration3D # resulting acceleration after summing the contributions of all other bodies
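# Illustrative check (added; not part of the original script): for two bodies the loop
# above reduces to a_i = G * m_j * (r_j - r_i) / (d**2 + softening**2)**1.5, i.e. the
# body's own mass cancels out of its acceleration. The toy masses and positions below are
# assumptions for demonstration only.
_demo_bodies = [body(point6D(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), mass=1.0, name="a"),
                body(point6D(1.0, 0.0, 0.0, 0.0, 0.0, 0.0), mass=2.0, name="b")]
_demo_acc = calculateSingleBodyAcceleration(_demo_bodies, 0) # roughly 2*G along +x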
def plot_output(bodies, outfile = None):
fig = plot.figure(num=1,figsize=(20, 20), dpi=300)
colours = ['r','b','g','y','m','c']
ax1 = fig.add_subplot(4,1,1, projection='3d',)
ax2 = fig.add_subplot(4,1,2,)
ax3 = fig.add_subplot(4,1,3,)
ax4 = fig.add_subplot(4,1,4,)
max_range = 0
for current_body in bodies:
if current_body["name"]==massMassiveName:
max_dim = max(max(current_body["x"]),max(current_body["y"]),max(current_body["z"]))
if max_dim > max_range:
max_range = max_dim
c = random.choice(colours) #for plotting
ax1.plot(current_body["x"], current_body["y"], current_body["z"], 'r--', label = current_body["name"])
ax2.plot(current_body["x"], current_body["z"], 'r--', label = current_body["name"])
ax3.plot(current_body["x"], current_body["y"], 'r--', label = current_body["name"])
ax4.plot(current_body["y"], current_body["z"], 'r--', label = current_body["name"])
else:
None
ax1.set_xlim([-2500,15000])
ax1.set_ylim([-10000,15000])
ax1.set_zlim([-10000,15000])
ax1.set_xlabel('x label') # Add an x-label to the axes.
ax1.set_ylabel('y label') # Add a y-label to the axes.
ax1.set_zlabel('z label')
ax1.set_title("3D") # Add a title to the axes.
# ax1.legend()
ax2.set_xlim([-2500,15000])
ax2.set_ylim([-10000,15000])
ax2.set_xlabel('x label') # Add an x-label to the axes.
ax2.set_ylabel('z label') # Add a y-label to the axes.
ax2.set_title("x-z von vorne drauf")
# ax2.legend()
ax3.set_xlim([-max_range*1.1,max_range*1.1])
ax3.set_ylim([-max_range*1.1,max_range*1.1])
ax3.set_xlabel('x label') # Add an x-label to the axes.
ax3.set_ylabel('y label') # Add a y-label to the axes.
ax3.set_title("x-y Von oben drauf")
# ax3.legend()
ax4.set_xlim([-2500,1500])
ax4.set_ylim([-10000,15000])
ax4.set_xlabel('y label') # Add an x-label to the axes.
ax4.set_ylabel('z label') # Add a y-label to the axes.
ax4.set_title("y-z von der Seite drauf")
#ax4.legend()
ax1.legend().set_visible(False)
ax2.legend().set_visible(False)
ax3.legend().set_visible(False)
ax4.legend().set_visible(False)
plot.tight_layout(pad=1, w_pad=3, h_pad=1)
if outfile:
plot.savefig(outfile)
else:
plot.show()
"-------------------------- PROGRAM ------------------------------"
print(f"start {0} {datetime.now()}")
x_bound = 5000 # positions are drawn from -x_bound to x_bound
y_bound = 50000
z_bound = 5000
massSmallObjects=5e14
massMassiveObject=5e15
massMassiveName="Massive"
bodies_List=[]
TotalBodies=200
timeStep=0.5 # time step in seconds
numberOfSteps=100000
for i in range(TotalBodies-1):
random.seed(i)
bodies_List.append((body(point6D(random.uniform(-x_bound,x_bound),random.uniform(-y_bound,y_bound),random.uniform(-z_bound,z_bound), random.gauss(0, 0.25),random.gauss(0, 0.25),random.gauss(0,0.25)), massSmallObjects,name=f"koerper{i}")))
#Massiv Object
bodies_List.append((body(point6D(-5500.5,0.0, 0.0,25,0.0,0.0),massMassiveObject,name=massMassiveName)))
calculatedSim=run_simulation(bodies_List, timeStep, numberOfSteps)
print(f"Do plots {datetime.now()}")
plot_output(calculatedSim,"test2.png")
table_velocityMagnitude=list()
for index,entry in enumerate(calculatedSim):
table_velocityMagnitude.append(entry["velocityMagnitude"])
df1=
|
pd.DataFrame(table_velocityMagnitude)
|
pandas.DataFrame
|
"""
Load data to database
"""
import os
import pandas as pd
def apply_adjustment(df, adj_date, adj_value,
adj_type='mul',date_col='date',
cols=['open','high', 'low', 'close']):
"""
Apply adjustment to a given stock
df
dataframe of the given stock
adj_date
date from which the adjustment is
to be made
adj_value
value to be adjusted
adj_type
method of adjustment **mul/sub**
mul means multiply all the values
such as splits and bonuses
sub means subtract the values
such as dividends
date_col
date column on which the adjustment
is to be applied
cols
columns to which the adjustment is to
be made
Notes
-----
1) You can use negative values to add to the
stock value by using **adj_type=sub**
2) Adjustment is applied to all dates prior to
the adjustment date
3) In case your dataframe has date or
symbol as indexes, reset them
"""
df = df.set_index(date_col).sort_index()
values_on_adj_date = df.loc[adj_date, cols].copy()
if adj_type == "mul":
adjusted_values = (df.loc[:adj_date, cols] * adj_value).round(2)
elif adj_type == "sub":
adjusted_values = (df.loc[:adj_date, cols] - adj_value).round(2)
else:
raise ValueError('adj_type should be either mul or sub')
df.loc[:adj_date, cols] = adjusted_values
df.loc[adj_date, cols] = values_on_adj_date
return df.reset_index()
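# Illustrative usage of apply_adjustment (added; not part of the original module): halve
# all prices strictly before the adjustment date, as for a 2:1 split. The tiny frame below
# is an assumption for demonstration only.
_split_demo = pd.DataFrame({
    'date': pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-03']),
    'open': [100.0, 102.0, 51.0], 'high': [101.0, 103.0, 52.0],
    'low': [99.0, 101.0, 50.0], 'close': [100.5, 102.5, 51.5],
})
_split_adjusted = apply_adjustment(_split_demo, adj_date=pd.Timestamp('2020-01-02'), adj_value=0.5)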
class DataLoader(object):
"""
Data Loader class
"""
def __init__(self, directory, mode='HDF', engine=None,
tablename=None):
"""
Initialize parameters
directory
directory to search files
mode
HDF/SQL - should be explicitly specified
engine
filename in case of HDF
SQL Alchemy connection string in case of engine
tablename
table where data is to be written
parse dates
list of columns to be parsed as date
"""
if mode not in ['SQL', 'HDF']:
raise TypeError('No mode specified; should be HDF or SQL')
self.directory = directory
self.mode = mode
self.engine = engine
self.tablename = tablename
def _initialize_HDF_file(self):
import hashlib
hash = hashlib.sha1().hexdigest()
with pd.HDFStore(self.engine) as store:
s = pd.Series(['hash'*2])
if len(store.keys()) == 0:
store.append('updated/'+self.tablename, s)
def _write_to_HDF(self, **kwargs):
"""
Write data to HDF file
"""
update_table = '/updated/' + self.tablename
data_table = '/data/' + self.tablename
updated_list = []
with pd.HDFStore(self.engine) as store:
if update_table in store.keys():
updated_list = store.get(update_table).values
if kwargs.get('columns'):
columns = kwargs.pop('columns')
else:
columns = None
if kwargs.get('parse_dates'):
parse_dates = kwargs.get('parse_dates')
else:
parse_dates = None
if kwargs.get('postfunc'):
postfunc = kwargs.pop('postfunc')
else:
postfunc = None
# Iterating over the files
for root, direc, files in os.walk(self.directory):
for file in files:
if file not in updated_list:
filename = os.path.join(root, file)
df = pd.read_csv(filename, **kwargs)
df = df.rename(str.lower, axis='columns')
if columns:
df = df.rename(columns, axis='columns')
if not(parse_dates):
date_cols = ['date', 'time', 'datetime', 'timestamp']
for c in df.columns:
if c in date_cols:
df[c] = pd.to_datetime(df[c])
if postfunc:
df = postfunc(df, file, root)
df.to_hdf(self.engine, key=data_table, format='table',
append=True, data_columns=True)
# Updating the file data
pd.Series([file]).to_hdf(self.engine, key=update_table,
format='table', append=True)
def _write_to_SQL(self, **kwargs):
"""
Write data to SQL database
"""
update_table = 'updated_' + self.tablename
data_table = self.tablename
updated_list = []
if self.engine.has_table(update_table):
updated_list = pd.read_sql_table(update_table, self.engine).values
if kwargs.get('columns'):
columns = kwargs.pop('columns')
else:
columns = None
if kwargs.get('parse_dates'):
parse_dates = kwargs.get('parse_dates')
else:
parse_dates = None
if kwargs.get('postfunc'):
postfunc = kwargs.pop('postfunc')
else:
postfunc = None
# Iterating over the files
for root, direc, files in os.walk(self.directory):
for file in files:
if file not in updated_list:
filename = os.path.join(root, file)
df = pd.read_csv(filename, **kwargs)
df = df.rename(str.lower, axis='columns')
if columns:
df = df.rename(columns, axis='columns')
if not(parse_dates):
date_cols = ['date', 'time', 'datetime', 'timestamp']
for c in df.columns:
if c in date_cols:
df[c] = pd.to_datetime(df[c])
if postfunc:
df = postfunc(df, file, root)
s = pd.Series([file])
df.to_sql(data_table, con=self.engine, if_exists='append',
index=False, chunksize=1500)
# Updating the file data
s.to_sql(update_table, con=self.engine, if_exists='append',
index=False, chunksize=1500)
def load_data(self, **kwargs):
"""
Load data into database
kwargs
columns
column names as dictionary
with key being column name from file
and value being the column to be renamed
```
{'OPENING': 'open', 'CLOSING': 'close'}
```
parse_dates
columns to be parsed as list
If not given, any column with name
date, datetime, time, timestamp is
automatically parsed
postfunc
function to be run after reading the csv file
kwargs
Any other arguments to the pandas read_csv function
"""
if self.mode == 'HDF':
self._write_to_HDF(**kwargs)
else:
self._write_to_SQL(**kwargs)
def apply_splits(self, directory='adjustments',
filename='splits.csv', symbol='symbol', timestamp='date'):
"""
Apply splits recursively
By default, only open, high, low, close and volume columns
are modified
"""
filename = os.path.join(directory, filename)
try:
splits = pd.read_csv(filename, parse_dates=[timestamp])
except Exception as e:
print(e)
return
if self.mode == 'SQL':
df = pd.read_sql_table(self.tablename, self.engine)
for i, row in splits.iterrows():
q = 'symbol == "{sym}"'
temp = df.query(q.format(sym=row.at[symbol]))
params = {
'adj_date': row.at[timestamp],
'adj_value': row.at['from']/row.at['to'],
'adj_type': 'mul',
'date_col': timestamp,
'cols': ['open', 'high', 'low', 'close']
}
temp = apply_adjustment(temp, **params)
params.update({
'adj_value': row.at['to'] / row.at['from'],
'cols': ['volume']
})
temp = apply_adjustment(temp, **params)
cols = ['open', 'high', 'low', 'close', 'volume']
temp.index = df.loc[df[symbol] == row.at[symbol]].index
df.loc[temp.index] = temp
df.to_sql(self.tablename, self.engine, if_exists='replace', index=False)
elif self.mode == 'HDF':
df = pd.read_hdf(self.engine, '/data/'+ self.tablename)
df.index = range(len(df))
for i, row in splits.iterrows():
q = 'symbol == "{sym}"'
temp = df.query(q.format(sym=row.at[symbol]))
params = {
'adj_date': row.at[timestamp],
'adj_value': row.at['from']/row.at['to'],
'adj_type': 'mul',
'date_col': timestamp,
'cols': ['open', 'high', 'low', 'close']
}
temp = apply_adjustment(temp, **params)
params.update({
'adj_value': row.at['to'] / row.at['from'],
'cols': ['volume']
})
temp = apply_adjustment(temp, **params)
cols = ['open', 'high', 'low', 'close', 'volume']
temp.index = df.loc[df[symbol] == row.at[symbol]].index
df.loc[temp.index] = temp
df.to_hdf(self.engine, key='/data/'+self.tablename, format='table',
data_columns=True)
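# Illustrative usage sketch (added; not part of the original module). The directory, HDF
# file name and table name below are hypothetical:
#
#   loader = DataLoader(directory='data/daily', mode='HDF',
#                       engine='eod.h5', tablename='eod')
#   loader.load_data(columns={'OPENING': 'open', 'CLOSING': 'close'})
#   loader.apply_splits(directory='adjustments', filename='splits.csv')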
def collate_data(directory, function=None, concat=True, **kwargs):
"""
Given a directory of csv files with similar structure,
create a dataframe by concatenating all files
directory
directory with the files. All files should
be of the same structure and there should
be no sub-directory inside it
function
function to be run on each file
By default, pandas read_csv function is
run on each file. If you specify your own
function, it should have only filename
as its argument and must return a dataframe
concat
whether you want to concat results into a
single dataframe
default **True**
if False, a list is returned
kwargs
kwargs for the pandas read_csv function
Note
-----
If your files cannot be read into a dataframe, pass your
own function and set concat=False to return a list
"""
collect = []
for root, directory, files in os.walk(directory):
for file in files:
filename = os.path.join(root, file)
if function is None:
temp = pd.read_csv(filename, **kwargs)
else:
temp = function(filename)
collect.append(temp)
if concat:
result =
|
pd.concat(collect)
|
pandas.concat
|
"""Class for intent operations - training, predict"""
import os
import re
import json
import datetime
import joblib
import numpy as np
import pandas as pd
from typing import List, Union
from sklearn.model_selection import GridSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import StandardScaler, MultiLabelBinarizer
from sklearn.ensemble import RandomForestClassifier
from .base import DatasetBunch, RuleBunch, Classifier
from .utils import get_intent_labels, make_dir
from .transformer import PercentSVD
DEFAULT_FOLDER = os.path.join(os.getcwd(), "models")
class OneClassClassifier(Classifier):
"""Classifier used for dataset which has only one class."""
def __init__(self, intent: str):
self._intent = intent
def predict(self, words: str="", context: Union[str, dict]=None) -> List[str]:
return [self._intent]
class RuleClassifier(Classifier):
"""Rule-based classifier"""
def __init__(self, rule_bunch: RuleBunch):
self._patterns = [re.compile(r) if r else None
for r in rule_bunch.words_rules]
try:
self._context_rules = \
[json.loads(r) if r else {} for r in rule_bunch.context_rules] \
if rule_bunch.context_rules else []
except AttributeError:
self._context_rules = []
self._intent_labels = rule_bunch.intent_labels
def predict(self, words: str="", context: Union[str, dict]=None) -> List[str]:
"""
Predict intent labels according to word patterns and a comparison
between the given context and the stored context rules.
Parameters
----------
words: user input
context: context information
Returns
-------
List of predicted labels, or an empty list if nothing matches.
"""
def context_match(context: dict, rule_context: dict) -> bool:
if not rule_context:
return True
else:
return False if not context else \
all(rule_context.get(k) == v for k, v in context.items())
# make sure the context to be a dict
if not context:
context = {}
else:
if isinstance(context, str):
context = json.loads(context)
if not words and not context:
return []
intent_labels = []
for i, pattern in enumerate(self._patterns):
if not self._context_rules:
if pattern.match(words):
for label in self._intent_labels[i]:
intent_labels.append(label)
else:
if pattern.match(words) and \
context_match(context, self._context_rules[i]):
for label in self._intent_labels[i]:
intent_labels.append(label)
return intent_labels
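# Illustrative usage sketch (added; not part of the original module). Only the attribute
# names used above (words_rules, context_rules, intent_labels) come from this file -- the
# RuleBunch construction below is a hypothetical example:
#
#   bunch = RuleBunch(words_rules=[r"^turn (on|off) the light$"],
#                     context_rules=['{"room": "kitchen"}'],
#                     intent_labels=[["light_control"]])
#   RuleClassifier(bunch).predict("turn on the light", context={"room": "kitchen"})
#   # -> ["light_control"]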
class ModelClassifier(Classifier):
def __init__(self, folder: str=DEFAULT_FOLDER, customer: str="common",
lang="en", n_jobs=None):
"""
Parameters
----------
folder: The folder to save the final models.
customer: Name used to distinguish different customers.
lang: Language, "en" for English or "cn" for Chinese.
n_jobs : n_jobs in GridSearchCV, int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
"""
self._folder = folder
self._customer = customer
self._lang = lang
self._n_jobs = n_jobs
self._classifiers = {}
self._mlbs = {}
self._reports = {}
def fit(self, data_bunch: DatasetBunch):
"""
Fit with GridSearchCV method to find the optimal parameters.
Disassemble the intents in form of multi-levels to get sub-datasets
and train models using these sub-datasets.
Parameters
----------
data_bunch: Data bunch instance with texts, extended_features, intents.
"""
def make_choice(labels: str, prefixs: set) -> bool:
for label in labels.replace(" ", "").split(","):
for prefix in prefixs:
if label.startswith(prefix):
return True
else:
return False
def make_labels(labels_data: np.array, label_set: set) -> List[List[str]]:
labels = []
for labels_str in labels_data:
lbls = []
for label in labels_str.replace(" ", "").split(","):
lbls += [lbl for lbl in label_set if label.startswith(lbl)]
labels.append(lbls)
return labels
make_choice_vect = np.vectorize(make_choice)
for clf_name, label_set in get_intent_labels(data_bunch.intents).items():
if len(label_set) == 1:
self._classifiers[clf_name] = \
OneClassClassifier(list(label_set)[0])
self._reports[clf_name] = {"clf_type": "OneClassClassifier"}
else:
choices = make_choice_vect(data_bunch.intents, label_set)
mlb = MultiLabelBinarizer(classes=list(label_set))
search = self._fit(
X=pd.DataFrame({
"words": data_bunch.words[choices],
"contexts": [json.loads(c) if c else {}
for c in data_bunch.contexts[choices]]}),
y=mlb.fit_transform(
make_labels(data_bunch.intents[choices], label_set))
)
self._classifiers[clf_name] = search.best_estimator_
self._mlbs[clf_name] = mlb
self._reports[clf_name] = {
"clf_type": "sklearn-classifier",
"scoring": search.scoring,
"cv": search.cv,
"best_params": search.best_params_,
"best_score": search.best_score_,
}
def _fit(self, X: pd.DataFrame, y: np.array):
"""Fit classifier
Parameters
----------
# X: pd.DataFrame with columns "words" and "contexts".
X: tuple of "words" and "contexts".
y: intent labels
Returns
-------
Instance of sklearn classifier or OneClassClassifier.
"""
def has_context(contexts):
if contexts.empty:
return False
for context in contexts:
if not context:
continue
if json.loads(context):
return True
else:
return False
if has_context(X["contexts"]):
vectorizer = ColumnTransformer([
# words to vectors
("words2vect",
TfidfVectorizer(token_pattern=r"(?u)(\{\w+\}|\w+)"),
"words"),
# contexts to vectors
("contexts2vect", DictVectorizer(), "contexts")
])
else:
vectorizer = ColumnTransformer([
# words to vectors
("words2vect",
TfidfVectorizer(token_pattern=r"(?u)(\{\w+\}|\w+)"),
"words")
])
pipeline = Pipeline([
# transform words and contexts to vectors
("vectorizer", vectorizer),
# feature values standardization
("scaler", StandardScaler(with_mean=False)),
# dimensionality reduction
# ("svd", PercentSVD()),
# classifier
("clf", RandomForestClassifier())
# ("clf", MLPClassifier(max_iter=1000, hidden_layer_sizes=(50, 50)))
])
params = {
# "svd__percent": np.linspace(0.1, 1, 10), # todo
"clf__n_estimators": range(5, 100, 5),
"clf__max_features": [None, "sqrt", "log2"],
"clf__class_weight": ["balanced", "balanced_subsample", None],
# "clf__hidden_layer_sizes": [(n,) for n in range(10, 110, 10)],
# "clf__activation": ["identity", "logistic", "tanh", "relu"],
# "clf__solver": ["lbfgs", "sgd", "adam"],
# "clf__learning_rate": ["constant", "invscaling", "adaptive"]
}
search = GridSearchCV(estimator=pipeline, param_grid=params, cv=5,
n_jobs=self._n_jobs)
search.fit(X, y)
return search
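# Note (added comment): GridSearchCV above evaluates every combination in `params` with
# 5-fold cross-validation, so the number of fits is
# len(n_estimators) * len(max_features) * len(class_weight) * 5; shrinking the grid or
# constructing ModelClassifier with n_jobs=-1 is the usual way to keep this tractable.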
def predict(self, words: str="", context: Union[str, dict]=None) -> List[str]:
"""
Parameters
----------
words: user input
context: context information
Returns
-------
List of predicted labels.
"""
if not context:
X = pd.DataFrame({"words": [words], "contexts": ["{}"]})
else:
if isinstance(context, str):
X =
|
pd.DataFrame({"words": [words], "contexts": [context]})
|
pandas.DataFrame
|
#!/usr/bin/env python3
import pandas as pd
import matplotlib.pyplot as plt
def app(r, tutu):
ret = 0
if tutu is not None:
if r['Religeon.1.Name'] == '1.'+tutu:
ret += r['Religeon.1.Population']
if r['Religeon.2.Name'] == '2.'+tutu:
ret += r['Religeon.2.Population']
if r['Religeon.3.Name'] == '3.'+tutu:
ret += r['Religeon.3.Population']
else:
if not (r['Religeon.1.Name'] == '1.Hindus' or
r['Religeon.1.Name'] == '1.Muslims' or
r['Religeon.1.Name'] == '1.Christians'):
ret += r['Religeon.1.Population']
if not (r['Religeon.2.Name'] == '2.Hindus' or
r['Religeon.2.Name'] == '2.Muslims' or
r['Religeon.2.Name'] == '2.Christians'):
ret += r['Religeon.2.Population']
if not (r['Religeon.3.Name'] == '3.Hindus' or
r['Religeon.3.Name'] == '3.Muslims' or
r['Religeon.3.Name'] == '3.Christians'):
ret += r['Religeon.3.Population']
return ret
def explore(f):
df = pd.read_csv(f)
# Now something else
# print(df['Religeon.3.Name'][:20])
# Christians
df['Total'] = (df['Religeon.1.Population'] +
df['Religeon.2.Population'] + df['Religeon.3.Population'])
df['Hindus'] = df.apply(lambda x: app(x, 'Hindus'), axis=1)
df['Muslims'] = df.apply(lambda x: app(x, 'Muslims'), axis=1)
df['Christians'] = df.apply(lambda x: app(x, 'Christians'), axis=1)
df['Others'] = df.apply(lambda x: app(x, None), axis=1)
df_states = df.groupby(['State'])
rels = df_states[['Hindus', 'Muslims', 'Christians',
'Others', 'Total']].sum()
rels['Hindus'] = (rels['Hindus'] / rels['Total']) * 100.0
rels['Muslims'] = (rels['Muslims'] / rels['Total']) * 100.0
rels['Christians'] = (rels['Christians'] / rels['Total']) * 100.0
rels['Others'] = (rels['Others'] / rels['Total']) * 100.0
rels = rels[['Hindus', 'Muslims', 'Christians', 'Others']]
# ax = rels.plot(kind='bar')
# ax.set_xticklabels(rels.index, rotation='vertical')
# Literacy rate/state.
lit = df_states[['Males..Literate', 'Females..Literate',
'Persons..literate', 'Total']].sum()
lit['popu_lit_rate'] = lit['Persons..literate']/lit['Total']*100
lit['male_lit_rate'] = (lit['Males..Literate'] /
lit['Persons..literate']*100)
lit['female_lit_rate'] = (lit['Females..Literate'] /
lit['Persons..literate']*100)
lit = lit[['popu_lit_rate', 'female_lit_rate', 'male_lit_rate']]
# lit = lit[['male_lit_rate', 'female_lit_rate',
# 'popu_lit_rate']].join(rels)
lit1 = lit[['popu_lit_rate']].join(rels)
print(lit1.sort_values(by='popu_lit_rate', ascending=False))
lit2 = lit[['male_lit_rate']].join(rels)
lit3 = lit[['female_lit_rate']].join(rels)
lit1.plot(kind='bar')
lit2.plot(kind='bar')
lit3.plot(kind='bar')
# set_xticklabels(lit.index, rotation='vertical')
plt.show()
def gt1(f):
df = pd.read_csv(f, encoding="ISO-8859-1")
df_country = df[df['success'] == 1].groupby('country_txt')
df_country_c = df_country['eventid'].count()
df_country_c = df_country_c.sort_values(ascending=False)
ax = df_country_c[:10].plot(kind='barh', grid=True)
ax.set_ylabel('Country')
ax.set_xlabel('Number of successful terror attacks')
ax.get_figure().savefig('/tmp/top10.png', bbox_inches='tight')
def gt2(f):
df = pd.read_csv(f, encoding="ISO-8859-1")
f_india_tot = df[df['country_txt'] == 'India'].groupby('iyear').count()
f_pak_tot = df[df['country_txt'] == 'Pakistan'].groupby('iyear').count()
# India
df_india = df[(df['country_txt'] == 'India')
& (df['success'] == 1)]
df_india_year = df_india.groupby('iyear')
f_india = df_india_year.count()
# Pakistan
df_pak = df[(df['country_txt'] == 'Pakistan')
& (df['success'] == 1)]
df_pak_year = df_pak.groupby('iyear')
f_pak = df_pak_year.count()
f_countries = pd.DataFrame(index=f_india.index)
f_countries['India'] = f_india['eventid']
f_countries['Pakistan'] = f_pak['eventid']
axi = f_countries.plot(kind='bar', grid=True)
axi.set_ylabel('Number of successful terror attacks')
axi.set_xlabel('Year')
axi.set_xticklabels(f_countries.index.map(lambda x: "'"+str(x)[2:]))
axi.get_figure().savefig('/tmp/india_pak.png', bbox_inches='tight')
f_eff =
|
pd.DataFrame(index=f_india.index)
|
pandas.DataFrame
|
import os
import logging
import pandas as pd
import datetime as dt
import shortuuid
import json
import re
from azureml.core import Run
from azureml.core import Dataset
from azureml.core import Experiment
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from azureml.train.automl import AutoMLConfig
from azureml.train.automl.run import AutoMLRun
from azureml.automl.core.featurization import FeaturizationConfig
from .project import AzureProject
from .exceptions import AzureException
from a2ml.api.utils.decorators import error_handler, authenticated
from .credentials import Credentials
from a2ml.api.utils.formatter import print_table
from a2ml.api.azure.dataset import AzureDataset
from a2ml.api.utils import fsclient
class AzureExperiment(object):
def __init__(self, ctx):
super(AzureExperiment, self).__init__()
self.ctx = ctx
self.credentials = Credentials(self.ctx).load()
@error_handler
@authenticated
def list(self):
ws = AzureProject(self.ctx)._get_ws()
experiments = Experiment.list(workspace=ws)
nexperiments = len(experiments)
experiments = [e.name for e in experiments]
for name in experiments:
self.ctx.log(name)
self.ctx.log('%s Experiment(s) listed' % str(nexperiments))
return {'experiments': experiments}
@staticmethod
def _map_metric_a2ml_to_azure(metric):
if metric == "r2":
metric = "r2_score"
elif metric == "precision_weighted":
metric == "precision_score_weighted"
return metric
@staticmethod
def _map_metric_azure_to_a2ml(metric):
if metric == "r2_score":
metric = "r2"
elif metric == "precision_score_weighted":
metric == "precision_weighted"
return metric
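# Added comment: the two helpers above only translate the r2 / precision_weighted metric
# names between the a2ml and Azure AutoML spellings and pass everything else through, e.g.
#   _map_metric_a2ml_to_azure("r2") -> "r2_score"
#   _map_metric_azure_to_a2ml("accuracy") -> "accuracy"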
@error_handler
@authenticated
def start(self):
model_type = self.ctx.config.get('model_type')
if not model_type:
raise AzureException('Please specify model type...')
if model_type == 'timeseries':
model_type = 'forecasting'
primary_metric = self._map_metric_a2ml_to_azure(self.ctx.config.get('experiment/metric'))
if not primary_metric:
raise AzureException('Please specify primary metric...')
#TODO: check if primary_metric is consistent with model_type
target = self.ctx.config.get('target')
if not target:
raise AzureException('Please specify target column...')
dataset_name = self.ctx.config.get('dataset', None)
if dataset_name is None:
raise AzureException('Please specify Dataset name...')
experiment_name = self._fix_experiment_name(
self.ctx.config.get('experiment/name', dataset_name))
self.ctx.log("Starting search on %s Dataset..." % dataset_name)
exclude_columns = self.ctx.config.get_list('exclude', [])
if target in exclude_columns:
exclude_columns.remove(target)
project = AzureProject(self.ctx)
ws = project._get_ws()
dataset = Dataset.get_by_name(ws, dataset_name)
if exclude_columns:
dataset = dataset.drop_columns(exclude_columns)
compute_target, cluster_name = self._get_compute_target(ws, project)
automl_settings = {
"iteration_timeout_minutes" : self.ctx.config.get(
'experiment/max_eval_time',10),
"iterations" : self.ctx.config.get(
'experiment/max_n_trials',10),
"primary_metric" : primary_metric,
"verbosity" : logging.INFO,
"enable_stack_ensemble": self.ctx.config.get(
'experiment/use_ensemble', False),
"enable_voting_ensemble": self.ctx.config.get(
'experiment/use_ensemble', False),
}
validation_data = None
if self.ctx.config.get('experiment/validation_source'):
if self.ctx.config.get('experiment/validation_dataset'):
validation_data = Dataset.get_by_name(ws, self.ctx.config.get('experiment/validation_dataset'))
if not validation_data:
res = AzureDataset(self.ctx).create(
source = self.ctx.config.get('experiment/validation_source'),
validation = True
)
training_data_columns = AzureDataset(self.ctx, ws)._columns(dataset)
training_data_columns.remove(target)
validation_data = Dataset.get_by_name(ws, res['dataset']).keep_columns(training_data_columns)
else:
self.ctx.config.remove('experiment/validation_dataset')
self.ctx.config.write()
automl_settings["n_cross_validations"] = self.ctx.config.get(
'experiment/cross_validation_folds', 5)
if self.ctx.config.get('experiment/validation_size'):
automl_settings["validation_size"] = self.ctx.config.get('experiment/validation_size')
if self.ctx.config.get('experiment/max_total_time'):
automl_settings["experiment_timeout_hours"] = float(self.ctx.config.get('experiment/max_total_time'))/60.0
if self.ctx.config.get('experiment/exit_score'):
automl_settings["experiment_exit_score"] = float(self.ctx.config.get('experiment/exit_score'))
if self.ctx.config.get('experiment/max_cores_per_trial'):
automl_settings["max_cores_per_iteration"] = self.ctx.config.get('experiment/max_cores_per_trial')
if self.ctx.config.get('experiment/max_concurrent_trials'):
automl_settings["max_concurrent_iterations"] = self.ctx.config.get('experiment/max_concurrent_trials')
if self.ctx.config.get('experiment/blocked_models'):
automl_settings["blocked_models"] = self.ctx.config.get_list('experiment/blocked_models')
if self.ctx.config.get('experiment/allowed_models'):
automl_settings["allowed_models"] = self.ctx.config.get_list('experiment/allowed_models')
# if self.ctx.config.get('exclude'):
# fc = FeaturizationConfig()
# fc.drop_columns = self.ctx.config.get('exclude').split(",")
# automl_settings["featurization"] = fc
# The snapshot path should point to an empty folder
snapshot_path = os.path.join(os.getcwd(), ".azureml")
#fsclient.create_folder()
automl_config = AutoMLConfig(
task = model_type,
debug_log = 'automl_errors.log',
path = snapshot_path,
compute_target = compute_target,
training_data = dataset,
validation_data = validation_data,
label_column_name = target,
model_explainability = True, #To get feature importance
**automl_settings)
experiment = Experiment(ws, experiment_name)
run = experiment.submit(automl_config, show_output = False)
self.ctx.log("Started Experiment %s search..." % experiment_name)
self.ctx.config.set('experiment/name', experiment_name)
self.ctx.config.set('cluster/name', cluster_name)
self.ctx.config.set('experiment/run_id', run.run_id)
self.ctx.config.write()
return {'experiment_name': experiment_name, 'run_id': run.run_id}
@error_handler
@authenticated
def stop(self, run_id = None):
ws = AzureProject(self.ctx)._get_ws()
experiment_name = self.ctx.config.get('experiment/name', None)
if experiment_name is None:
raise AzureException('Please specify Experiment name...')
if run_id is None:
run_id = self.ctx.config.get('experiment/run_id', None)
if run_id is None:
raise AzureException(
'Please provide Run ID (experiment/run_id)...')
experiment = Experiment(ws, experiment_name)
run = AutoMLRun(experiment = experiment, run_id = run_id)
run.cancel()
return {'stopped': experiment_name}
@error_handler
@authenticated
def leaderboard(self, run_id = None):
ws = AzureProject(self.ctx)._get_ws()
experiment_name = self.ctx.config.get('experiment/name', None)
if experiment_name is None:
raise AzureException('Please specify Experiment name...')
if run_id is None:
run_id = self.ctx.config.get('experiment/run_id', None)
if run_id is None:
raise AzureException(
'Please provide Run ID (experiment/run_id) to evaluate')
experiment = Experiment(ws, experiment_name)
run = AutoMLRun(experiment = experiment, run_id = run_id)
leaderboard, trials_count = self._get_leaderboard(run)
leaderboard = leaderboard.to_dict('records')
self.ctx.log('Leaderboard for Run %s' % run_id)
headers = []
if leaderboard:
headers = list(leaderboard[0].keys())[:3]
print_table(self.ctx.log, leaderboard, headers)
provider_status = run.get_status()
status = self._map_provider_status(provider_status)
result = {
'run_id': run_id,
'leaderboard': leaderboard,
'trials_count': trials_count,
'status': status,
'provider_status': provider_status,
}
if status == 'error':
result['error'] = run.properties.get('errors')
result['error_details'] = run.get_details().get('error', {}).get('error', {}).get('message')
self.ctx.log('Status: %s, Error: %s, Details: %s' % (
status, result['error'], result['error_details']
))
self.ctx.log_debug(run.get_details().get('error'))
else:
self.ctx.log('Status: %s' % status)
return result
def _map_provider_status(self, provider_status):
# * NotStarted - This is a temporary state client-side Run objects are in before cloud submission.
# * Starting - The Run has started being processed in the cloud. The caller has a run ID at this point.
# * Provisioning - Returned when on-demand compute is being created for a given job submission.
# * Preparing - The run environment is being prepared:
# * docker image build
# * conda environment setup
# * Queued - The job is queued in the compute target. For example, in BatchAI the job is in queued state
# while waiting for all the requested nodes to be ready.
# * Running - The job started to run in the compute target.
# * Finalizing - User code has completed and the run is in post-processing stages.
# * CancelRequested - Cancellation has been requested for the job.
# * Completed - The run completed successfully. This includes both the user code and run
# post-processing stages.
# * Failed - The run failed. Usually the Error property on a run will provide details as to why.
# * Canceled - Follows a cancellation request and indicates that the run is now successfully cancelled.
# * NotResponding - For runs that have Heartbeats enabled, no heartbeat has been recently sent.
if provider_status == 'NotStarted' or provider_status == 'Starting' or \
provider_status == 'Provisioning' or provider_status == 'Preparing' or \
provider_status == 'Queued':
return "preprocess"
if provider_status == 'Running' or provider_status == 'Finalizing':
return "started"
if provider_status == 'Completed':
return "completed"
if provider_status == 'Failed':
return "error"
if provider_status == 'CancelRequested' or provider_status == 'Canceled':
return "interrupted"
@error_handler
@authenticated
def get_experiment_settings(self):
return
@error_handler
@authenticated
def history(self):
ws = AzureProject(self.ctx)._get_ws()
experiment_name = self.ctx.config.get('experiment/name', None)
if experiment_name is None:
raise AzureException('Please specify Experiment name...')
experiment = Experiment(ws, experiment_name)
runs = Run.list(experiment)
result = []
for run in runs:
details = run.get_details()
st = dt.datetime.strptime(
details['startTimeUtc'], '%Y-%m-%dT%H:%M:%S.%fZ')
et = dt.datetime.strptime(
details['endTimeUtc'], '%Y-%m-%dT%H:%M:%S.%fZ')
duration = str(et - st)
result.append({
'id': run.id,
'start time': details['startTimeUtc'],
'duration': duration,
'status': details['status']
})
print_table(self.ctx.log, result)
return {'history': result}
def _fix_experiment_name(self, name):
# Experiment name must be between 1 and 255 characters long.
# Its first character has to be alphanumeric, and the rest may contain hyphens and underscores.
# No whitespace is allowed.
name = re.sub(r'\W+', '-', name)
name = name[:255]
return name
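# Illustrative behavior (added): runs of non-alphanumeric characters collapse to a single hyphen,
# and the result is truncated to 255 characters, e.g.
#   self._fix_experiment_name('My Experiment (v2)')  # -> 'My-Experiment-v2-'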
def _get_compute_target(self, ws, project):
local_cluster = project.get_cluster_config(name=None, local_config=True, ws=ws)
project.update_cluster_config(name=None, params=local_cluster, ws=ws,
allow_create=not self.ctx.is_runs_on_server())
return ws.compute_targets[local_cluster['name']], local_cluster['name']
def _get_leaderboard(self, experiment_run):
primary_metric = self._map_metric_azure_to_a2ml(experiment_run.properties['primary_metric'])
task_type = ""
if experiment_run.properties.get("AMLSettingsJsonString"):
settings = json.loads(experiment_run.properties.get("AMLSettingsJsonString"))
task_type = settings.get('task_type')
children = list(experiment_run.get_children(recursive=True))
leaderboard = pd.DataFrame(index=['model id', 'algorithm', 'score', 'fit_time', 'algorithm_name', 'algorithm_params', 'preprocessor', 'primary_metric', "all_scores", 'task_type'])
import os
import numpy as np
import scipy as sc
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import seaborn as sns
from scipy.interpolate import interp1d
from scipy.integrate import cumtrapz
from scipy.spatial import distance
from scipy.stats import gaussian_kde, binom
from numpy.random import RandomState
rand = RandomState()
import pickle
import pystan
from mycolours import *
def my_plot_configs():
plt.style.use('seaborn-paper')
plt.rcParams["figure.frameon"] = False
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
plt.rcParams['axes.linewidth'] = 2
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Helvetica'
plt.rcParams['axes.labelweight'] = 'bold'
def fig_save(fig, Plot_Folder, fname):
fig.savefig(os.path.join (Plot_Folder, fname),dpi=500)
fig.savefig(os.path.join (Plot_Folder, fname + "." + 'pdf'), format='pdf', Transparent=True)
# fig.savefig(os.path.join(Plot_Folder, fname + "." + 'pdf'), format='pdf')
fig.savefig(os.path.join (Plot_Folder, fname + "." + 'svg'), format='svg')
class DSA1():
def __init__(self, df=None, df_main=None, a=0.1, b=0.6, g=0.2, d=0.4, l=0.0, r_I=1e-6, r_V=1e-6, parent=None, **kwargs):
self.df = df
self.df_main = df_main
self.a = a
self.b = b
self.g = g
self.d = d
self.l = l
self.r_I = r_I
self.r_V = r_V
self.parent = parent
self.T = np.ceil(self.df['exit_time'].max())
if kwargs.get('timepoints') is None:
self.timepoints = np.linspace(0.0, self.T, 10000)
else:
self.timepoints = kwargs.get('timepoints')
@classmethod
def SVIR_ODE(cls, t, y, a, b, g, d, l):
dydt = np.zeros(4)
dydt[0] = -b*y[0]*y[2] - d*y[0]
dydt[1] = d*y[0] - a*l*y[1] - (1-a)*b*y[1]*y[2]
dydt[2] = b*y[0]*y[2] + (1-a)*b*y[1]*y[2] - g*y[2]
dydt[3] = a*l*y[1] + g*y[2]
return dydt
@classmethod
def SVIR_Extended_ODE(cls, t, y, a, b, g, d, l):
dydt = np.zeros(5)
dydt[0] = -b*y[0]*y[2] - d*y[0]
dydt[1] = d*y[0] - a*l*y[1] - (1 - a)*b*y[1]*y[2]
dydt[2] = b*y[0]*y[2] + (1 - a)*b*y[1]*y[2] - g*y[2]
dydt[3] = a*l*y[1] + g*y[2]
dydt[4] = -b*y[0]*y[2] - (1 - a)*b*y[1]*y[2]
# dydt[4] = -b * y[0] * y[2]
return dydt
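# Added reading of the state vector (an interpretation of the equations above, not original
# documentation): y[0] ~ susceptible S, y[1] ~ vaccinated V, y[2] ~ infectious I, y[3] ~ removed R,
# with b the transmission rate, g the recovery rate, d the vaccination rate, a the vaccine efficacy
# and l the rate at which effectively vaccinated individuals leave the V compartment. In the
# extended system, y[4] starts at 1 and decays with the total force of infection on S and V; it is
# used as the "not yet infected" survival function S_I in prob_test_positive below.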
@classmethod
def draw_parms_prior(cls, a_bound=(0.09, 0.11),
b_bound=(1 / 5.6, 0.75),
g_bound=(0.5 / 5.6, 2 / 5.6),
d_bound=(0.4, 1.0),
l_bound=(0, 1e-3),
r_V_bound=(0.15, 0.25),
r_I_bound=(1e-6, 5e-1),
nSample=1):
a_sample = np.random.uniform(low=a_bound[0], high=a_bound[1], size=nSample)
b_sample = np.random.uniform(low=b_bound[0], high=b_bound[1], size=nSample)
g_sample = np.random.uniform(low=g_bound[0], high=g_bound[1], size=nSample)
d_sample = np.random.uniform(low=d_bound[0], high=d_bound[1], size=nSample)
l_sample = np.random.uniform(low=l_bound[0], high=l_bound[1], size=nSample)
r_V_sample = np.random.uniform(low=r_V_bound[0], high=r_V_bound[1], size=nSample)
r_I_sample = np.random.uniform(low=r_I_bound[0], high=r_I_bound[1], size=nSample)
return a_sample, b_sample, g_sample, d_sample, l_sample, r_V_sample, r_I_sample
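# Usage sketch (added): draw 100 parameter sets from the independent uniform priors above; each
# returned value is a numpy array of shape (100,), in the order (a, b, g, d, l, r_V, r_I):
#   a_s, b_s, g_s, d_s, l_s, rV_s, rI_s = DSA1.draw_parms_prior(nSample=100)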
@property
def R0(self):
return 1.0 * self.b/self.g
@property
def kT(self):
if self.parent is None:
return self.df['exit_time'].shape[0]
else:
return self.parent.kT
@property
def rescale(self):
return 1 - self.S(self.T)
@property
def n(self):
return self.kT / self.rescale
@property
def sT(self):
return self.n - self.kT
@property
def theta(self):
return [self.a, self.b, self.g, self.d, self.l, self.r_I, self.r_V]
@property
def S(self):
a, b, g, d, l, r_I, r_V = self.theta
t_span = [0, self.T]
t_eval = np.linspace(0.0, self.T, 100000)
y0 = [1.0, self.r_V, self.r_I, 0.0]
ode_fun = lambda t, y: DSA1.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval, dense_output=True)
S = interp1d(t_eval, sol.y[0])
return S
def add_fits(self, samples):
fits = []
l = np.size(samples, axis=0)
for i in range(l):
a, b, g, d, l, r_I, r_V = samples[i]
fit = DSA1(df=self.df, a=a, b=b, g=g, d=d, l=l, r_I=r_I, r_V=r_V, parent=self)
fits.append(fit)
self.fits = fits
return self
def compute_density(self, theta):
a, b, g, d, l, r_I, r_V = theta
t_span = [0, self.T]
t_eval = np.linspace(0.0, self.T, 100000)
y0 = [1.0, self.r_V, self.r_I, 0.0]
ode_fun = lambda t, y: DSA1.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval, dense_output=True)
S = interp1d(t_eval, sol.y[0])
I = interp1d(t_eval, sol.y[2])
out = []
ST = S(self.T)
for x in self.timepoints:
Sx = S(x)
Ix = I(x)
out.append((b*Sx*Ix + d*Sx)/(1-ST))
return out
def plot_density_fit_posterior(self, samples):
nSamples = np.size(samples, axis=0)
Ds = np.zeros((nSamples, len(self.timepoints)), dtype=float)
for idx in range(nSamples):
Ds[idx] = self.compute_density(samples[idx])
Dslow = np.quantile(Ds, q=0.025, axis=0)
Dshigh = np.quantile(Ds, q=0.975, axis=0)
Dmean = np.mean(Ds, axis=0)
fig = plt.figure()
plt.plot(self.timepoints, Dmean, '-', color=forrest['forrest3'].get_rgb(), lw=3)
plt.plot(self.timepoints, Dslow, '--', color=forrest['forrest3'].get_rgb(), lw=1)
plt.plot(self.timepoints, Dshigh, '--', color=forrest['forrest3'].get_rgb(), lw=1)
# plt.axvline(x=self.T, color=junglegreen['green3'].get_rgb(), linestyle='-')
mirrored_data = (2 * self.T - self.df['exit_time'].values).tolist()
combined_data = self.df['exit_time'].values.tolist() + mirrored_data
dense = gaussian_kde(combined_data)
denseval = list(dense(x) * 2 for x in self.timepoints)
plt.plot(self.timepoints, denseval, '-', color=purplybrown['purplybrown4'].get_rgb(), lw=3)
plt.fill_between(self.timepoints, Dslow, Dshigh, alpha=.3, color=forrest['forrest1'].get_rgb())
plt.legend()
plt.ylabel(r'$-\dot{S}_t/(1-S_T)$')
plt.xlabel('t')
c = cumtrapz(Dmean, self.timepoints)
ind = np.argmax(c >= 0.001)
plt.xlim((self.timepoints[ind], self.timepoints[-1] + 1))
sns.despine()
return fig
@classmethod
def prob_test_positive(cls, t, T, theta, lag=60):
a, b, g, d, l, r_I, r_V = theta
# T = self.T
t_span = [0, T + 1]
t_eval = np.linspace(0.0, T + 1, 100000)
y0 = [1.0, r_V, r_I, 0.0, 1.0]
ode_fun = lambda t, y: DSA1.SVIR_Extended_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval,
events=None, vectorized=False, args=None)
S = interp1d(t_eval, sol.y[0])
S_I = interp1d(t_eval, sol.y[4])
if t < lag:
test_pos_prob = (1.0 - S_I(t))
# test_pos_prob = (1.0 - S_I(t))/(1-S(T))
else:
test_pos_prob = (S_I(t - lag) - S_I(t))
# test_pos_prob = (S_I(t-21) - S_I(t))/(1-S(T))
return test_pos_prob
@classmethod
def binom_likelihood(cls, df_main, theta):
nDates = df_main.time.size
total_tests = df_main.daily_test.values
daily_pos = df_main.daily_positive.values
T = (df_main.time.max() - df_main.time.min()).days + 1
loglikelihood = 0.0
for d in range(nDates):
test_pos_prob = DSA1.prob_test_positive(d + 1, T, theta=theta)
loglikelihood = loglikelihood + binom.logpmf(daily_pos[d], total_tests[d], test_pos_prob, loc=0)
return -loglikelihood
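# Added note: binom_likelihood returns the *negative* summed binomial log-likelihood of the daily
# positive counts given the model-implied test-positive probabilities, so it can be handed directly
# to a minimizer (e.g. scipy.optimize.minimize over theta).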
def children_daily_test_pos_prediction(self, sample=None):
df_main = self.df_main
if sample is None:
sample = self.theta
nDates = df_main.time.size
total_tests = df_main.children_daily_test.values
predicted_test_pos = np.zeros(nDates, dtype=np.int64)
T = (df_main.time.max() - df_main.time.min()).days + 1
for d in range(nDates):
test_pos_prob = DSA1.prob_test_positive(d + 1, T, sample)
# print(test_pos_prob)
predicted_test_pos[d] = np.random.binomial(total_tests[d], test_pos_prob, size=1)
return predicted_test_pos
def daily_test_pos_prediction(self, sample=None):
df_main = self.df_main
if sample is None:
sample = self.theta
nDates = df_main.time.size
# dates = df_main.time.values
total_tests = df_main.daily_test.values
predicted_test_pos = np.zeros(nDates, dtype=np.int64)
T = (df_main.time.max() - df_main.time.min()).days + 1
for d in range(nDates):
test_pos_prob = DSA1.prob_test_positive(d + 1, T, sample)
# print(test_pos_prob)
predicted_test_pos[d] = np.random.binomial(total_tests[d], test_pos_prob, size=1)
return predicted_test_pos
def daily_test_pos_probabilities(self, sample=None):
df_main = self.df_main
if sample is None:
sample = self.theta
nDates = df_main.time.size
test_pos_probabilities = np.zeros(nDates, dtype=np.float64)
T = (df_main.time.max() - df_main.time.min()).days + 1
for d in range(nDates):
test_pos_probabilities[d] = DSA1.prob_test_positive(d+1, T, sample)
return test_pos_probabilities
def compare_test_pos_probabilities(self, samples, theta=None):
nSamples = np.size(samples, axis=0)
dates = self.df_main.time
nDays = len(dates)
test_pos_probabilities = np.zeros((nSamples, nDays), dtype=np.float64)
if theta is None:
theta = np.mean(samples, axis=0)
for i in range(nSamples):
sample = samples[i]
test_pos_probabilities[i] = self.daily_test_pos_probabilities(sample=sample)
m = np.mean(test_pos_probabilities, axis=0)
median = np.quantile(test_pos_probabilities, q=0.5, axis=0)
low = np.quantile(test_pos_probabilities, q=0.025, axis=0)
high = np.quantile(test_pos_probabilities, q=0.975, axis=0)
my_plot_configs()
fig = plt.figure()
lmedian, = plt.plot(self.df_main['time'].values, median, '-.', color=forrest['forrest5'].get_rgb(), lw=3,
label='Median')
lm, = plt.plot(self.df_main['time'].values, median, '-', color=forrest['forrest3'].get_rgb(), lw=3,
label='Mean')
l3, = plt.plot(self.df_main['time'].values, low, '--', color=forrest['forrest2'].get_rgb(), lw=1.5)
l4, = plt.plot(self.df_main['time'].values, high, '--', color=forrest['forrest2'].get_rgb(), lw=1.5)
# l5, = plt.fill_between(self.df_main['time'].values, low, high, alpha=.1, color=forrest['forrest1'].get_rgb())
l7, = plt.plot(self.df_main['time'].values, self.df_main['daily_pct_positive'].values, '-',
color=maroons['maroon3'].get_rgb(),
lw=2, label='Actual')
plt.xlabel('Dates')
plt.ylabel('Daily percent positive')
# plt.ylim(0.0, 1.0)
plt.legend(handles=[lmedian, l7])
ax = plt.gca()
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
sns.despine()
my_dict = {}
# my_dict['Dates'] = dates['d']
my_dict['Dates'] = dates
my_dict['Mean'] = m
# my_dict['MLE'] = mle
my_dict['Median'] = median
my_dict['High'] = high
my_dict['Low'] = low
my_dict = pd.DataFrame(my_dict)
"""
Test various functions related to Backtest Statistics
"""
import unittest
import datetime as dt
import pandas as pd
import numpy as np
from mlfinlab.backtest_statistics.statistics import (timing_of_flattening_and_flips, average_holding_period,
bets_concentration, all_bets_concentration,
drawdown_and_time_under_water, sharpe_ratio,
probabalistic_sharpe_ratio, deflated_sharpe_ratio,
minimum_track_record_length)
class TestBacktestStatistics(unittest.TestCase):
"""
Test following functions in statistics.py:
- timing_of_flattening_and_flips
- average_holding_period
- bets_concentration
- all_bets_concentration
- compute_drawdown_and_time_under_water
- sharpe_ratio
- probabalistic_sharpe_ratio
- deflated_sharpe_ratio
- minimum_track_record_length
"""
def setUp(self):
"""
Set the data for tests.
"""
dates = np.array([dt.datetime(2000, 1, 1) + i * dt.timedelta(days=1) for i in range(10)])
flip_positions = np.array([1.0, 1.5, 0.5, 0, -0.5, -1.0, 0.5, 1.5, 1.5, 1.5])
hold_positions = np.array([0, 1, 1, -1, -1, 0, 0, 2, 2, 0])
no_closed_positions = np.array([0, 1, 1, 1, 1, 2, 2, 2, 2, 2])
positive_concentrated = np.array([-1, 1, 1, 0, 0, 3, 0, 2, -2, 0])
negative_concentrated = np.array([0, 1, -1, 0, -2, -1, 0, 2, -3, 0])
dollar_ret = np.array([100, 110, 90, 100, 120, 130, 100, 120, 140, 130])
normal_ret = np.array([0.01, 0.03, 0.02, 0.01, -0.01, 0.02, 0.01, 0.0, -0.01, 0.01])
cumulated_ret = np.cumprod(1 + normal_ret)
self.flip_flattening_positions = pd.Series(data=flip_positions, index=dates)
self.flips = pd.DatetimeIndex([dt.datetime(2000, 1, 7)])
self.flattenings = pd.DatetimeIndex([dt.datetime(2000, 1, 4), dt.datetime(2000, 1, 10)])
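# Added note (interpretation of the fixtures above): the expected flip on 2000-01-07 is where the
# position changes sign (-1.0 to +0.5); the expected flattenings are 2000-01-04, where the position
# reaches zero, and 2000-01-10, which the library presumably reports because any open position is
# treated as closed on the final bar.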
self.hold_positions = pd.Series(data=hold_positions, index=dates)
# coding=utf-8
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "23/09/18"
import logging
import os
import json
import sys
import pandas as pd
import numpy as np
import random
import math
import itertools
import scipy.stats
from sklearn import linear_model
from math import exp, sqrt
import ai4materials.utils.unit_conversion as uc
logger = logging.getLogger('ai4materials')
def choose_atomic_features(selected_feature_list=None,
atomic_data_file=None, binary_data_file=None):
"""Choose primary features for the extended lasso procedure."""
df1 = pd.read_csv(atomic_data_file, index_col=False)
df2 = pd.read_csv(binary_data_file, index_col=False)
# merge two dataframes on Material
df = pd.merge(df1, df2, on='Mat')
# calculate r_sigma and r_pi [Phys. Rev. Lett. 33, 1095(1974)]
radii_s_p = ['rp(A)', 'rs(A)', 'rp(B)', 'rs(B)']
df['r_sigma'] = df[radii_s_p].apply(r_sigma, axis=1)
df['r_pi'] = df[radii_s_p].apply(r_pi, axis=1)
# calculate Es/sqrt(Zval) and Ep/sqrt(Zval)
e_val_z = ['Es(A)', 'val(A)']
df['Es(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Es(B)', 'val(B)']
df['Es(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Ep(A)', 'val(A)']
df['Ep(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
e_val_z = ['Ep(B)', 'val(B)']
df['Ep(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
column_list = df.columns.tolist()
feature_list = column_list
if 'Mat' in feature_list:
feature_list.remove('Mat')
if 'Edim' in feature_list:
feature_list.remove('Edim')
logger.debug("Available features: \n {}".format(feature_list))
df_selected = df[selected_feature_list]
df_selected.insert(0, 'Mat', df['Mat'])
if selected_feature_list:
logger.info("Primary features selected: \n {}".format(selected_feature_list))
else:
logger.error("No selected features.")
sys.exit(1)
return df_selected
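# Usage sketch (added; the CSV paths and the selected feature names are placeholders):
#   df_sel = choose_atomic_features(
#       selected_feature_list=['r_sigma', 'r_pi', 'Es(A)/sqrt(Zval(A))'],
#       atomic_data_file='atomic_data.csv',
#       binary_data_file='binary_data.csv')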
def classify_rs_zb(structure):
"""Classify if a structure is rocksalt of zincblend from a list of NoMaD structure.
(one json file). Supports multiple frames (TO DO: check that). Hard-coded.
rocksalt:
atom_frac1 0.0 0.0 0.0
atom_frac2 0.5 0.5 0.5
zincblende:
atom_frac1 0.0 0.0 0.0
atom_frac2 0.25 0.25 0.25
zincblende --> label=0
rocksalt --> label=1
"""
energy = {}
chemical_formula = {}
label = {}
# gIndexRun=0
# gIndexDesc=1
for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
energy[gIndexRun, gIndexDesc] = structure.energy_eV[(gIndexRun, gIndexDesc)]
# energy=1.0
chemical_formula[gIndexRun, gIndexDesc] = structure.chemical_formula[(gIndexRun, gIndexDesc)]
# get labels, works only for RS/ZB dataset
pos_atom_2 = np.asarray(list(structure.scaled_positions.values())).reshape(2, 3)[1, :]
if all(i < 0.375 for i in pos_atom_2):
# label='zincblend'
label[gIndexRun, gIndexDesc] = 0
else:
# label='rocksalt'
label[gIndexRun, gIndexDesc] = 1
break
return chemical_formula, energy, label
def get_energy_diff(chemical_formula_list, energy_list, label_list):
""" Obtain difference in energy (eV) between rocksalt and zincblend structures of a given binary.
From a list of chemical formulas, energies and labels returns a dictionary
with {`material`: `delta_e`} where `delta_e` is the difference between the energy
with label 1 and energy with label 0, grouped by material.
Each element of such list corresponds to a json file.
The `delta_e` is exactly what reported in the PRL 114, 105503(2015).
.. todo:: Check if it works for multiple frames.
"""
energy_ = []
chemical_formula_ = []
label_ = []
# energy and chemical formula are lists even if only one frame is present
for i, energy_i in enumerate(energy_list):
energy_.append(energy_i.values())
for i, chemical_formula_i in enumerate(chemical_formula_list):
chemical_formula_.append(chemical_formula_i.values())
for i, label_i in enumerate(label_list):
label_.append(label_i.values())
# flatten the lists
energy = list(itertools.chain(*energy_))
chemical_formula = list(itertools.chain(*chemical_formula_))
label = list(itertools.chain(*label_))
df = pd.DataFrame()
df['Mat'] = chemical_formula
df['Energy'] = energy
df['Label'] = label
# generate summary dataframe with lowest zincblend and rocksalt energy
# zincblend --> label=0
# rocksalt --> label=1
df_summary = df.sort_values(by='Energy').groupby(['Mat', 'Label'], as_index=False).first()
groupby_mat = df_summary.groupby('Mat')
dict_delta_e = {}
for mat, df in groupby_mat:
# calculate the delta_e (E_RS - E_ZB)
energy_label_1 = df.loc[df['Label'] == 1].Energy.values
energy_label_0 = df.loc[df['Label'] == 0].Energy.values
# if energy_diff>0 --> rs
# if energy_diff<0 --> zb
if (energy_label_0 and energy_label_1):
# single element numpy array --> convert to scalar
energy_diff = (energy_label_1 - energy_label_0).item(0)
# divide by 2 because it is the energy_diff for each atom
energy_diff = energy_diff / 2.0
else:
logger.error(
"Could not find all the energies needed to calculate required property for material '{0}'".format(mat))
sys.exit(1)
dict_delta_e.update({mat: (energy_diff, energy_label_0, energy_label_1)})
return dict_delta_e
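# Added note: the returned dictionary maps each material to a tuple
# (delta_e, E_label0, E_label1), where label 0 is zincblende, label 1 is rocksalt and
# delta_e = (E_rocksalt - E_zincblende) / 2 per atom, e.g. {'GaAs': (0.07, array([...]), array([...]))}
# (the numbers here are placeholders).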
def get_lowest_energy_structures(structure, dict_delta_e):
"""Get lowest energy structure for each material and label type.
Works only with two possible labels for a given material.
.. todo:: Check if it works for multiple frames.
"""
energy = {}
chemical_formula = {}
is_lowest_energy = {}
for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
energy[gIndexRun, gIndexDesc] = structure.energy_eV[gIndexRun, gIndexDesc]
chemical_formula[gIndexRun, gIndexDesc] = structure.chemical_formula[gIndexRun, gIndexDesc]
lowest_energy_label_0 = dict_delta_e.get(chemical_formula[gIndexRun, gIndexDesc])[1]
lowest_energy_label_1 = dict_delta_e.get(chemical_formula[gIndexRun, gIndexDesc])[2]
if lowest_energy_label_0 > lowest_energy_label_1:
lowest_energy_label_01 = lowest_energy_label_1
else:
lowest_energy_label_01 = lowest_energy_label_0
if energy[gIndexRun, gIndexDesc] == lowest_energy_label_01:
is_lowest_energy[gIndexRun, gIndexDesc] = True
else:
is_lowest_energy[gIndexRun, gIndexDesc] = False
return is_lowest_energy
def write_atomic_features(structure, selected_feature_list, df, dict_delta_e=None,
path=None, filename_suffix='.json', json_file=None):
"""Given the chemical composition, build the descriptor made of atomic features only.
Includes all the frames in the same json file.
.. todo:: Check if it works for multiple frames.
"""
# make dictionary {primary_feature: value} for each structure
# dictionary of a dictionary, key: Mat, value: atomic_features
dict_features = df.set_index('chemical_formula').T.to_dict()
# label=0: rocksalt, label=1: zincblend
#chemical_formula_, energy_, label_ = classify_rs_zb(structure)
#is_lowest_energy_ = get_lowest_energy_structures(structure, dict_delta_e)
if structure.isPeriodic == True:
for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
if atoms is not None:
# filename is the normalized absolute path
filename = os.path.abspath(os.path.normpath(os.path.join(path,
'{0}{1}'.format(structure.name, filename_suffix))))
outF = open(filename, 'w')
outF.write("""
{
"data":[""")
cell = structure.atoms[gIndexRun, gIndexDesc].get_cell()
cell = np.transpose(cell)
atoms = structure.atoms[gIndexRun, gIndexDesc]
chemical_formula = structure.chemical_formula_[gIndexRun, gIndexDesc]
energy = structure.energy_eV[gIndexRun, gIndexDesc]
label = label_[gIndexRun, gIndexDesc]
#target = dict_delta_e.get(chemical_formula_[gIndexRun, gIndexDesc])[0]
target = dict_delta_e.get(chemical_formula)
atomic_features = dict_features[structure.chemical_formula[gIndexRun, gIndexDesc]]
#is_lowest_energy = is_lowest_energy_[gIndexRun,gIndexDesc]
res = {
"checksum": structure.name,
"label": label,
"energy": energy,
#"is_lowest_energy": is_lowest_energy,
"delta_e_rs_zb": target,
"chemical_formula": chemical_formula,
"gIndexRun": gIndexRun,
"gIndexDesc": gIndexDesc,
"cell": cell.tolist(),
"particle_atom_number": map(lambda x: x.number, atoms),
"particle_position": map(lambda x: [x.x, x.y, x.z], atoms),
"atomic_features": atomic_features,
"main_json_file_name": json_file,
}
json.dump(res, outF, indent=2)
outF.write("""
] }""")
outF.flush()
return filename
def r_sigma(row):
"""Calculates r_sigma.
John-Bloch's indicator1: |rp(A) + rs(A) - rp(B) -rs(B)| from Phys. Rev. Lett. 33, 1095 (1974).
Input rp(A), rs(A), rp(B), rs(B)
They need to be given in this order.
"""
return abs(row[0] + row[1] - row[2] - row[3])
def r_pi(row):
"""Calculates r_pi.
John-Bloch's indicator2: |rp(A) - rs(A)| +| rp(B) -rs(B)| from Phys. Rev. Lett. 33, 1095 (1974).
Input rp(A), rs(A), rp(B), rs(B)
They need to be given in this order.
"""
return abs(row[0] - row[1]) + abs(row[2] - row[3])
def e_sqrt_z(row):
"""Calculates e/sqrt(val_Z).
Es/sqrt(Zval) and Ep/sqrt(Zval) from Phys. Rev. B 85, 104104 (2012).
Input Es(A) or Ep(A), val(A) (A-->B)
They need to be given in this order.
"""
return row[0] / math.sqrt(row[1])
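# Worked examples (added; plain lists stand in for dataframe rows):
#   r_sigma([1.0, 2.0, 0.5, 0.25])  == abs(1.0 + 2.0 - 0.5 - 0.25) == 2.25
#   r_pi([1.0, 2.0, 0.5, 0.25])     == abs(1.0 - 2.0) + abs(0.5 - 0.25) == 1.25
#   e_sqrt_z([4.0, 4.0])            == 4.0 / math.sqrt(4.0) == 2.0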
def _get_scaling_factors(columns, metadata_info, energy_unit, length_unit):
"""Calculates characteristic energy and length, given an atomic metadata"""
scaling_factor = []
if columns is not None:
for col in columns:
try:
col_unit = metadata_info[col.split('(', 1)[0]]['units']
# check allowed values, to avoid problems with substance - NOT IDEAL
if col_unit == 'J':
scaling_factor.append(uc.convert_unit(1, energy_unit, target_unit='eV'))
# divide all column by e_0
#df.loc[:, col] *= e_0
elif col_unit == 'm':
scaling_factor.append(uc.convert_unit(1, length_unit, target_unit='angstrom'))
# divide all column by e_0
#df.loc[:, col] *= d_0
else:
scaling_factor.append(1.0)
logger.debug("Feature units are not energy nor lengths. "
"No scale to characteristic length.")
except BaseException:
scaling_factor.append(1.0)
logger.debug("Feature units not included in metadata")
return scaling_factor
def _my_power_2(row):
return pow(row[0], 2)
def _my_power_3(row):
return pow(row[0], 3)
def _my_power_m1(row):
return pow(row[0], -1)
def _my_power_m2(row):
return pow(row[0], -2)
def _my_power_m3(row):
return pow(row[0], -3)
def _my_abs_sqrt(row):
return math.sqrt(abs(row[0]))
def _my_exp(row):
return exp(row[0])
def _my_exp_power_2(row):
return exp(pow(row[0], 2))
def _my_exp_power_3(row):
return exp(pow(row[0], 3))
def _my_sum(row):
return row[0] + row[1]
def _my_abs_sum(row):
return abs(row[0] + row[1])
def _my_abs_diff(row):
return abs(row[0] - row[1])
def _my_diff(row):
return row[0] - row[1]
def _my_div(row):
return row[0] / row[1]
def _my_sum_power_2(row):
return pow((row[0] + row[1]), 2)
def _my_sum_power_3(row):
return pow((row[0] + row[1]), 3)
def _my_sum_exp(row):
return exp(row[0] + row[1])
def _my_sum_exp_power_2(row):
return exp(pow(row[0] + row[1], 2))
def _my_sum_exp_power_3(row):
return exp(pow(row[0] + row[1], 3))
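# Added note: the _my_* helpers above take a positionally indexable row (e.g. a list, or a row
# passed by DataFrame.apply(..., axis=1) in combine_features below). For example:
#   _my_abs_diff([1.0, 3.0])     == 2.0
#   _my_sum_power_2([1.0, 3.0])  == 16.0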
def combine_features(df=None, energy_unit=None, length_unit=None,
metadata_info=None, allowed_operations=None, derived_features=None):
"""Generate combination of features given a dataframe and a list of allowed operations.
For the exponentials, we introduce a characteristic energy/length scale,
converting energy and length features to characteristic units (eV and angstrom, respectively; see _get_scaling_factors) before they enter the exponentials.
.. todo:: Fix under/overflow errors, and introduce handling of exceptions.
"""
if allowed_operations:
logger.info('Selected operations:\n {0}'.format(allowed_operations))
else:
logger.warning('No allowed operations selected.')
# make derived features
if derived_features is not None:
if 'r_sigma' in derived_features:
# calculate r_sigma and r_pi [Phys. Rev. Lett. 33, 1095(1974)]
logger.info('Including rs and rp to allow r_sigma calculation')
radii_s_p = ['atomic_rp_max(A)', 'atomic_rs_max(A)', 'atomic_rp_max(B)', 'atomic_rs_max(B)']
df['r_sigma'] = df[radii_s_p].apply(r_sigma, axis=1)
if 'r_pi' in derived_features:
logger.info('Including rs and rp to allow r_pi calculation')
radii_s_p = ['atomic_rp_max(A)', 'atomic_rs_max(A)', 'atomic_rp_max(B)', 'atomic_rs_max(B)']
df['r_pi'] = df[radii_s_p].apply(r_pi, axis=1)
# calculate Es/sqrt(Zval) and Ep/sqrt(Zval)
# e_val_z = ['Es(A)', 'val(A)']
# df['Es(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
# e_val_z = ['Es(B)', 'val(B)']
# df['Es(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
#
# e_val_z = ['Ep(A)', 'val(A)']
# df['Ep(A)/sqrt(Zval(A))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
# e_val_z = ['Ep(B)', 'val(B)']
# df['Ep(B)/sqrt(Zval(B))'] = df[e_val_z].apply(e_sqrt_z, axis=1)
columns_ = df.columns.tolist()
# define subclasses of features (see Phys. Rev. Lett. 114, 105503(2015) Supp. info. pag.1)
# make a dictionary {feature: subgroup}
# features belonging to a0 will not be combined, just added at the end
# dict_features = {
# u'val(B)': 'a0', u'val(A)': 'a0',
#
# u'period__el0':'a0',
# u'period__el1':'a0',
# u'atomic_number__el0': 'a0',
# u'atomic_number__el1': 'a0',
# u'group__el0': 'a0',
# u'group__el1': 'a0',
#
# u'atomic_ionization_potential__el0': 'a1',
# u'atomic_ionization_potential__el1': 'a1',
# u'atomic_electron_affinity__el0': 'a1',
# u'atomic_electron_affinity__el1': 'a1',
# u'atomic_homo_lumo_diff__el0': 'a1',
# u'atomic_homo_lumo_diff__el1': 'a1',
# u'atomic_electronic_binding_energy_el0': 'a1',
# u'atomic_electronic_binding_energy_el1': 'a1',
#
#
# u'HOMO(A)': 'a2', u'LUMO(A)': 'a2', u'HOMO(B)': 'a2', u'LUMO(B)': 'a2',
# u'HL_gap_AB': 'a2',
# u'Ebinding_AB': 'a2',
#
# u'atomic_rs_max__el0': 'a3',
# u'atomic_rs_max__el1': 'a3',
# u'atomic_rp_max__el0': 'a3',
# u'atomic_rp_max__el1': 'a3',
# u'atomic_rd_max__el0': 'a3',
# u'atomic_rd_max__el1': 'a3',
# u'atomic_r_by_2_dimer__el0': 'a3',
# u'atomic_r_by_2_dimer__el1': 'a3',
#
# u'd_AB': 'a3',
# u'r_sigma': 'a3', u'r_pi': 'a3',
#
# u'Eh': 'a4', u'C': 'a4'
# }
dict_features = {
u'period': 'a0',
u'atomic_number': 'a0',
u'group': 'a0',
u'atomic_ionization_potential': 'a1',
u'atomic_electron_affinity': 'a1',
u'atomic_homo_lumo_diff': 'a1',
u'atomic_electronic_binding_energy': 'a1',
u'atomic_homo': 'a2', u'atomic_lumo': 'a2',
u'atomic_rs_max': 'a3',
u'atomic_rp_max': 'a3',
u'atomic_rd_max': 'a3',
u'atomic_r_by_2_dimer': 'a3',
u'r_sigma': 'a3', u'r_pi': 'a3'
}
# standardize the data -
# we cannot reproduce the PRL if we standardize the data
#df_a0 = (df_a0 - df_a0.mean()) / (df_a0.max() - df_a0.min())
#df_a1 = (df_a1 - df_a1.mean()) / (df_a1.max() - df_a1.min())
#df_a2 = (df_a2 - df_a2.mean()) / (df_a2.max() - df_a2.min())
#df_a3 = (df_a3 - df_a3.mean()) / (df_a3.max() - df_a3.min())
#df_a4 = (df_a4 - df_a4.mean()) / (df_a4.max() - df_a4.min())
# df_a0 = df[[col for col in columns_ if dict_features.get(col)=='a0']].astype('float32')
df_a0 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a0']].astype('float32')
df_a1 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a1']].astype('float32')
df_a2 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a2']].astype('float32')
df_a3 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a3']].astype('float32')
df_a4 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a4']].astype('float32')
col_a0 = df_a0.columns.tolist()
col_a1 = df_a1.columns.tolist()
col_a2 = df_a2.columns.tolist()
col_a3 = df_a3.columns.tolist()
col_a4 = df_a4.columns.tolist()
# this list will at the end all the dataframes created
df_list = []
df_b0_list = []
df_b1_list = []
df_b2_list = []
df_b3_list = []
df_c3_list = []
df_d3_list = []
df_e3_list = []
df_f1_list = []
df_f2_list = []
df_f3_list = []
df_x1_list = []
df_x2_list = []
df_x_list = []
# create b0: absolute differences and sums of a0
# this is not in the PRL.
for subset in itertools.combinations(col_a0, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a0[list(subset)].apply(_my_sum, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a0[list(subset)].apply(_my_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
cols = ['(' + subset[1] + '-' + subset[0] + ')']
data = df_a0[list(subset)].apply(_my_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a0[list(subset)].apply(_my_abs_sum, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a0[list(subset)].apply(_my_abs_diff, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '/' in allowed_operations:
cols = [subset[0] + '/' + subset[1]]
data = df_a0[list(subset)].apply(_my_div, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
cols = [subset[1] + '/' + subset[0]]
data = df_a0[list(subset)].apply(_my_div, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a0, 1):
if '^2' in allowed_operations:
cols = [subset[0] + '^2']
data = df_a0[list(subset)].apply(_my_power_2, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = [subset[0] + '^3']
data = df_a0[list(subset)].apply(_my_power_3, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
if 'exp' in allowed_operations:
cols = ['exp(' + subset[0] + ')']
data = df_a0[list(subset)].apply(_my_exp, axis=1)
df_b0_list.append(pd.DataFrame(data, columns=cols))
# create b1: absolute differences and sums of a1
for subset in itertools.combinations(col_a1, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a1[list(subset)].apply(_my_sum, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a1[list(subset)].apply(_my_diff, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a1[list(subset)].apply(_my_abs_sum, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a1[list(subset)].apply(_my_abs_diff, axis=1)
df_b1_list.append(pd.DataFrame(data, columns=cols))
# create b2: absolute differences and sums of a2
for subset in itertools.combinations(col_a2, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a2[list(subset)].apply(_my_sum, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a2[list(subset)].apply(_my_diff, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a2[list(subset)].apply(_my_abs_sum, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a2[list(subset)].apply(_my_abs_diff, axis=1)
df_b2_list.append(pd.DataFrame(data, columns=cols))
# create b3: absolute differences and sums of a3
for subset in itertools.combinations(col_a3, 2):
if '+' in allowed_operations:
cols = ['(' + subset[0] + '+' + subset[1] + ')']
data = df_a3[list(subset)].apply(_my_sum, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '-' in allowed_operations:
cols = ['(' + subset[0] + '-' + subset[1] + ')']
data = df_a3[list(subset)].apply(_my_diff, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '|+|' in allowed_operations:
cols = ['|' + subset[0] + '+' + subset[1] + '|']
data = df_a3[list(subset)].apply(_my_abs_sum, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
if '|-|' in allowed_operations:
cols = ['|' + subset[0] + '-' + subset[1] + '|']
data = df_a3[list(subset)].apply(_my_abs_diff, axis=1)
df_b3_list.append(pd.DataFrame(data, columns=cols))
# create c3: two steps:
# 1) squares of a3 - unary operations
# we kept itertools.combinations to make the code more uniform with the binary operations
for subset in itertools.combinations(col_a3, 1):
if '^2' in allowed_operations:
cols = [subset[0] + '^2']
data = df_a3[list(subset)].apply(_my_power_2, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
if '^3' in allowed_operations:
cols = [subset[0] + '^3']
data = df_a3[list(subset)].apply(_my_power_3, axis=1)
df_c3_list.append(pd.DataFrame(data, columns=cols))
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self):
# if a Series is returned, we should use the resulting index names
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other', 'cols'])
assert_frame_equal(result, expected)
result = df.apply(
lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other'])
assert_frame_equal(result, expected)
def test_result_type(self):
# result_type should be consistent no matter which
# path we take in the code
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3],
index=columns),
axis=1,
result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3], index=columns),
axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type):
# allowed result_type
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3],
axis=1,
result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box):
# passing an array or list should not affect the output shape
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
take a list of frames, zip them together under the
assumption that these all have the first frames' index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return pd.DataFrame(zipped)
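# A small standalone illustration (not part of the original test module) of what zip_frames
# produces along axis=1: columns are interleaved column-by-column across the input frames,
# so two 2-column frames yield a 4-column frame ordered A(f1), A(f2), B(f1), B(f2).
# The inputs below are made up for illustration.
def _zip_frames_demo():
    f1 = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
    f2 = f1 * 10
    zipped = zip_frames([f1, f2], axis=1)
    return list(zipped.columns)  # ['A', 'A', 'B', 'B']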
class TestDataFrameAggregate(TestData):
def test_agg_transform(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
f_abs = np.abs(self.frame)
f_sqrt = np.sqrt(self.frame)
# ufunc
result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
result = self.frame.apply([np.abs, np.sqrt], axis=axis)
expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['absolute', 'sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.abs, 'sqrt'], axis=axis)
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self, axis):
# cannot both transform and agg
def f():
self.frame.transform(['max', 'min'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.agg(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.transform(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}, axis=axis)
@pytest.mark.parametrize('method', [
'abs', 'shift', 'pct_change', 'cumsum', 'rank',
])
def test_transform_method_name(self, method):
# https://github.com/pandas-dev/pandas/issues/19760
df = pd.DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
tm.assert_frame_equal(result, expected)
def test_demo(self):
# demonstration tests
df = pd.DataFrame({'A': range(5), 'B': 5})
result = df.agg(['min', 'max'])
expected = DataFrame({'A': [0, 4], 'B': [5, 5]},
columns=['A', 'B'],
index=['min', 'max'])
tm.assert_frame_equal(result, expected)
result = df.agg({'A': ['min', 'max'], 'B': ['sum', 'max']})
expected = DataFrame({'A': [4.0, 0.0, np.nan],
'B': [5.0, np.nan, 25.0]},
columns=['A', 'B'],
index=['max', 'min', 'sum'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
def test_agg_multiple_mixed_no_warning(self):
# https://github.com/pandas-dev/pandas/issues/20909
mdf = pd.DataFrame({'A': [1, 2, 3],
'B': [1., 2., 3.],
'C': ['foo', 'bar', 'baz'],
'D': pd.date_range('20130101', periods=3)})
expected = pd.DataFrame({"A": [1, 6], 'B': [1.0, 6.0],
"C": ['bar', 'foobarbaz'],
"D": [pd.Timestamp('2013-01-01'), pd.NaT]},
index=['min', 'sum'])
# sorted index
with tm.assert_produces_warning(None):
result = mdf.agg(['min', 'sum'])
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(None):
result = mdf[['D', 'C', 'B', 'A']].agg(['sum', 'min'])
# For backwards compatibility, the result's index is
# still sorted by function name, so it's ['min', 'sum']
# not ['sum', 'min'].
expected = expected[['D', 'C', 'B', 'A']]
tm.assert_frame_equal(result, expected)
def test_agg_dict_nested_renaming_depr(self):
df = pd.DataFrame({'A': range(5), 'B': 5})
# nested renaming
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.agg({'A': {'foo': 'min'},
'B': {'bar': 'max'}})
def test_agg_reduce(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
name1, name2 = self.frame.axes[other_axis].unique()[:2].sort_values()
# all reducers
expected = pd.concat([self.frame.mean(axis=axis),
self.frame.max(axis=axis),
self.frame.sum(axis=axis),
], axis=1)
expected.columns = ['mean', 'max', 'sum']
expected = expected.T if axis in {0, 'index'} else expected
result = self.frame.agg(['mean', 'max', 'sum'], axis=axis)
assert_frame_equal(result, expected)
# dict input with scalars
func = OrderedDict([(name1, 'mean'), (name2, 'sum')])
result = self.frame.agg(func, axis=axis)
expected = Series([self.frame.loc(other_axis)[name1].mean(),
self.frame.loc(other_axis)[name2].sum()],
index=[name1, name2])
|
assert_series_equal(result, expected)
|
pandas.util.testing.assert_series_equal
|
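# A minimal standalone sketch (not part of the test module above) of the result_type
# behaviours exercised by the apply tests: with no result_type a list-returning function
# gives a Series of lists, 'expand' turns the lists into columns, and 'broadcast' keeps
# the original column labels. All values are illustrative only.
import numpy as np
import pandas as pd

def _apply_result_type_demo():
    df = pd.DataFrame(np.arange(6).reshape(2, 3), columns=['A', 'B', 'C'])
    as_series = df.apply(lambda row: [1, 2], axis=1)                               # Series of lists
    expanded = df.apply(lambda row: [1, 2], axis=1, result_type='expand')          # columns 0 and 1
    broadcast = df.apply(lambda row: [1, 2, 3], axis=1, result_type='broadcast')   # keeps A, B, C
    return as_series, expanded, broadcast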
# Edits features of the Ellsworth stand shapefile in preparation for rasterization
# Script written in Python 3.7
import config as config
import numpy as np
import pandas as pd
import geopandas as gpd
# ======================================================================================================================
# Edit stand shapefile
# Edit species names to be VELMA appropriate and fill nulls
stand_shp_path = str(config.stand_shp)
stand_shp = gpd.read_file(stand_shp_path)
# Replace slashes with underscores
stand_shp['STAND_TYPE'] = stand_shp['STAND_TYPE'].str.replace('/', '_')
stand_shp['STAND_TYPE'] = stand_shp['STAND_TYPE'].str.replace('-', '_')
# Fix some errors and combine duplicate stand types
stand_shp.loc[pd.isnull(stand_shp['STAND_TYPE']), 'STAND_TYPE'] = 'BARE'
stand_shp['STAND_TYPE'] = stand_shp['STAND_TYPE'].replace('TNC', 'DF') # Changing these errors to DF
stand_shp['STAND_TYPE'] = stand_shp['STAND_TYPE'].replace('50074', 'DF')
stand_shp['STAND_TYPE'] = stand_shp['STAND_TYPE'].replace('WH_RC_SS_RA', 'WH_SS_RC_RA')
# Remove numeric suffixes
p = [[j for j in i if not j.isnumeric()] for i in stand_shp['STAND_TYPE'].str.split('_')]
p = ['_'.join(i) for i in p]
stand_shp['STAND_TYPE'] = p
# Change all conifer stand types to a single 'conifer' class for ease of use in VELMA
conifers = stand_shp['STAND_TYPE'].unique().tolist()
conifers = [x for x in conifers if x not in ['BARE', 'BPA', 'NF']]
stand_shp['STAND_TYPE'] = stand_shp['STAND_TYPE'].replace(dict.fromkeys(conifers, 'conifer'))
# Assign numbers to unique species names
unique_species = stand_shp['STAND_TYPE'].unique().tolist()
unique_numbers = (np.arange(len(unique_species)) + 1).tolist()
species_num_dict = {unique_species[i]: unique_numbers[i] for i in range(len(unique_species))}
stand_shp['SPECIES_ID'] = stand_shp['STAND_TYPE'].map(species_num_dict)
key = pd.DataFrame(np.column_stack([unique_species, unique_numbers]), columns=['type', 'id']) # Save species/number key
key.to_csv(config.cover_type_velma.parents[0] / 'cover_type_key.csv', index=False)
# Convert ages from strings to numbers
stand_shp['Age_2020'].replace('200+', '200', inplace=True)
stand_shp['Age_2020'] = stand_shp['Age_2020'].astype(int)
stand_shp.loc[
|
pd.isnull(stand_shp['Age_2020'])
|
pandas.isnull
|
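# A small self-contained sketch (not part of the original script) of the suffix-stripping
# step above: split each stand-type code on '_', drop purely numeric tokens, and re-join.
# The example codes in the comment are made up for illustration.
def _strip_numeric_suffixes(stand_types):
    cleaned = []
    for code in stand_types:
        tokens = [tok for tok in code.split('_') if not tok.isnumeric()]
        cleaned.append('_'.join(tokens))
    return cleaned

# _strip_numeric_suffixes(['WH_SS_RC_12', 'DF_2001']) -> ['WH_SS_RC', 'DF']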
from flask import Flask, request, render_template, jsonify, make_response
import pandas as pd
import os
from sklearn.externals import joblib
import numpy as np
app = Flask(__name__)
@app.route('/upload', methods=['GET', 'POST'])
def upload():
if request.method == 'POST':
new_data = pd.read_csv(request.files.get('file'))
test_data = pd.read_csv('test_data.csv')
testY_oldmodel = test_data['PRICE'].values
testX_oldmodel = test_data.drop('PRICE', axis=1)
old_model = joblib.load('oldmodel.pkl')
predictions = old_model.predict(testX_oldmodel)
from sklearn import metrics
mae_before = metrics.mean_absolute_error(testY_oldmodel, predictions)
mse_before = metrics.mean_squared_error(testY_oldmodel, predictions)
rmse_before = np.sqrt(metrics.mean_squared_error(testY_oldmodel, predictions))
rsq_before = metrics.r2_score(testY_oldmodel, predictions)
old_data = pd.read_csv('old.csv')
#new_data = pd.read_csv(request.files.get('file')) #From user (through web-interface)
df = pd.concat([old_data, new_data])
removable_features = ['SOURCE', 'CENSUS_TRACT', 'CENSUS_BLOCK', 'SQUARE', 'USECODE', 'Unnamed: 0', 'X', 'Y']
df = df.drop(removable_features, axis=1)
df['SALEDATE'] =
|
pd.to_datetime(df['SALEDATE'])
|
pandas.to_datetime
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from scipy import stats
from keras.layers import Input, Dropout, Dense, LSTM, TimeDistributed, RepeatVector
import seaborn as sns
import joblib
from keras import regularizers
from keras.models import Model
from google.cloud import bigquery
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras import layers
from tensorflow import keras
import tensorflow as tf
import pandas as pd
import numpy as np
import os
from datetime import datetime
sns.set(color_codes=True)
# See chromeperf/ml/experiments/lstm/doc/lstm.md for more information about
# approach and purpose
# Please note that these are initial values and approximations. No fine tuning
# has been conducted to fit the approach to our specific data and requirements yet.
def autoencoder_model(X):
inputs = Input(shape=(X.shape[1], X.shape[2]))
L1 = LSTM(16,
activation='relu',
return_sequences=True,
kernel_regularizer=regularizers.l2(0.00))(inputs)
L2 = LSTM(4, activation='relu', return_sequences=False)(L1)
L3 = RepeatVector(X.shape[1])(L2)
L4 = LSTM(4, activation='relu', return_sequences=True)(L3)
L5 = LSTM(16, activation='relu', return_sequences=True)(L4)
output = TimeDistributed(Dense(X.shape[2]))(L5)
model = Model(inputs=inputs, outputs=output)
return model
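# A quick shape sanity check (not part of the original pipeline) for the autoencoder defined
# above: the model maps a [samples, timesteps, features] tensor back to a tensor of the same
# shape, which is what the reconstruction-error based anomaly detection below relies on.
# The dummy array and its dimensions are illustrative assumptions only.
def _autoencoder_shape_check(samples=8, timesteps=1, features=9):
    dummy = np.random.rand(samples, timesteps, features)
    model = autoencoder_model(dummy)
    model.compile(optimizer='adam', loss='mae')
    reconstructed = model.predict(dummy)
    assert reconstructed.shape == dummy.shape
    return reconstructed.shape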
bigquery_client = bigquery.Client(project='chromeperf-datalab')
#we can change this later to loop through different cases
# for multiple different combinations of metric, bot and platform
case = "ChromiumPerf", "linux-perf", "system_health.common_desktop/timeToFirstPaint/%/%"
sql = """
SELECT
SPLIT(measurement, '/')[SAFE_OFFSET(2)] label,
revision,
value,
std_error,
sample_values
FROM `chromeperf.chromeperf_dashboard_data.rows`
WHERE DATE(timestamp) >= DATE_SUB(CURRENT_DATE(), INTERVAL 30 DAY)
AND master = "{MASTER}"
AND bot = "{BOT}"
AND measurement LIKE "{METRIC}"
ORDER BY revision ASC
"""
def fetch_sample_data(case):
MASTER, BOT, METRIC = case
return bigquery_client.query(
sql.format(**locals())).result().to_dataframe()
df = fetch_sample_data(case)
# Data Preprocessing
# Here we are organising the data to be grouped by label and revision number.
label_group = df.groupby([
'label',
'revision',
])['sample_values'].apply(pd.Series.to_numpy)
groups = tuple(zip(*label_group.keys()))[0]
# groups holds the label part of each (label, revision) key; np.unique below gives the distinct label names.
label_names = np.unique(groups)
label_stats = {}
for label in label_names:
revisions_dictionary = label_group[label]
revisions_list = list(revisions_dictionary.keys())
stats_dictionary = {}
for revision in revisions_list:
values = np.concatenate(revisions_dictionary[revision], axis=0)
median = np.median(values)
average = np.average(values)
std = np.std(values)
mean = np.mean(values)
minimum = np.min(values)
maximum = np.max(values)
delta = maximum - minimum
IQR = stats.iqr(values, interpolation='midpoint')
stats_dictionary[revision] = {
'total': np.sum(values),
'median': median,
'average': average,
'standard deviation': std,
'mean': mean,
'min': minimum,
'max': maximum,
'delta': delta,
'IQR': IQR
}
label_stats[label] = stats_dictionary
# Model building and training
#
# We will create a model for each label so we must loop through each label in
# the data prepared above.
for label in label_stats:
current_directory = os.getcwd()
final_directory = os.path.join(current_directory, label)
os.makedirs(final_directory)
current_path = os.getcwd()
# We are transposing the data in order to get it into a layout that the encoder will process.
stats = pd.DataFrame.from_dict(label_stats[label]).transpose()
# Here we are doing the training testing split for the data.
rowcount = stats.shape[0]
training = round(rowcount * 0.75)
training_set = stats.iloc[0:training]  # note: iloc slices exclude the stop index, so this takes rows 0 through training-1
testing_set = stats.iloc[training:]
# Raw data plots:
# Training data.
fig, ax = plt.subplots(figsize=(14, 6), dpi=80)
ax.plot(training_set['median'],
label='median',
color='blue',
animated=True,
linewidth=1)
ax.plot(training_set['min'],
label='min',
color='black',
animated=True,
linewidth=1)
ax.plot(training_set['max'],
label='max',
color='red',
animated=True,
linewidth=1)
ax.plot(training_set['delta'],
label='delta',
color='green',
animated=True,
linewidth=1)
ax.plot(training_set['IQR'],
label='IQR',
color='magenta',
animated=True,
linewidth=1)
plt.legend(loc='lower left')
ax.set_title('Training data for ' + label, fontsize=14)
my_file = label + '/training_' + label + '.png'
path = os.path.join(current_path, my_file)
fig.savefig(path)
# Testing data.
fig, ax = plt.subplots(figsize=(14, 6), dpi=80)
ax.plot(testing_set['median'],
label='median',
color='blue',
animated=True,
linewidth=1)
ax.plot(testing_set['min'],
label='min',
color='black',
animated=True,
linewidth=1)
ax.plot(testing_set['max'],
label='max',
color='red',
animated=True,
linewidth=1)
ax.plot(testing_set['delta'],
label='delta',
color='green',
animated=True,
linewidth=1)
ax.plot(testing_set['IQR'],
label='IQR',
color='magenta',
animated=True,
linewidth=1)
plt.legend(loc='lower left')
ax.set_title('Testing data for ' + label, fontsize=14)
my_file = label + '/testing_' + label + '.png'
path = os.path.join(current_path, my_file)
fig.savefig(path)
# Fourier transform
#
# This transform is used to determine whether there are frequencies that
# dominate the data. Any major changes are easily identified when studying
# the frequency domain. We do not use this data, this is purely for context
# for the reader.
train_fft_set = np.fft.fft(training_set)
test_fft_set = np.fft.fft(testing_set)
# Here we plot the different training data for our model on the same axes.
fig, ax = plt.subplots(figsize=(14, 6), dpi=80)
ax.plot(train_fft_set[:, 1].real,
label='median',
color='blue',
animated=True,
linewidth=1)
ax.plot(train_fft_set[:, 5].real,
label='min',
color='black',
animated=True,
linewidth=1)
ax.plot(train_fft_set[:, 6].real,
label='max',
color='red',
animated=True,
linewidth=1)
ax.plot(train_fft_set[:, 7].real,
label='delta',
color='green',
animated=True,
linewidth=1)
ax.plot(train_fft_set[:, 8].real,
label='IQR',
color='magenta',
animated=True,
linewidth=1)
plt.legend(loc='lower left')
ax.set_title('Training data for ' + label, fontsize=14)
my_file = label + '/fourierTraining_' + label + '.png'
path = os.path.join(current_path, my_file)
fig.savefig(path)
# Here we plot the different test data for our model on the same axes.
fig, ax = plt.subplots(figsize=(14, 6), dpi=80)
ax.plot(test_fft_set[:, 1].real,
label='median',
color='blue',
animated=True,
linewidth=1)
ax.plot(test_fft_set[:, 5].real,
label='min',
color='black',
animated=True,
linewidth=1)
ax.plot(test_fft_set[:, 6].real,
label='max',
color='red',
animated=True,
linewidth=1)
ax.plot(test_fft_set[:, 7].real,
label='delta',
color='green',
animated=True,
linewidth=1)
ax.plot(test_fft_set[:, 8].real,
label='IQR',
color='magenta',
animated=True,
linewidth=1)
plt.legend(loc='lower left')
ax.set_title('Test data for ' + label, fontsize=14)
my_file = label + '/fourierTest_' + label + '.png'
path = os.path.join(current_path, my_file)
fig.savefig(path)
# To complete the pre-processing of our data, we will first normalize it to
# a range between 0 and 1. Then we reshape our data so that it is in a
# suitable format to be input into an LSTM network. LSTM cells expect a 3
# dimensional tensor of the form [data samples,time/revision
# steps,features]. Here, each sample input into the LSTM network represents
# one revision (which acts as one step in time) and contains 5 features —
# the statistics found for the collection of sample values at that revision.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(training_set)
X_test = scaler.transform(testing_set)
scaler_filename = "scaler_data"
joblib.dump(scaler, scaler_filename)
# Here we are reshaping the training data so that it will be processed by
# our autoencoder.
X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])
print("training data shape for " + label, X_train.shape)
X_test = X_test.reshape(X_test.shape[0], 1, X_test.shape[1])
print("test data shape for " + label, X_test.shape)
# Here we are building the model.
model = autoencoder_model(X_train)
model.compile(optimizer='adam', loss='mae')
model.summary()
# We train the model over 100 epochs. An epoch is one cycle through the full
# training dataset. So in this scenario, we are running the model over the
# training set 100 times in order to complete the training.
nb_epochs = 100
batch_size = 10
history = model.fit(X_train,
X_train,
epochs=nb_epochs,
batch_size=batch_size,
validation_split=0.05).history
# We plot the losses found in training to evaluate the performance of the
# model we have built.
fig, ax = plt.subplots(figsize=(14, 6), dpi=80)
ax.plot(history['loss'], 'b', label='Train', linewidth=2)
ax.plot(history['val_loss'], 'y', label='Validation', linewidth=2)
ax.set_title('Model loss for ' + label, fontsize=14)
ax.set_ylabel('Loss (mae)')
ax.set_xlabel('Epoch')
ax.legend(loc='upper right')
plt.show()
# Here we see the loss distribution plot.
X_pred = model.predict(X_train)
X_pred = X_pred.reshape(X_pred.shape[0], X_pred.shape[2])
X_pred = pd.DataFrame(X_pred, columns=training_set.columns)
X_pred.index = training_set.index
scored = pd.DataFrame(index=training_set.index)
Xtrain = X_train.reshape(X_train.shape[0], X_train.shape[2])
scored['Loss_mae'] = np.mean(np.abs(X_pred - Xtrain), axis=1)
plt.figure(figsize=(16, 9), dpi=80)
plt.title('Loss Distribution for ' + label, fontsize=16)
sns.distplot(scored['Loss_mae'], bins=20, kde=True, color='blue')
plt.xlim([0.0, .5])
sum_stats = scored['Loss_mae'].describe(percentiles=[.9, .95, .99])
# The tutorial followed in this notebook suggests that the way to determine
# the threshold is by analysing the loss distribution graph to determine the
# point at which the loss is negligible. However, we have decided to apply
# the 99th percentile as the threshold as it seems to provide a more
# reliable marker for acceptance.
X_pred = model.predict(X_test)
X_pred = X_pred.reshape(X_pred.shape[0], X_pred.shape[2])
X_pred =
|
pd.DataFrame(X_pred, columns=testing_set.columns)
|
pandas.DataFrame
|
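# A minimal sketch of the thresholding idea described near the end of the script above:
# take the 99th percentile of the training reconstruction error as the anomaly threshold
# and flag any test revision whose error exceeds it. The loss values passed in are
# synthetic placeholders, not real model output.
import numpy as np
import pandas as pd

def _flag_anomalies(train_loss, test_loss):
    threshold = np.percentile(train_loss, 99)
    flagged = pd.DataFrame({'Loss_mae': test_loss})
    flagged['Threshold'] = threshold
    flagged['Anomaly'] = flagged['Loss_mae'] > flagged['Threshold']
    return flagged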
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
更新数据范围列表,用于下载数据时减少重复下载
日线数据路径是 IF/IF1804.h5
分钟数据路径是 IF1804/IF1804_20180226.h5
已经退市的合约,只要做过一次,后面就没有必要再做了
但数据还是有问题,不活跃的合约,最后几天完全没有行情了
"""
import os
import sys
import pandas as pd
from datetime import datetime
from kquant_data.utils.xdatetime import yyyyMMddHHmm_2_datetime, yyyyMMdd_2_datetime
from kquant_data.config import __CONFIG_H5_FUT_SECTOR_DIR__
from kquant_data.future.symbol import wind_code_2_InstrumentID
from kquant_data.xio.csv import read_datetime_dataframe
from kquant_data.utils.xdatetime import tic, toc
# Work around pandas on Python 3.6 not supporting Chinese (non-ASCII) file paths
print(sys.getfilesystemencoding()) # check the encoding before the change
try:
sys._enablelegacywindowsfsencoding() # apply the change
print(sys.getfilesystemencoding()) # check the encoding after the change
except:
pass
path_ipo_last_trade = os.path.join(__CONFIG_H5_FUT_SECTOR_DIR__, 'ipo_last_trade_trading.csv')
def get_in_file_day(file_path, wind_code, df):
print(file_path)
# needed
df_h5 =
|
pd.read_hdf(file_path)
|
pandas.read_hdf
|
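# A small illustrative helper (not part of the original script) for the path layout described
# in the module docstring above: daily bars live under <product>/<contract>.h5 and minute bars
# under <contract>/<contract>_<yyyymmdd>.h5. The root directory argument is a hypothetical
# placeholder, not a real configuration value.
import os

def _daily_path(root, product, contract):
    return os.path.join(root, product, contract + '.h5')  # e.g. IF/IF1804.h5

def _minute_path(root, contract, yyyymmdd):
    return os.path.join(root, contract, '%s_%s.h5' % (contract, yyyymmdd))  # e.g. IF1804/IF1804_20180226.h5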
import os
import pandas as pd #for data analysis
import matplotlib.pyplot as plt
import cv2
import numpy as np
import math
import pydicom as pydicom
import tensorflow as tf
import tensorflow_addons as tfa
import sklearn
from sklearn.model_selection import train_test_split
import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
from tqdm import tqdm
import argparse
import gdcm
import random
import scipy.ndimage
import collections
import imblearn
import numpy
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras import utils as np_utils
from keras.utils.np_utils import to_categorical
from random import seed
from random import random
from random import randint
from tensorflow import keras
from tensorflow.keras.applications.vgg16 import preprocess_input
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.models import load_model
from tensorflow.keras import preprocessing
from tensorflow.keras import models
## ---------- Data set up functions, balancing data, and splitting dataset ------------ ######
# This function extracts the pixel_data, resizes it to a user-defined size (default is 50), and generates the associated vector for classification
# NOTE: Slight changes are made to all of the code to make further improvements using better libraries
def getImagesAndLabels(imageArray, labelArray, img_px_size=50, visualize=False):
np.random.seed = 1
images = []
labels = []
uids = []
idx = 0
print("getting images and labels")
for file, mortality in tqdm(zip(imageArray.iteritems(), labelArray.iteritems()),total = len(imageArray)):
uid = file[1]
label=mortality[1]
path = uid
image = pydicom.read_file(path)
if image.Modality != "SR":
if "PixelData" in image:
idx += 1
resized_image = cv2.resize(np.array(image.pixel_array),(img_px_size,img_px_size))
value = randint(1, 10)
factor = random()
if value == 3:
fig, ax = plt.subplots(1,2)
ax[0].imshow(resized_image)
resized_image = np.fliplr(resized_image)
ax[1].imshow(resized_image)
# NOTE: within a docker container you will not be able to see these visualizations
# uncomment this line if you would like to see what the image looks like when flipped across the y axis
# plt.show()
# this block is commented out because visualization is not possible within a docker container, but if you run this separately or within jupyter you can visualize every 15th image; change 15 to whatever number you like if you want to visualize more or less
if visualize:
# every 15th image is "visualized"; changing the 15 will let you view every xth image
if idx%15==0:
fig = plt.figure()
plt.imshow(resized_image)
# plt.show()
images.append(resized_image)
labels.append(label)
uids.append(uid)
print("total subjects avilable: ", idx)
print("lenth of images", len(images))
return images, labels, uids
# This function balances the data; however, unlike TrainModel-Container, test data is not balanced, to mimic real-world conditions. Credit for help understanding this: Sotiras, A., Assistant Professor of Radiology @WASHU
# As the dataset was imbalanced, balancing techniques were applied: the number of DICOMs for each class is counted and then balanced according to the user's preference, either undersampled or oversampled
def balanceData(imageArray, labelArray, underSample = False,):
# print(imageArray, labelArray)
concatinatedArrray = pd.concat([imageArray, labelArray], axis=1)
count_class_0, count_class_1 = concatinatedArrray.mortality.value_counts()
df_class_0 = concatinatedArrray[concatinatedArrray['mortality'] == 0]
df_class_1 = concatinatedArrray[concatinatedArrray['mortality'] == 1]
print("alive", len(df_class_0), "dead", len(df_class_1))
# print("before balancing")
concatinatedArrray.mortality.value_counts().plot(kind='bar', title='before balancing');
# undersampling of the data is done if the user chooses to undersample
if underSample:
df_class_0_under = df_class_0.sample(count_class_1)
df_test_under = pd.concat([df_class_0_under, df_class_1], axis=0)
print('Random under-sampling:')
# print(df_test_under.mortality.value_counts())
# print("after balancing")
df_test_under.mortality.value_counts().plot(kind='bar', title='after balancing_undersample');
total_data = pd.concat([df_class_0_under, df_class_1])
# print(len(total_data))
# oversampling is done if the user does not choose to undersample
else:
df_class_1_over = df_class_1.sample(count_class_0, replace=True)
df_test_over = pd.concat([df_class_0, df_class_1_over], axis=0)
print('Random over-sampling:')
# print(df_test_over.mortality.value_counts())
# print("after balancing")
df_test_over.mortality.value_counts().plot(kind='bar', title='after balancing_oversample');
total_data = pd.concat([df_class_0, df_class_1_over])
# print(len(total_data))
return total_data.path, total_data.mortality, total_data
# This function splits the data into train, validation, and test datasets. The steps are as follows:
# 1 user provides testSize, which splits the original dataset into (1-x)% training and x% "test dataset"
# 2 the "test dataset" is then split again in half, into a validation set and an actual test set
def splitData(px_size, visulize = False, testSize = 0.30, randState = 50, underSamp=False, numClasses=2):
count_class_0, count_class_1 = df_train.mortality.value_counts()
#getting classes counts
df_class_0 = df_train[df_train['mortality'] == 0]
df_class_1 = df_train[df_train['mortality'] == 1]
total_data =
|
pd.concat([df_class_0, df_class_1])
|
pandas.concat
|
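# A minimal sketch of the two-stage split described in the comments before splitData above:
# first carve off testSize of the data, then split that held-out portion in half into
# validation and test sets. The function and its inputs are illustrative assumptions,
# not the original splitData implementation.
from sklearn.model_selection import train_test_split

def _two_stage_split(X, y, test_size=0.30, rand_state=50):
    X_train, X_hold, y_train, y_hold = train_test_split(
        X, y, test_size=test_size, random_state=rand_state)
    X_val, X_test, y_val, y_test = train_test_split(
        X_hold, y_hold, test_size=0.5, random_state=rand_state)
    return X_train, X_val, X_test, y_train, y_val, y_test

# e.g. _two_stage_split(list(range(100)), [0, 1] * 50) gives a 70/15/15 split.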
"""Test for utils.py"""
from unittest.mock import Mock
import numpy as np
import pytest
from scipy import sparse
import torch
from torch.nn.utils.rnn import PackedSequence
from torch.nn.utils.rnn import pack_padded_sequence
from scripts.study_case.ID_12.skorch.tests.conftest import pandas_installed
class TestToTensor:
@pytest.fixture
def to_tensor(self):
from scripts.study_case.ID_12.skorch.utils import to_tensor
return to_tensor
@pytest.mark.skipif(not torch.cuda.is_available(), reason="no cuda device")
def test_device_setting_cuda(self, to_tensor):
x = np.ones((2, 3, 4))
t = to_tensor(x, device='cpu')
assert t.device.type == 'cpu'
t = to_tensor(x, device='cuda')
assert t.device.type.startswith('cuda')
t = to_tensor(t, device='cuda')
assert t.device.type.startswith('cuda')
t = to_tensor(t, device='cpu')
assert t.device.type == 'cpu'
def tensors_equal(self, x, y):
""""Test that tensors in diverse containers are equal."""
if isinstance(x, PackedSequence):
return self.tensors_equal(x[0], y[0]) and self.tensors_equal(x[1], y[1])
if isinstance(x, dict):
return (
(x.keys() == y.keys()) and
all(self.tensors_equal(x[k], y[k]) for k in x)
)
if isinstance(x, (list, tuple)):
return all(self.tensors_equal(xi, yi) for xi, yi in zip(x, y))
if x.is_sparse is not y.is_sparse:
return False
if x.is_sparse:
x, y = x.to_dense(), y.to_dense()
return (x == y).all()
# pylint: disable=no-method-argument
def parameters():
"""Yields data, expected value, and device for tensor conversion
test.
Stops earlier when no cuda device is available.
"""
device = 'cpu'
x = torch.zeros((5, 3)).float()
y = torch.as_tensor([2, 2, 1])
z = np.arange(15).reshape(5, 3)
for X, expected in [
(x, x),
(y, y),
([x, y], [x, y]),
((x, y), (x, y)),
(z, torch.as_tensor(z)),
(
{'a': x, 'b': y, 'c': z},
{'a': x, 'b': y, 'c': torch.as_tensor(z)}
),
(torch.as_tensor(55), torch.as_tensor(55)),
(pack_padded_sequence(x, y), pack_padded_sequence(x, y)),
]:
yield X, expected, device
if not torch.cuda.is_available():
return
device = 'cuda'
x = x.to('cuda')
y = y.to('cuda')
for X, expected in [
(x, x),
(y, y),
([x, y], [x, y]),
((x, y), (x, y)),
(z, torch.as_tensor(z).to('cuda')),
(
{'a': x, 'b': y, 'c': z},
{'a': x, 'b': y, 'c': torch.as_tensor(z).to('cuda')}
),
(torch.as_tensor(55), torch.as_tensor(55).to('cuda')),
(
pack_padded_sequence(x, y),
pack_padded_sequence(x, y).to('cuda')
),
]:
yield X, expected, device
@pytest.mark.parametrize('X, expected, device', parameters())
def test_tensor_conversion_cuda(self, to_tensor, X, expected, device):
result = to_tensor(X, device)
assert self.tensors_equal(result, expected)
assert self.tensors_equal(expected, result)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_sparse_tensor(self, to_tensor, device):
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip()
inp = sparse.csr_matrix(np.zeros((5, 3)).astype(np.float32))
expected = torch.sparse_coo_tensor(size=(5, 3)).to(device)
result = to_tensor(inp, device=device, accept_sparse=True)
assert self.tensors_equal(result, expected)
@pytest.mark.parametrize('device', ['cpu', 'cuda'])
def test_sparse_tensor_not_accepted_raises(self, to_tensor, device):
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip()
inp = sparse.csr_matrix(np.zeros((5, 3)).astype(np.float32))
with pytest.raises(TypeError) as exc:
to_tensor(inp, device=device)
msg = ("Sparse matrices are not supported. Set "
"accept_sparse=True to allow sparse matrices.")
assert exc.value.args[0] == msg
class TestDuplicateItems:
@pytest.fixture
def duplicate_items(self):
from scripts.study_case.ID_12.skorch.utils import duplicate_items
return duplicate_items
@pytest.mark.parametrize('collections', [
([],),
([], []),
([], [], []),
([1, 2]),
([1, 2], [3]),
([1, 2], [3, '1']),
([1], [2], [3], [4]),
({'1': 1}, [2]),
({'1': 1}, {'2': 1}, ('3', '4')),
])
def test_no_duplicates(self, duplicate_items, collections):
assert duplicate_items(*collections) == set()
@pytest.mark.parametrize('collections, expected', [
([1, 1], {1}),
(['1', '1'], {'1'}),
([[1], [1]], {1}),
([[1, 2, 1], [1]], {1}),
([[1, 1], [2, 2]], {1, 2}),
([[1], {1: '2', 2: '2'}], {1}),
([[1, 2], [3, 4], [2], [3]], {2, 3}),
([{'1': 1}, {'1': 1}, ('3', '4')], {'1'}),
])
def test_duplicates(self, duplicate_items, collections, expected):
assert duplicate_items(*collections) == expected
class TestParamsFor:
@pytest.fixture
def params_for(self):
from scripts.study_case.ID_12.skorch.utils import params_for
return params_for
@pytest.mark.parametrize('prefix, kwargs, expected', [
('p1', {'p1__a': 1, 'p1__b': 2}, {'a': 1, 'b': 2}),
('p2', {'p1__a': 1, 'p1__b': 2}, {}),
('p1', {'p1__a': 1, 'p1__b': 2, 'p2__a': 3}, {'a': 1, 'b': 2}),
('p2', {'p1__a': 1, 'p1__b': 2, 'p2__a': 3}, {'a': 3}),
])
def test_params_for(self, params_for, prefix, kwargs, expected):
assert params_for(prefix, kwargs) == expected
class TestDataFromDataset:
@pytest.fixture
def data_from_dataset(self):
from scripts.study_case.ID_12.skorch.utils import data_from_dataset
return data_from_dataset
@pytest.fixture
def data(self):
X = np.arange(8).reshape(4, 2)
y = np.array([1, 3, 0, 2])
return X, y
@pytest.fixture
def skorch_ds(self, data):
from scripts.study_case.ID_12.skorch.dataset import Dataset
return Dataset(*data)
@pytest.fixture
def subset(self, skorch_ds):
from torch.utils.data.dataset import Subset
return Subset(skorch_ds, [1, 3])
@pytest.fixture
def subset_subset(self, subset):
from torch.utils.data.dataset import Subset
return Subset(subset, [0])
# pylint: disable=missing-docstring
@pytest.fixture
def other_ds(self, data):
class MyDataset:
"""Non-compliant dataset"""
def __init__(self, data):
self.data = data
def __getitem__(self, idx):
return self.data[0][idx], self.data[1][idx]
def __len__(self):
return len(self.data[0])
return MyDataset(data)
def test_with_skorch_ds(self, data_from_dataset, data, skorch_ds):
X, y = data_from_dataset(skorch_ds)
assert (X == data[0]).all()
assert (y == data[1]).all()
def test_with_subset(self, data_from_dataset, data, subset):
X, y = data_from_dataset(subset)
assert (X == data[0][[1, 3]]).all()
assert (y == data[1][[1, 3]]).all()
def test_with_subset_subset(self, data_from_dataset, data, subset_subset):
X, y = data_from_dataset(subset_subset)
assert (X == data[0][1]).all()
assert (y == data[1][1]).all()
def test_with_other_ds(self, data_from_dataset, other_ds):
with pytest.raises(AttributeError):
data_from_dataset(other_ds)
def test_with_dict_data(self, data_from_dataset, data, subset):
subset.dataset.X = {'X': subset.dataset.X}
X, y = data_from_dataset(subset)
assert (X['X'] == data[0][[1, 3]]).all()
assert (y == data[1][[1, 3]]).all()
def test_subset_with_y_none(self, data_from_dataset, data, subset):
subset.dataset.y = None
X, y = data_from_dataset(subset)
assert (X == data[0][[1, 3]]).all()
assert y is None
class TestMultiIndexing:
@pytest.fixture
def multi_indexing(self):
from scripts.study_case.ID_12.skorch.dataset import multi_indexing
return multi_indexing
@pytest.mark.parametrize('data, i, expected', [
(
np.arange(12).reshape(4, 3),
slice(None),
np.arange(12).reshape(4, 3),
),
(
np.arange(12).reshape(4, 3),
np.s_[2],
np.array([6, 7, 8]),
),
(
np.arange(12).reshape(4, 3),
np.s_[-2:],
np.array([[6, 7, 8], [9, 10, 11]]),
),
])
def test_ndarray(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i)
assert np.allclose(result, expected)
@pytest.mark.parametrize('data, i, expected', [
(
torch.arange(0, 12).view(4, 3),
slice(None),
np.arange(12).reshape(4, 3),
),
(
torch.arange(0, 12).view(4, 3),
np.s_[2],
np.array([6, 7, 8]),
),
(
torch.arange(0, 12).view(4, 3),
np.int64(2),
np.array([6, 7, 8]),
),
(
torch.arange(0, 12).view(4, 3),
np.s_[-2:],
np.array([[6, 7, 8], [9, 10, 11]]),
),
])
def test_torch_tensor(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i).long().numpy()
assert np.allclose(result, expected)
@pytest.mark.parametrize('data, i, expected', [
([1, 2, 3, 4], slice(None), [1, 2, 3, 4]),
([1, 2, 3, 4], slice(None, 2), [1, 2]),
([1, 2, 3, 4], 2, 3),
([1, 2, 3, 4], -2, 3),
])
def test_list(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i)
assert np.allclose(result, expected)
@pytest.mark.parametrize('data, i, expected', [
({'a': [0, 1, 2], 'b': [3, 4, 5]}, 0, {'a': 0, 'b': 3}),
(
{'a': [0, 1, 2], 'b': [3, 4, 5]},
np.s_[:2],
{'a': [0, 1], 'b': [3, 4]},
)
])
def test_dict_of_lists(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i)
assert result == expected
@pytest.mark.parametrize('data, i, expected', [
(
{'a': np.arange(3), 'b': np.arange(3, 6)},
0,
{'a': 0, 'b': 3}
),
(
{'a': np.arange(3), 'b': np.arange(3, 6)},
np.s_[:2],
{'a': np.arange(2), 'b': np.arange(3, 5)}
),
])
def test_dict_of_arrays(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i)
assert result.keys() == expected.keys()
for k in result:
assert np.allclose(result[k], expected[k])
@pytest.mark.parametrize('data, i, expected', [
(
{'a': torch.arange(0, 3), 'b': torch.arange(3, 6)},
0,
{'a': 0, 'b': 3}
),
(
{'a': torch.arange(0, 3), 'b': torch.arange(3, 6)},
np.s_[:2],
{'a': np.arange(2), 'b': np.arange(3, 5)}
),
])
def test_dict_of_torch_tensors(self, multi_indexing, data, i, expected):
result = multi_indexing(data, i)
assert result.keys() == expected.keys()
for k in result:
try:
val = result[k].long().numpy()
except AttributeError:
val = result[k]
assert np.allclose(val, expected[k])
def test_mixed_data(self, multi_indexing):
data = [
[1, 2, 3],
np.arange(3),
torch.arange(3, 6),
{'a': [4, 5, 6], 'b': [7, 8, 9]},
]
result = multi_indexing(data, 0)
expected = [1, 0, 3, {'a': 4, 'b': 7}]
assert result == expected
def test_mixed_data_slice(self, multi_indexing):
data = [
[1, 2, 3],
np.arange(3),
torch.arange(3, 6),
{'a': [4, 5, 6], 'b': [7, 8, 9]},
]
result = multi_indexing(data, np.s_[:2])
assert result[0] == [1, 2]
assert np.allclose(result[1], np.arange(2))
assert np.allclose(result[2].long().numpy(), np.arange(3, 5))
assert result[3] == {'a': [4, 5], 'b': [7, 8]}
@pytest.fixture
def pd(self):
if not pandas_installed:
pytest.skip()
import pandas as pd
return pd
def test_pandas_dataframe(self, multi_indexing, pd):
df = pd.DataFrame({'a': [0, 1, 2], 'b': [3, 4, 5]}, index=[2, 1, 0])
result = multi_indexing(df, 0)
# Note: taking one row of a DataFrame returns a Series
expected = pd.Series(data=[0, 3], index=['a', 'b'], name=2)
assert result.equals(expected)
def test_pandas_dataframe_slice(self, multi_indexing, pd):
import pandas as pd
df = pd.DataFrame({'a': [0, 1, 2], 'b': [3, 4, 5]}, index=[2, 1, 0])
result = multi_indexing(df, np.s_[:2])
expected = pd.DataFrame({'a': [0, 1], 'b': [3, 4]}, index=[2, 1])
assert result.equals(expected)
def test_pandas_series(self, multi_indexing, pd):
series = pd.Series(data=[0, 1, 2], index=[2, 1, 0])
result = multi_indexing(series, 0)
assert result == 0
def test_pandas_series_slice(self, multi_indexing, pd):
series = pd.Series(data=[0, 1, 2], index=[2, 1, 0])
result = multi_indexing(series, np.s_[:2])
expected = pd.Series(data=[0, 1], index=[2, 1])
assert result.equals(expected)
def test_list_of_dataframe_and_series(self, multi_indexing, pd):
data = [
|
pd.DataFrame({'a': [0, 1, 2], 'b': [3, 4, 5]}, index=[2, 1, 0])
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import chart_studio
import matplotlib.pyplot as plt
from plotly import graph_objs as go
import sklearn
import seaborn as sns
import itertools
def plot_results(x_values, y_init_concat, y_pred_concat, zoomable=False):
# Use of classic motplotlib
if not zoomable:
plt.subplots(figsize=(10, 4))
plt.plot(x_values, y_init_concat, label='truth') # , marker='o')
plt.plot(x_values, y_pred_concat, label='prediction') # , marker='o')
plt.legend()
plt.show()
# Use of Plotly
else:
temp_real = go.Scatter(
x=pd.Series(x_values),
y=
|
pd.Series(y_init_concat)
|
pandas.Series
|
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
class NullMaker(TransformerMixin):
"""
Finds values that should be nulls and replaces with null
"""
def fit(self, X, y=None):
return self
def transform(self, X):
Xnulls = X.replace(["UNABLE TO DETERMINE", "NOT APPLICABLE", "UNKNOWN"], np.nan)
return Xnulls
class BeatFormatter(TransformerMixin):
"""
Roll up police beat of occurence value to two digits
"""
def fit(self, X, y=None):
return self
def transform(self, X):
dfs = []
X =
|
pd.DataFrame(X)
|
pandas.DataFrame
|
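# A short usage sketch (not part of the original module) for the NullMaker transformer defined
# above: placeholder strings such as "UNKNOWN" are replaced with np.nan so that downstream
# imputation treats them as missing. The tiny frame below is made up for illustration.
import pandas as pd

def _nullmaker_demo():
    raw = pd.DataFrame({'BEAT': ['0110', 'UNKNOWN'],
                        'CONDITION': ['NOT APPLICABLE', 'CLEAR']})
    cleaned = NullMaker().fit_transform(raw)
    return cleaned.isnull().sum()  # one null in each column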
from collections import defaultdict
from soynlp.word import WordExtractor
from soynlp.vectorizer import sent_to_word_contexts_matrix
from soynlp.word import pmi as pmi_func
from soynlp.tokenizer import LTokenizer
from soynlp.utils import most_similar
import pandas as pd
import sqlite3
import re
from string import punctuation
# Stopword handling
pattern1 = re.compile(r'[{}]'.format(re.escape(punctuation))) # remove punctuation
pattern2 = re.compile(r'[^가-힣 ]') # remove special characters, jamo, digits, and English letters
pattern3 = re.compile(r'\s{2,}') # collapse runs of whitespace into a single space
class Sentiment:
def __init__(self):
self.word_extractor = WordExtractor()
def extract_sent(self, df, words): # inputs: a DataFrame (df) and the neologism words
# Stopword handling
df['head'] = df['head'].map(lambda x: pattern3.sub(' ',
pattern2.sub('',
pattern1.sub('', x))))
# Extract the sentences containing each neologism
sent = defaultdict(lambda: 0)
for w in words:
temp = [s for s in df['head'] if w in s]
sent[w] = ' '.join(temp)
# For each sentence in the DataFrame, if it contains the neologism, store that sentence in temp.
# The sent dict stores key = neologism, value = all example sentences containing it (sentences separated by a double space).
return sent
# For an input pair k, v (k is the word, v is its sentences), return (word, pmi) pairs for the 10 most similar words.
def extract_most_related(self, k, v, words, num=10): # inputs: word and sentences, the neologism dict, and the number of similar words to return
self.word_extractor.train([v]) # train the word_extractor on all example sentences for the neologism
cohesions = self.word_extractor.all_cohesion_scores() # store the cohesion scores in cohesions
l_cohesions = {word: score[0] for word, score in cohesions.items()} # keep each word and its cohesion_forward value
l_cohesions.update(words)
tokenizer = LTokenizer(l_cohesions) # build the tokenizer
# For news data, which has few samples, set min_tf to 2.
x, idx2vocab = sent_to_word_contexts_matrix([v], windows=3, min_tf=10, tokenizer=tokenizer,
dynamic_weight=False, verbose=True)
# idx2vocab : the list of words produced by the LTokenizer
# extract the 3 words around each word as its context
pmi, px, py = pmi_func(x, min_pmi=0, alpha=0.0, beta=0.75)
# Computes PMI over the (rows, columns) of x; rows are x and columns are y.
vocab2idx = {vocab: idx for idx, vocab in enumerate(idx2vocab)} # dictionary mapping word -> index
query = vocab2idx[k]
submatrix = pmi[query, :].tocsr() # get the row of query
contexts = submatrix.nonzero()[1] # nonzero() return (rows, columns)
pmi_i = submatrix.data
most_relateds = [(idx, pmi_ij) for idx, pmi_ij in zip(contexts, pmi_i)]
most_relateds = sorted(most_relateds, key=lambda x: -x[1])[:num]
most_relateds = [(idx2vocab[idx], pmi_ij) for idx, pmi_ij in most_relateds if len(idx2vocab[idx]) > 1]
# Find the words whose context vectors are similar to that of word k.
return most_relateds # return the similar words
# For the input word-sentence pairs, save the sentiment scores to an Excel file
# and return the neologisms with their most_related (word, pmi) pairs.
def cal_score(self, sentence): # input: a DataFrame of neologism-sentence pairs
mapping_most_related = defaultdict(lambda: 0) # cache each most_related result here so re-runs are efficient
# Convert to a dictionary.
sent = defaultdict(lambda: 0)
for _ in range(len(sentence)):
sent[sentence['index'][_]] = sentence['0'][_]
score_dict = defaultdict(lambda: 0)
words = {_: 1.0 for _ in sent.keys()}
sentiment = pd.read_excel('sentiment.xlsx') # sentiment.xlsx is the sentiment dictionary
# It was built by extracting the most_related words longer than one character from an initial run and labeling them by hand.
# Compute a sentiment score for each word.
# The sentiment score is updated by adding pmi value * the existing sentiment score of each related word.
for k, v in sent.items():
mapping_most_related[k] = self.extract_most_related(k,v,words)
pn_score = 0
for _ in mapping_most_related[k]:
if sum(sentiment[0] == _[0]) != 0:
pn_score += _[1] * sentiment[sentiment[0] == _[0]]['P/N'].iloc[0]
score_dict[k] = pn_score
# Save the sentiment scores to an Excel file.
temp = pd.DataFrame(sorted(score_dict.items(), key=lambda _: _[1], reverse=True))
temp.to_excel('sentiment_result.xlsx')
return mapping_most_related
# From sentiment_result, take the top 3 positive and top 3 negative words and update the sentiment-dictionary scores of their 30 most_related words.
# Words already in the positive or negative lists are not updated again.
# In other words, this is the learning step for evaluating the sentiment of neologisms.
def update_score(self, positive, negative, sentiment_result): # inputs: the already-updated positive and negative lists, and the sentiment-analysis result to update from
sent_dict = defaultdict(lambda: 0)
# (positive) For the top-3 words, add 'word': 'P' to sent_dict.
count = 0
for _ in sentiment_result[0]:
if _ not in positive:
sent_dict[_] = "P"
count += 1
if count > 3:
break
# Reverse the order of sentiment_result.
temp = list(sentiment_result[0])
sentiment_result = []
for _ in range(len(temp)):
sentiment_result.append(temp.pop(-1))
count = 0
# (negative) For the top-3 words, add 'word': 'N' to sent_dict.
for _ in sentiment_result:
if _ not in negative:
sent_dict[_] = "N"
count += 1
if count > 3:
break
# Convert to lists and append to the positive and negative lists.
sent_dict = pd.DataFrame.from_dict(sent_dict, orient='index')
ptemp = list(sent_dict[sent_dict[0] == 'P'].index)
ntemp = list(sent_dict[sent_dict[0] == 'N'].index)
for _ in ptemp:
positive.append(_)
for _ in ntemp:
negative.append(_)
# Build a pandas DataFrame of (word, sentence) pairs.
conn = sqlite3.connect('sent.db')
sent =
|
pd.read_sql('SELECT * FROM sent', conn)
|
pandas.read_sql
|
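# A toy numeric sketch of the scoring rule translated above: a neologism's sentiment score is
# the sum, over its most-related words, of (PMI with that word) * (the word's P/N value in the
# sentiment dictionary). The words, PMI values, and dictionary below are made up for illustration.
def _pn_score(most_related, sentiment_dict):
    score = 0.0
    for word, pmi_value in most_related:
        if word in sentiment_dict:
            score += pmi_value * sentiment_dict[word]
    return score

# _pn_score([('great', 2.0), ('awful', 1.5), ('table', 0.7)], {'great': 1, 'awful': -1})
# -> 2.0 * 1 + 1.5 * (-1) = 0.5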
# -- --------------------------------------------------------------------------------------------------- -- #
# -- MarketMaker-BackTest -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- file: data.py -- #
# -- Description: Data sources and processing -- #
# -- --------------------------------------------------------------------------------------------------- -- #
# -- Author: IFFranciscoME - <EMAIL> -- #
# -- license: MIT License -- #
# -- Repository: https://github.com/IFFranciscoME/MarketMaker-BackTest -- #
# --------------------------------------------------------------------------------------------------------- #
# -- Load base packages
import pandas as pd
import numpy as np
import time
import json
# -- Cryptocurrency data and trading API
import ccxt
# -- Asyncronous data fetch
import asyncio
import ccxt.async_support as ccxt_async
# --------------------------------------------------------------------------- EXCHANGE TRANSACTIONS FEEs -- #
# --------------------------------------------------------------------------------------------------------- #
def fees_schedule(exchange, symbol, expected_volume):
"""
To get the fee schedule of an already initialized client-exchange, including the case where there is
a tierBased list provided by ccxt library.
Parameters
----------
exchange: str
with the name of the exchange to be connected to
symbol: str
with the market symbol whose fee schedule is requested
expected_volume: numeric
with a monthly expected volume of transactions, expressed in USD.
Returns
-------
r_exchange_fees: dict
with 'taker' and 'maker' fees expressed in basis points.
References
----------
All the information currently available is obtained from ccxt's already integrated API to different
exchanges, which according to [1] supports more than 120 bitcoin/altcoin exchanges. In order to
validate the fee schedule of an initialized client-exchange, please refer to the documentation of that
particular exchange. The example includes reference urls for two exchanges: Bitfinex [2] and Kraken [3].
[1] https://github.com/ccxt/ccxt
[2] https://www.bitfinex.com/fees/
[3] https://www.kraken.com/en-us/features/fee-schedule
"""
# Initialize client
client_exchange = getattr(ccxt, exchange)({'enableRateLimit': True})
client_markets = client_exchange.load_markets()
# In case ccxt has registered a tierBased list of fees
if client_markets[symbol]['tierBased']:
try:
# locate the closest tier value according to a given monthly expected volume (provided in USD)
idx = np.array([abs(i[0] - expected_volume)
for i in client_markets[symbol]['tiers']['taker']]).argmin()
r_exchange_fees = {'taker': client_markets[symbol]['tiers']['taker'][idx][1],
'maker': client_markets[symbol]['tiers']['maker'][idx][1]}
except:
print('Tier was not found, returning the highest fee value')
# In case of exception in the tier search, return the values of the first position
r_exchange_fees = {'taker': client_markets[symbol]['tiers']['taker'][0][1],
'maker': client_markets[symbol]['tiers']['maker'][0][1]}
else:
# In case a tierBased fee schedule is not supported, the standard value returned from ccxt is used.
r_exchange_fees = {'taker': client_markets[symbol]['taker'],
'maker': client_markets[symbol]['maker']}
# Return final data
return r_exchange_fees
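# A toy sketch of the tier-selection step used in fees_schedule above: pick the tier whose
# volume bound is closest (smallest absolute difference) to the expected monthly volume.
# The tier list below is made up for illustration and is not any exchange's real schedule.
def _closest_tier(tiers, expected_volume):
    idx = np.array([abs(level[0] - expected_volume) for level in tiers]).argmin()
    return tiers[idx][1]

# _closest_tier([[0, 0.0026], [50000, 0.0024], [100000, 0.0022]], 60000) -> 0.0024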
# --------------------------------------------------------------------------- ASYNCRONOUS ORDERBOOK DATA -- #
# --------------------------------------------------------------------------------------------------------- #
def order_book(symbol, exchanges, execution='async', stop=None, output=None, verbose=True):
"""
Asynchronous OrderBook data fetcher. It will asynchronously catch innovations (updates) whenever they
occur for every exchange included in the list exchanges, and return the complete orderbook in a
JSON format or DataFrame format with 'ask', 'ask_size', 'bid', 'bid_size'.
Parameters
----------
symbol: str
with the name of the instrument or market to fetch the orderbook from.
exchanges: list
with the names of exchanges from where the data is intended to be fetched.
execution: str
'async': Asynchronous option to fetch several orderbooks in the same call. Depends on
asyncio and ccxt.async_support
'parallel': Run parallel processing to deploy 1 instance for each symbol at each market. Depends
on multiprocessing (pending)
stop: dict
Criteria to stop the execution. Default behavior will be to stop after 1 minute of running.
'min_count': int
Stops when all orderbooks have, at least, this number of registred timestamps.
'target_timestamp': datetime
Stops when a specific timestamp is reached.
None: (default)
Stop when 1 minute has elapsed
output: str
Options for the output. Default is inplace
'JSON': will write a JSON file (pending)
'inplace': Delivers the result in a pd.DataFrame inplace
verbose: bool
To print in real time the fetched first ask and bid of every exchange.
Returns
-------
r_data: dict
A dictionary with the fetched data, with the following structure.
r_data = {
instrument: {
exchange: {
timestamp: {'ask': 1.4321, 'ask_size': 0.12,
'bid': 1.1234, 'bid_size': 0.21},
timestamp: {'ask': 1.4321, 'ask_size': 0.12,
'bid': 1.1234, 'bid_size': 0.21}
}
}
References
----------
[1] https://github.com/ccxt/ccxt
[2] https://docs.python.org/3/library/asyncio.html
"""
# Store data for every exchange in the list
r_data = {'kraken': {}, 'ftx': {}, 'currencycom': {}, 'coinmate': {}}
# ----------------------------------------------------------------------------- ASYNCRONOUS REQUESTS -- #
async def async_client(exchange, symbol):
# Await to be inside exchange limits of calls
# await asyncio.sleep(exchange.rateLimit / 1000)
# Initialize the client inside the function; it will be closed later. Since this runs asynchronously,
# more than 1 client could be created and later closed.
client = getattr(ccxt_async, exchange)({'enableRateLimit': True})
await client.load_markets()
# Check for symbol support on exchange
if symbol not in client.symbols:
raise Exception(exchange + ' does not support symbol ' + symbol)
# Initial time and counter
time_1 = time.time()
time_f = 0
# Loop until stop criteria is reached
while time_f <= 60:
# Try and await for client response
try:
# Fetch, await and get datetime
orderbook = await client.fetch_order_book(symbol)
datetime = client.iso8601(client.milliseconds())
# Verbosity
if verbose:
print(datetime, client.id, symbol, orderbook['bids'][0], orderbook['asks'][0])
# Unpack values
ask_price, ask_size = np.array(list(zip(*orderbook['asks']))[0:2])
bid_price, bid_size = np.array(list(zip(*orderbook['bids']))[0:2])
spread = np.round(ask_price - bid_price, 4)
# Final data format for the results
r_data[client.id].update({datetime: pd.DataFrame({'ask_size': ask_size, 'ask': ask_price,
'bid': bid_price, 'bid_size': bid_size,
'spread': spread}) })
# End time
time_2 = time.time()
time_f = round(time_2 - time_1, 4)
# In case something bad happens with client
except Exception as e:
print(type(e).__name__, e.args, str(e))
pass
# Close client
await client.close()
# ------------------------------------------------------------------------------ MULTIPLE ORDERBOOKS -- #
async def multi_orderbooks(exchanges, symbol):
# A list of routines (and parameters) to run
input_coroutines = [async_client(exchange, symbol) for exchange in exchanges]
# wait for responses
await asyncio.gather(*input_coroutines, return_exceptions=True)
# Run event loop in async
if execution=='async':
asyncio.get_event_loop().run_until_complete(multi_orderbooks(exchanges, symbol))
# Run multiple events in parallel
elif execution=='parallel':
raise ValueError("Only 'async' execution is currently supported")
# Raise error in case of other value
else:
raise ValueError(execution, 'is not supported as a type of execution')
# ----------------------------------------------------------------------------------- TYPE OF OUTPUT -- #
# A JSON file written to the working directory
if output == 'JSON':
# Serializing json
json_object =
|
pd.DataFrame(r_data)
|
pandas.DataFrame
|
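# --- Illustrative sketch (added for clarity, not part of the snippet above) --------------------------
# The docstring above describes r_data as {instrument: {exchange: {timestamp: quote}}}. A minimal,
# hypothetical way to flatten that structure into a tidy pd.DataFrame (the sample values are made up):
import pandas as pd

r_data_example = {
    'BTC/USD': {
        'kraken': {
            '2021-01-01T00:00:00Z': {'ask': 1.4321, 'ask_size': 0.12, 'bid': 1.1234, 'bid_size': 0.21},
            '2021-01-01T00:00:01Z': {'ask': 1.4322, 'ask_size': 0.10, 'bid': 1.1235, 'bid_size': 0.20},
        },
    },
}
rows = [
    {'instrument': ins, 'exchange': exc, 'timestamp': ts, **quote}
    for ins, books in r_data_example.items()
    for exc, book in books.items()
    for ts, quote in book.items()
]
tidy = pd.DataFrame(rows)  # one row per (instrument, exchange, timestamp)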
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
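# Illustrative sketch (not part of the test suite): how the one-second nudges above change an
# inclusive, label-based slice of a 5-minute series.
_sketch_idx = pd.date_range('2019-04-04 12:00', periods=4, freq='5min')
_sketch = pd.Series(range(4), index=_sketch_idx)
# the plain slice keeps the sample stamped exactly at the start ...
assert len(_sketch['2019-04-04 12:00':'2019-04-04 12:10']) == 3
# ... while shifting the start by 1s (as done for 'ending' labels) drops it
assert len(_sketch[pd.Timestamp('2019-04-04 12:00') + pd.Timedelta('1s'):pd.Timestamp('2019-04-04 12:10')]) == 2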
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp(end, tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data=load_data)
expected_index = pd.date_range(
start='20190404 1300', end=end, freq='5min', tz=tz,
closed=closed)
expected = pd.Series(100., index=expected_index)
assert_series_equal(fx, expected)
@pytest.mark.parametrize('obs_interval_label', ('beginning', 'ending',
'instant'))
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190406 0000'),
('ending', 'right', '20190406 0000'),
('instant', None, '20190405 2359')
])
def test_persistence_interval(site_metadata, obs_interval_label,
interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label=obs_interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
data = pd.Series(data_index.hour, index=data_index, dtype=float)
if obs_interval_label == 'ending':
# e.g. timestamp 12:00:00 should be equal to 11
data = data.shift(1).fillna(0)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed=closed)
expected_vals = list(range(0, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
# handle permutations of parameters that should fail
if data_end.minute == 59 and obs_interval_label != 'instant':
expectation = pytest.raises(ValueError)
elif data_end.minute == 0 and obs_interval_label == 'instant':
expectation = pytest.raises(ValueError)
else:
expectation = does_not_raise()
with expectation:
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected)
def test_persistence_interval_missing_data(site_metadata):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label='ending')
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404T1200', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
end = '20190406 0000'
data = pd.Series(data_index.hour, index=data_index, dtype=float)
data = data.shift(1)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed='right')
expected_vals = [None] * 12 + list(range(12, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, 'ending', load_data)
assert_series_equal(fx, expected)
@pytest.fixture
def uniform_data():
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
return data
@pytest.mark.parametrize(
'interval_label,expected_index,expected_ghi,expected_ac,obsscale', (
('beginning',
['20190404 1300', '20190404 1330'],
[96.41150694741889, 91.6991546408236],
[96.60171202566896, 92.074796727846],
1),
('ending',
['20190404 1330', '20190404 1400'],
[96.2818141290749, 91.5132934827808],
[96.47816752344607, 91.89460837042301],
1),
# test clipped at 2x clearsky
('beginning',
['20190404 1300', '20190404 1330'],
[1926.5828549018618, 1832.4163238767312],
[383.1524464326973, 365.19729186262526],
50)
)
)
def test_persistence_scalar_index(
powerplant_metadata, uniform_data, interval_label,
expected_index, expected_ghi, expected_ac, obsscale):
# ac_capacity is 200 from above
observation = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning')
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning', variable='ac_power')
data = uniform_data * obsscale
tz = data.index.tzinfo
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
expected_index, tz=tz, freq=interval_length)
expected = pd.Series(expected_ghi, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected = pd.Series(expected_ac, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_instant_obs_fx(
site_metadata, powerplant_metadata, uniform_data):
# instantaneous obs and fx
interval_length = pd.Timedelta('30min')
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
data = uniform_data
tz = data.index.tzinfo
load_data = partial(load_data_base, data)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1259', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1359', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.59022431746838, 91.99405501672328]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_values = [96.77231379880752, 92.36198028963426]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
# instant obs and fx, but with offset added to starts instead of ends
data_start = pd.Timestamp('20190404 1201', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1301', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.55340033645147, 91.89662922267517]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_invalid_times_instant(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
# instant obs that cover the whole interval - not allowed!
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
with pytest.raises(ValueError):
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
@pytest.mark.parametrize('data_start,data_end,forecast_start,forecast_end', (
('20190404 1201', '20190404 1300', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1259', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1301', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1300', '20190404 1359'),
))
def test_persistence_scalar_index_invalid_times_interval(
site_metadata, interval_label, data_start, data_end, forecast_start,
forecast_end):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
# base times to mess with
data_start = pd.Timestamp(data_start, tz=tz)
data_end = pd.Timestamp(data_end, tz=tz)
forecast_start = pd.Timestamp(forecast_start, tz=tz)
forecast_end = pd.Timestamp(forecast_end, tz=tz)
# interval average obs with invalid starts/ends
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
errtext = "with interval_label beginning or ending"
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert errtext in str(excinfo.value)
def test_persistence_scalar_index_invalid_times_invalid_label(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
interval_label = 'invalid'
observation = default_observation(
site_metadata, interval_length='5min')
object.__setattr__(observation, 'interval_label', interval_label)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert "invalid interval_label" in str(excinfo.value)
def test_persistence_scalar_index_low_solar_elevation(
site_metadata, powerplant_metadata):
interval_label = 'beginning'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
# at ABQ Baseline, solar apparent zenith for these points is
# 2019-05-13 12:00:00+00:00 91.62
# 2019-05-13 12:05:00+00:00 90.09
# 2019-05-13 12:10:00+00:00 89.29
# 2019-05-13 12:15:00+00:00 88.45
# 2019-05-13 12:20:00+00:00 87.57
# 2019-05-13 12:25:00+00:00 86.66
tz = 'UTC'
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
index = pd.date_range(start=data_start, end=data_end,
freq='5min', closed='left')
# clear sky 5 min avg (from 1 min avg) GHI is
# [0., 0.10932908, 1.29732454, 4.67585122, 10.86548521, 19.83487399]
# create data series that could produce obs / clear of
# 0/0, 1/0.1, -1/1.3, 5/5, 10/10, 20/20
# average without limits is (10 - 1 + 1 + 1 + 1) / 5 = 2.4
# average with element limits of [0, 2] = (2 + 0 + 1 + 1 + 1) / 5 = 1
data = pd.Series([0, 1, -1, 5, 10, 20.], index=index)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start=forecast_start, end=forecast_end, freq='5min', closed='left')
# clear sky 5 min avg GHI is
# [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected_vals = [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected = pd.Series(expected_vals, index=expected_index)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
expected = pd.Series([0.2, 0.7, 1.2, 1.6, 2., 2.5], index=expected_index)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
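# Illustrative sketch (not part of the test suite): the clipped-ratio arithmetic spelled out in the
# comments of the test above, using the approximate obs/clearsky ratios (0/0 treated as NaN).
import numpy as np
_ratios = np.array([np.nan, 10., -1., 1., 1., 1.])
assert round(np.nanmean(_ratios), 1) == 2.4                   # average without limits
assert round(np.nanmean(np.clip(_ratios, 0, 2)), 1) == 1.0    # average with element limits [0, 2]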
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0, 0, 0, 20, 20, 20], 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0, 0, 0, 4, 4, 4], 'y', [50], [2]),
# invalid axis
pytest.param([0, 0, 0, 4, 4, 4], 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic(site_metadata, interval_label, obs_values,
axis, constant_values, expected_values):
tz = 'UTC'
interval_length = '5min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='5min',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
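# Illustrative sketch (not part of the test suite): the empirical-CDF idea behind the axis parameter
# above. With axis='x' the constant_values are variable values and the forecasts are percentiles;
# with axis='y' it is the reverse.
import numpy as np
_obs = np.array([0, 0, 0, 20, 20, 20])
assert 100 * np.mean(_obs <= 10) == 50.0    # 50% of observations are <= 10
assert 100 * np.mean(_obs <= 20) == 100.0   # 100% of observations are <= 20
assert np.percentile([0, 0, 0, 4, 4, 4], 50) == 2.0   # the 50th percentile is 2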
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0] * 11 + [20] * 11, 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0] * 11 + [4] * 11, 'y', [50], [2]),
# invalid axis
pytest.param([0] * 11 + [4] * 11, 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
# insufficient observation data
pytest.param([5.3, 7.3, 1.4] * 4, 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
pytest.param([], 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
pytest.param([None]*10, 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic_timeofday(site_metadata, obs_values, axis,
constant_values, expected_values):
tz = 'UTC'
interval_label = "beginning"
interval_length = '1h'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
# all observations at 9am each day
data_end = pd.Timestamp('20190513T0900', tz=tz)
data_start = data_end - pd.Timedelta("{}D".format(len(obs_values)))
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1D',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
# forecast 9am
forecast_start = pd.Timestamp('20190514T0900', tz=tz)
forecast_end = pd.Timestamp('20190514T1000', tz=tz)
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("data_end,forecast_start", [
# no timezone
(pd.Timestamp("20190513T0900"), pd.Timestamp("20190514T0900")),
# same timezone
(
pd.Timestamp("20190513T0900", tz="UTC"),
pd.Timestamp("20190514T0900", tz="UTC")
),
# different timezone
(
pd.Timestamp("20190513T0200", tz="US/Pacific"),
pd.Timestamp("20190514T0900", tz="UTC")
),
# obs timezone, but no fx timezone
(
pd.Timestamp("20190513T0900", tz="UTC"),
pd.Timestamp("20190514T0900")
),
# no obs timezone, but fx timezone
(
pd.Timestamp("20190513T0900"),
pd.Timestamp("20190514T0900", tz="UTC")
),
])
def test_persistence_probabilistic_timeofday_timezone(site_metadata, data_end,
forecast_start):
obs_values = [0] * 11 + [20] * 11
axis, constant_values, expected_values = 'x', [10, 20], [50, 100]
interval_label = "beginning"
interval_length = '1h'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
# all observations at 9am each day
data_start = data_end - pd.Timedelta("{}D".format(len(obs_values)))
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1D',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
# forecast 9am
forecast_end = forecast_start + pd.Timedelta("1h")
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
# if forecast without timezone, then use obs timezone
if data.index.tzinfo is not None and forecast_start.tzinfo is None:
expected_index = expected_index.tz_localize(data.index.tzinfo)
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0] * 15 + [20] * 15, 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0] * 15 + [4] * 15, 'y', [50], [2]),
([None] * 30, 'y', [50], [None]),
([0] * 10 + [None] * 10 + [20] * 10, 'x', [10, 20], [50, 100]),
([0] * 10 + [None] * 10 + [4] * 10, 'y', [50], [2]),
])
def test_persistence_probabilistic_resampling(
site_metadata,
interval_label,
obs_values, axis,
constant_values,
expected_values
):
tz = 'UTC'
interval_length = '1min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1min',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end =
|
pd.Timestamp('20190513 1300', tz=tz)
|
pandas.Timestamp
|
# -*- coding: UTF-8 -*-
import os, sys; demo_dir = os.path.dirname(os.path.abspath(__file__))
import pandas as pd
from random import uniform, randint
try:
from pybillboard_js.billboarder import *
except:
sys.path.append(os.path.dirname(demo_dir))
from pybillboard_js.billboarder import *
# default source dataframe
tbl_source = pd.DataFrame([[uniform(0, 10), uniform(0, 10)] for i in range(10)], columns = ["A", "B"])
# range style dataframe
tbl_source_range =
|
pd.DataFrame()
|
pandas.DataFrame
|
from matplotlib import pyplot as plt
import csv
from absl import app, flags, logging
from absl.flags import FLAGS
import os
import scipy.io
import numpy as np
import cv2
import tqdm
from sklearn.metrics import average_precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
import pandas as pd
import seaborn as sns
import datetime
import glob
import re
import string
import sys
import ast
import shutil
import random
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import confusion_matrix
import copy
import collections
def _check_ext(path, default_ext):
name, ext = os.path.splitext(path)
if ext == '':
if default_ext[0] == '.':
default_ext = default_ext[1:]
path = name + '.' + default_ext
return path
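# Illustrative usage (sketch, not in the original file): the default extension is appended only
# when the path has none.
assert _check_ext('results', 'yml') == 'results.yml'
assert _check_ext('results.csv', 'yml') == 'results.csv'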
def save_yaml(path, data, **kwargs):
import oyaml as yaml
path = _check_ext(path, 'yml')
with open(path, 'w') as f:
yaml.dump(data, f, **kwargs)
def convert_categorical_str_to_numerical(category_list):
"""
Takes a category list of strings and converts it to integers based on the sorted unique labels, e.g.:
category_list = [dog, cat, horse, dog, cow]
return: [2, 0, 3, 2, 1]
:param category_list: (list) list of string categories
:return: (list)
"""
unique = list(np.unique(category_list))
return [unique.index(u) for u in category_list]
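# Illustrative usage (sketch, not in the original file); np.unique sorts the labels, so the indices
# follow the alphabetical order of the unique categories.
assert convert_categorical_str_to_numerical(['dog', 'cat', 'horse', 'dog', 'cow']) == [2, 0, 3, 2, 1]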
def match_pair_of_data(data_file_1, data_file_2):
"""
matches pairs of data from two csv files
:param data_file_1: (str) CSV file absolute path
:param data_file_2: (str) CSV file absolute path
:return: (list, list) list of numerical values for a list of inputs that matches name
"""
y_test = []
y_pred = []
data_file1 = pd.read_csv(data_file_1)
data_file2 = pd.read_csv(data_file_2)
gt_categories = convert_categorical_str_to_numerical(data_file2['tissue type'].tolist())
gt_file_names = data_file2['image_name'].tolist()
predict_fnames = data_file1['fname'].tolist()
predict_categories = data_file1['class_2'].tolist()
print(f'found {len(gt_file_names)} cases in file 1 and {len(predict_fnames)} cases in file 2')
for i, name in enumerate(predict_fnames):
if name in gt_file_names:
y_pred.append(float(predict_categories[i]))
y_test.append(float(gt_categories[gt_file_names.index(name)]))
print(f'{len(y_test)} cases matched names')
return y_test, y_pred
def calculate_auc_and_roc(predicted, real, case_name, plot=True, results_directory='',
results_id='', save_plot=False):
"""
:param predicted:
:param real:
:param case_name:
:param plot:
:param results_directory:
:param results_id:
:param save_plot:
:return:
"""
y_test, y_pred = match_pair_of_data(predicted, real)
fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test, y_pred)
auc_keras = auc(fpr_keras, tpr_keras)
plt.figure()
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_keras, tpr_keras, label=case_name + '(area = {:.3f})'.format(auc_keras))
# plt.plot(fpr_rf, tpr_rf, label='RF (area = {:.3f})'.format(auc_rf))
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
if save_plot is True:
name_fig = ''.join(['roc_', results_id, '_.png'])
plt.savefig(results_directory + name_fig)
if plot is True:
plt.show()
plt.close()
return auc_keras
def check_file_isvid(filename):
"""
checks if a file has a video extension, accepted files are: '.mp4', '.mpg', '.avi'
:param filename: (str) name of the file
:return: (bool)
"""
list_extensions = ['.mpg', '.MPG', '.mp4', '.MP4', '.AVI', '.avi']
if filename[-4:] in list_extensions:
return True
else:
return False
def get_video_files_in_dir(dir_dataset):
"""
Given a directory, checks whether it contains any video files and returns their absolute paths in a list
:param dir_dataset: (str) directory
:return: (list) list of video files
"""
initial_list_files = os.listdir(dir_dataset)
list_folders = []
list_video_files = []
for file_name in initial_list_files:
if os.path.isdir(dir_dataset + file_name):
list_folders.append(file_name)
else:
if check_file_isvid(file_name):
    list_video_files.append(dir_dataset + file_name)
for folder in list_folders:
list_files = os.listdir(dir_dataset + folder)
for file_name in list_files:
if check_file_isvid(file_name):
    list_video_files.append(''.join([dir_dataset, folder, '/', file_name]))
return list_video_files
def analyze_video_dataset(dir_dataset):
"""
Analyzes a video dataset, reporting the number of frames of each video
:param dir_dataset: (str) directory of the dataset
:return:
"""
list_video_files = get_video_files_in_dir(dir_dataset)
print(f"found {len(list_video_files)} video files")
num_frames = []
name_videos = []
for path_to_video in list_video_files:
cap = cv2.VideoCapture(path_to_video)
name_videos.append(path_to_video.replace(dir_dataset, ''))
num_frames.append(cap.get(cv2.CAP_PROP_FRAME_COUNT))
df = pd.DataFrame(data={"name file": name_videos, "num frames": num_frames})
return df
def find_pattern_names(string_name, str_pattern):
"""
Looks for a pattern name in a string and returns the number after it
:param string_name: the string where to look for a pattern
:param str_pattern: the pattern that needs to be found
:return:
"""
match = re.search(str_pattern + r'(\d+)', string_name)
if match:
return match.group(1)
else:
return np.nan
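# Illustrative usage (sketch, not in the original file):
assert find_pattern_names('urs_case012_rep2.mp4', 'case') == '012'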
def determine_type_procedure(file_name):
"""
Determine the type of procedure according to the name of the file
:param file_name:
:return:
"""
types_procedures = ['cys', 'urs']
for kind in types_procedures:
if kind in file_name:
return kind
def analyze_dataset_patterns(dataset_dir, pattern_str):
"""
Analyze a dataset to find a pattern after a string
:param dataset_dir:
:param pattern_str:
:return:
"""
list_files = os.listdir(dataset_dir)
unique_names = []
for file_name in list_files:
pattern = find_pattern_names(file_name, pattern_str)
type_procedure = determine_type_procedure(file_name)
combination = [type_procedure, pattern]
if combination not in unique_names:
unique_names.append(combination)
return unique_names
def read_mask(dir_image):
"""
:param dir_image:
:return:
"""
original_img = cv2.imread(dir_image)
if original_img is None:
print('Could not open or find the image:', dir_image)
exit(0)
height, width, depth = original_img.shape
img = cv2.resize(original_img, (256, 256))
img = img / 255
img = (img > 0.9) * 1.0
return img
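# Illustrative sketch (not in the original file): the thresholding step of read_mask on a synthetic
# array instead of an image read from disk; only near-white pixels survive as 1.0.
_raw = np.array([[0., 128., 255.], [230., 240., 10.]])
_mask = ((_raw / 255) > 0.9) * 1.0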
def read_img_results(dir_image):
"""
:param dir_image:
:return:
"""
original_img = cv2.imread(dir_image)
if original_img is None:
print('Could not open or find the image:', dir_image)
exit(0)
height, width, depth = original_img.shape
img = cv2.resize(original_img, (256, 256))
return img
def compare_box_plots(general_directory = '', name_test_csv_file='', name_validation_csv_file='',
save_directory='', condition_name=''):
"""
:param general_directory:
:param name_test_csv_file:
:param name_validation_csv_file:
:param save_directory:
:param condition_name:
:return:
2DO: Handle list of dirs and exclusion conditions
"""
predictions_path = ''.join([general_directory, 'predictions'])
prediction_folders = sorted([f for f in os.listdir(predictions_path)])
file_names = []
dsc_values = {}
prec_values = {}
rec_values = {}
acc_values = {}
if general_directory != '' and type(general_directory) == str:
csv_files = sorted([f for f in os.listdir(general_directory) if 'evaluation_results' in f and f.endswith('.csv')])
print(csv_files)
count_id = 0
for i, folder in enumerate(prediction_folders):
if folder in csv_files[i]:
file_names.append(folder)
else:
file_names.append('dataset_'+str(count_id))
count_id += 1
data_file = pd.read_csv(general_directory + csv_files[i])
dsc_values[file_names[i]] = data_file['DSC'].tolist()
prec_values[file_names[i]] = data_file['Precision'].tolist()
rec_values[file_names[i]] = data_file['Recall'].tolist()
acc_values[file_names[i]] = data_file['Accuracy'].tolist()
else:
pass
dsc_data = pd.DataFrame.from_dict(dsc_values, orient='index').T
prec_data = pd.DataFrame.from_dict(prec_values, orient='index').T
rec_data = pd.DataFrame.from_dict(rec_values, orient='index').T
acc_data =
|
pd.DataFrame.from_dict(acc_values, orient='index')
|
pandas.DataFrame.from_dict
|
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
import json
import time
from selenium.webdriver.common.keys import Keys
import urllib.request
chrome_path = r"C:\Users\<NAME>\Downloads\chromedriver.exe"
driver = webdriver.Chrome(chrome_path)
driver.get('https://www.instagram.com/explore/tags/pandora/')
html_source = driver.page_source
# data = html_source.encode('utf-8')
time.sleep(1)
driver.find_element_by_xpath("""//*[@id="react-root"]/section/main/article/div[2]/div[3]/a""").click()
time.sleep(1)
elem = driver.find_element_by_tag_name("body")
no_of_pagedowns = 5000
while no_of_pagedowns:
elem.send_keys(Keys.PAGE_DOWN)
time.sleep(0.2)
no_of_pagedowns -= 1
html_source = driver.page_source
soup = BeautifulSoup(html_source, "lxml")
# PostsData = []
urlList = []
dList = []
# for sap in soup.findAll('div', {'class': '_jjzlb'}):
# sap2 = BeautifulSoup(str(sap), 'html.parser')
# for sap3 in sap2.findAll('img'):
# try:
# PostsData.append(sap3['alt'])
# except:
# pass
for sapURL in soup.findAll('a', {'class': '_8mlbc _vbtk2 _t5r8b'}):
try:
urlList.append("http://www.instagram.com" + sapURL['href'])
except:
pass
lenList1 = len(urlList)
UserName = []
FullName = []
Caption = []
Nlikes = []
Nviews = []
Ncomments = []
PostingDate = []
for le in range(0, lenList1):
try:
catUrl = urlList[le]
catRequest = urllib.request.Request(catUrl, headers={'content-type': 'application/json'})
catResponse = urllib.request.urlopen(catRequest)
catHtml = catResponse.read().decode('utf-8')
soup = BeautifulSoup(catHtml, 'html.parser')
for sap in soup.findAll('script', {'type': 'text/javascript'}):
if sap.string is not None:
if sap.string[0:18] == "window._sharedData":
with open("Output_File.txt", "w") as text_file:
text_file.write(sap.string[21:-1])
with open('Output_File.txt', 'r') as fobj:
data = json.load(fobj)
with open('Output_JSON.json', 'w') as fobj:
json.dump(data, fobj)
try:
if data['entry_data']['PostPage'][0]['media']['owner']['username'] is not None:
UserName.append(data['entry_data']['PostPage'][0]['media']['owner']['username'])
else:
UserName.append("N/A")
except KeyError:
UserName.append("N/A")
try:
if data['entry_data']['PostPage'][0]['media']['owner']['full_name'] is not None:
FullName.append(data['entry_data']['PostPage'][0]['media']['owner']['full_name'])
else:
FullName.append("N/A")
except KeyError:
FullName.append("N/A")
try:
if data['entry_data']['PostPage'][0]['media']['caption'] is not None:
Caption.append(data['entry_data']['PostPage'][0]['media']['caption'])
else:
Caption.append("N/A")
except KeyError:
Caption.append("N/A")
try:
if data['entry_data']['PostPage'][0]['media']['likes']['count'] is not None:
Nlikes.append(data['entry_data']['PostPage'][0]['media']['likes']['count'])
else:
Nlikes.append("N/A")
except KeyError:
Nlikes.append("N/A")
try:
if data['entry_data']['PostPage'][0]['media']['is_video'] == True:
if data['entry_data']['PostPage'][0]['media']['video_views'] is not None:
Nviews.append(data['entry_data']['PostPage'][0]['media']['video_views'])
else:
Nviews.append("N/A")
else:
Nviews.append(0)
except KeyError:
Nviews.append("N/A")
try:
if data['entry_data']['PostPage'][0]['media']['comments']['count'] is not None:
Ncomments.append(data['entry_data']['PostPage'][0]['media']['comments']['count'])
else:
Ncomments.append("N/A")
except KeyError:
Ncomments.append("N/A")
try:
if data['entry_data']['PostPage'][0]['media']['date'] is not None:
PostingDate.append(
time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(data['entry_data']['PostPage'][0]['media']['date'])))
else:
PostingDate.append("N/A")
except KeyError:
PostingDate.append("N/A")
except:
pass
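# Illustrative sketch (not in the original script): every try/except block above walks the same
# nested dictionary and falls back to 'N/A'; a hypothetical helper like this would factor out
# that pattern.
def safe_get(d, keys, default="N/A"):
    for key in keys:
        try:
            d = d[key]
        except (KeyError, IndexError, TypeError):
            return default
    return d if d is not None else default
# e.g. safe_get(data, ['entry_data', 'PostPage', 0, 'media', 'owner', 'username'])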
lenList = len(Caption)
for le in range(0, lenList):
dDict = {}
dDict['Post Caption'] = Caption[le]
# dDict['Link'] = urlList[le]
dDict['User Name'] = UserName[le]
dDict['Full Name'] = FullName[le]
dDict['Number of likes'] = Nlikes[le]
dDict['Number of Views'] = Nviews[le]
dDict['Number of comments'] = Ncomments[le]
dDict['Posting date'] = PostingDate[le]
dList.append(dDict)
df = pd.DataFrame(dList)
df.to_csv('Pandora_new.csv', sep=',', index=False)
lenList = len(UserName)
dList = []
for le in range(0, lenList):
dDict = {}
dDict['User Name'] = UserName[le]
dList.append(dDict)
df = pd.DataFrame(dList)
df.to_csv('Pandora_UserName.csv', sep=',', index=False)
dList = []
lenList = len(urlList)
for le in range(0, lenList):
dDict = {}
dDict['URL'] = urlList[le]
dList.append(dDict)
df =
|
pd.DataFrame(dList)
|
pandas.DataFrame
|
# @Author: <NAME><Nareshvrao>
# @Date: 2020-12-22, 12:44:08
# @Last modified by: Naresh
# @Last modified time: 2019-12-22, 1:13:26
import warnings
warnings.filterwarnings("ignore")
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
from util.utils import *
def compute_detail_score(df, dice):
res = []
res.append(df[dice].mean())
#c1 -> c4 dice
for label in ['Fish', 'Flower', 'Gravel', 'Sugar']:
df_tmp = df[df['cls'] == label]
res.append(df_tmp[dice].mean())
# neg & pos dice
res.append(df[df['truth'] == ''][dice].mean())
res.append(df[df['truth'] != ''][dice].mean())
# c1 -> c4 pos
for label in ['Fish', 'Flower', 'Gravel', 'Sugar']:
df_tmp = df[df['cls'] == label]
res.append(df_tmp[df_tmp['truth'] != ''][dice].mean())
return res
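# Illustrative usage (sketch, not in the original file): compute_detail_score expects a frame with
# 'cls', 'truth' and the dice column, and returns 11 aggregates (overall, per class, neg/pos,
# per-class positive).
_demo_scores = pd.DataFrame({
    'cls': ['Fish', 'Flower', 'Gravel', 'Sugar'],
    'truth': ['', '1 1', '1 1', ''],
    'dice': [1.0, 0.5, 0.25, 1.0],
})
assert len(compute_detail_score(_demo_scores, 'dice')) == 11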
def ensemble_rles(rles1, rles2, mode='intersect'):
res = []
for rle1, rle2 in tqdm.tqdm(zip(rles1, rles2)):
m1 = rle2mask(rle1, height=350, width=525, fill_value=1)
m2 = rle2mask(rle2, height=350, width=525, fill_value=1)
if mode == 'intersect':
mask = ((m1+m2) == 2).astype(int)
elif mode == 'union':
mask = ((m1+m2) > 0).astype(int)
else:
raise RuntimeError('%s not implemented.' % mode)
rle = mask2rle(mask)
res.append(rle)
return res
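# Illustrative sketch (not in the original file): the core of ensemble_rles on plain binary masks,
# without the rle2mask/mask2rle round-trip.
_m1 = np.array([[1, 1, 0], [0, 1, 0]])
_m2 = np.array([[1, 0, 0], [0, 1, 1]])
_intersect = ((_m1 + _m2) == 2).astype(int)   # kept only where both predictions agree
_union = ((_m1 + _m2) > 0).astype(int)        # kept where either prediction fires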
def load_stacking(seg_name, tta, ts=0.5):
df_seg_val = pd.read_csv('../output/'+seg_name+'/valid_5fold_tta%d.csv'%tta)
df_seg_test = pd.read_csv('../output/'+seg_name+'/test_5fold_tta%d.csv'%tta)
df_seg_val['s1'], df_seg_test['s1'] = np.nan, np.nan
df_seg_val['s1'].loc[df_seg_val.pred >= ts] = '1 1'
df_seg_test['s1'].loc[df_seg_test.pred >= ts] = '1 1'
return df_seg_val[['Image_Label', 's1']], df_seg_test[['Image_Label', 's1']]
def load_seg_pred(seg_name, name, tta):
#load val
df_val = []
try:
for fold in range(5):
if tta <= 1:
df_val.append(pd.read_csv('../output/'+ seg_name + '/' + 'valid_fold%d.csv'%fold))
else:
df_val.append(pd.read_csv('../output/'+ seg_name + '/' + 'valid_fold%d_tta%d.csv'%(fold, tta)))
df_val = pd.concat(df_val)
except:
df_val = pd.read_csv('../output/'+ seg_name + '/' + 'valid_5fold_tta%d.csv'%(tta))
df_val = df_val[['Image_Label', 'EncodedPixels']]
#df_val.rename(columns={'s3': 'EncodedPixels'}, inplace=True)
df_test = pd.read_csv('../output/'+ seg_name + '/' + 'test_5fold_tta%d.csv'%tta)
df_val.rename(columns={'EncodedPixels': name}, inplace=True)
df_test.rename(columns={'EncodedPixels': name}, inplace=True)
return df_val, df_test
def load_seg_cls_pred(seg_name, name, tta, ts):
#load val
df_val = []
try:
for fold in range(5):
if tta <= 1:
df_val.append(pd.read_csv('../output/'+ seg_name + '/' + 'valid_cls_fold%d.csv'%fold))
else:
df_val.append(pd.read_csv('../output/'+ seg_name + '/' + 'valid_cls_fold%d_tta%d.csv'%(fold, tta)))
df_val = pd.concat(df_val)
except:
df_val = pd.read_csv('../output/'+ seg_name + '/' + 'valid_5fold_tta%d.csv'%(tta))
df_val = df_val[['Image_Label', 'EncodedPixels']]
#df_val.rename(columns={'s3': 'EncodedPixels'}, inplace=True)
df_test = pd.read_csv('../output/'+ seg_name + '/' + 'test_cls_5fold_tta%d.csv'%tta)
df_val['EncodedPixels'] = '1 1'
df_val['EncodedPixels'].loc[df_val['0'] < ts] = np.nan
df_test['EncodedPixels'] = '1 1'
df_test['EncodedPixels'].loc[df_test['0'] < ts] = np.nan
df_val.rename(columns={'EncodedPixels': name}, inplace=True)
df_test.rename(columns={'EncodedPixels': name}, inplace=True)
return df_val, df_test
def load_classifier(classifier, tta):
try:
df_cls_val = []
df_cls_test = []
for fold in range(5):
if tta <= 1:
df_cls_val.append(pd.read_csv('../output/'+ classifier + '/' + 'valid_cls_fold%d.csv'%fold))
df_cls_test.append(pd.read_csv('../output/'+ classifier + '/' + 'test_cls_fold%d.csv'%fold))
else:
df_cls_val.append(pd.read_csv('../output/'+ classifier + '/' + 'valid_cls_fold%d_tta%d.csv'%(fold, tta)))
df_cls_test.append(pd.read_csv('../output/'+ classifier + '/' + 'test_cls_fold%d_tta%d.csv'%(fold, tta)))
df_cls_val = pd.concat(df_cls_val)
df_tmp = df_cls_test[0]
for i in range(1, 5):
assert(np.sum(df_tmp['Image_Label'] != df_cls_test[i]['Image_Label']) == 0)
df_tmp['0'] += df_cls_test[i]['0']
df_tmp['0'] /= 5
df_cls_test = df_tmp
except:
df_cls_val = pd.read_csv('../output/'+ classifier + '/' + 'valid_cls_5fold_tta%d.csv'%tta)
df_cls_test = pd.read_csv('../output/'+ classifier + '/' + 'test_cls_5fold_tta%d.csv'%tta)
df_cls_val.rename(columns={'0': 'prob'}, inplace=True)
df_cls_test.rename(columns={'0': 'prob'}, inplace=True)
return df_cls_val, df_cls_test
df_train = pd.read_csv('../input/train_350.csv')
df_train.rename(columns={'EncodedPixels': 'truth'}, inplace=True)
_save=1
tta=3
seg1 = 'densenet121-FPN-BCE-warmRestart-10x3-bs16'
seg2 = 'b5-Unet-inception-FPN-b7-Unet-b7-FPN-b7-FPNPL'
classifier = 'efficientnetb1-cls-BCE-reduceLR-bs16-PL'
# load classifier results
if classifier:
if 'stacking' in classifier:
df_cls_val = pd.read_csv('../output/'+classifier+'/valid_5fold_tta%d.csv'%tta).rename(columns={'pred': 'prob'})
df_cls_test = pd.read_csv('../output/'+classifier+'/test_5fold_tta%d.csv'%tta).rename(columns={'pred': 'prob'})
else:
df_cls_val, df_cls_test = load_classifier(classifier, tta)
# load seg results
if isinstance(seg1, list):
df_seg1_val, df_seg1_test = load_seg_pred(seg1[0], 's1', tta)
for i in range(1, len(seg1)):
d1, d2 = load_seg_pred(seg1[i], 's1', tta)
df_seg1_val['s1'].loc[d1.s1.isnull()] = np.nan
df_seg1_test['s1'].loc[d2.s1.isnull()] = np.nan
elif 'stacking' in seg1:
df_seg1_val, df_seg1_test = load_stacking(seg1, 3, ts=0.54)
else:
df_seg1_val, df_seg1_test = load_seg_pred(seg1, 's1', 1)
df_seg2_val, df_seg2_test = load_seg_pred(seg2, 's2', tta)
# merge seg valid
df_seg_val = pd.merge(df_seg1_val, df_seg2_val, how='left')
df_seg_val = pd.merge(df_seg_val, df_train, how='left')
if classifier:
df_seg_val =
|
pd.merge(df_seg_val, df_cls_val[['Image_Label', 'prob']], how='left')
|
pandas.merge
|
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_df_boolean_comparison_error(self):
# GH#4576
# boolean comparisons with a tuple/list give unexpected results
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
# not shape compatible
with pytest.raises(ValueError):
df == (2, 2)
with pytest.raises(ValueError):
df == [2, 2]
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
with pytest.raises(TypeError):
df.__eq__(None)
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH#15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH#15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
# -------------------------------------------------------------------
# Arithmetic
class TestFrameArithmetic(object):
@pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano')
def test_df_sub_datetime64_not_ns(self):
df = pd.DataFrame(pd.date_range('20130101', periods=3))
dt64 = np.datetime64('2013-01-01')
assert dt64.dtype == 'datetime64[D]'
res = df - dt64
expected = pd.DataFrame([pd.Timedelta(days=0), pd.Timedelta(days=1),
pd.Timedelta(days=2)])
tm.assert_frame_equal(res, expected)
@pytest.mark.parametrize('data', [
[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT],
['x', 'y', 1]])
@pytest.mark.parametrize('dtype', [None, object])
def test_df_radd_str_invalid(self, dtype, data):
df = pd.DataFrame(data, dtype=dtype)
with pytest.raises(TypeError):
'foo_' + df
@pytest.mark.parametrize('dtype', [None, object])
def test_df_with_dtype_radd_int(self, dtype):
df = pd.DataFrame([1, 2, 3], dtype=dtype)
expected = pd.DataFrame([2, 3, 4], dtype=dtype)
result = 1 + df
|
tm.assert_frame_equal(result, expected)
|
pandas.util.testing.assert_frame_equal
|
import json
import pandas as pd
import pathlib
import datetime
from modules import constant, app_element
def generate_dataframe():
# Main DataFrame
DATAFRAME_MAIN =
|
pd.read_csv(constant.DATAFILE)
|
pandas.read_csv
|
# Pandas Time Series
import pandas as pd
import numpy as np
from datetime import datetime
from dateutil import parser
from pandas_datareader import data
import matplotlib.pyplot as plt
def simple_example():
# datetime in python
datetime_1 = datetime(year=2015, month=7, day=4)
print(datetime_1)
date = parser.parse("4th of July, 2015")
print(date.strftime('%A'))
date_2 = np.array('2015-07-04', dtype=np.datetime64)
print(date_2)
print(date_2 + np.arange(12))
np.datetime64('2015-07-04')
np.datetime64('2015-07-04 12:00')
print(np.datetime64('2015-07-04 12:59:59.50', 'ns'))
def time_series():
index = pd.DatetimeIndex(['2014-07-04', '2014-08-04',
'2015-07-04', '2015-08-04'])
data =
|
pd.Series([0, 1, 2, 3], index=index)
|
pandas.Series
|
import MDAnalysis
import MDAnalysis.analysis.hbonds
import pandas as pd
import numpy as np
import os
from collections import defaultdict
import networkx as nx
import matplotlib.pyplot as plt
import sys
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger()
#logger.addHandler(logging.FileHandler('test.log', 'a'))
print = logger.info
sys.setrecursionlimit(1000)
print(sys.getrecursionlimit())
class HB_MD:
def __init__(self, frame):
self.direct_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.one_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.two_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.three_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.four_water_connection = pd.DataFrame(columns = ['donor_residue', 'acceptor_residue'])
self.hb_analysis(frame, self.direct_connection, self.one_water_connection, self.two_water_connection, self.three_water_connection, self.four_water_connection)
return
def addEdge(self, graph,u,v):
graph[u].append(v)
def generate_edges(self, graph):
edges = []
for node in graph:
for neighbour in graph[node]:
edges.append((node, neighbour))
return edges
def find_path(self, graph, start, end, path =[]):
path = path + [start]
if start == end:
return path
for node in graph[start]:
if node not in path:
newpath = self.find_path(graph, node, end, path)
if newpath:
return newpath
return None
def find_all_path(self, graph, start, path, paths):
if len(path) == 6:
return paths.append(list(path))
if len(graph[start]) == 0:
return paths.append(list(path))
for node in graph[start]:
if node in path:
continue
path.append(node)
self.find_all_path(graph, node, path, paths)
path.pop()
def get_chain(self, frame, chain):
i = 0
pdb = open(frame, 'r')
#os.system('sed -i "s/1H / H1/" hoh.pdb')
for line in pdb:
#line.replace('HOH', 'TIP3')
if line[0:4] != 'ATOM':
continue
chain[i] = line[21:22]
i += 1
return
def MDtraj(self, pdb):
#print('Getting coordinate')
h3 = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(pdb, 'not resname ALA and not resname GLN and not resname GLY and not resname ILE and not resname LEU and not resname PHE and not resname PRO and not resname VAL',
'not resname ALA and not resname GLN and not resname GLY and not resname ILE and not resname LEU and not resname PHE and not resname PRO and not resname VAL', distance=3.5, angle=90.0, acceptors = {'O1', 'O2'})
#print('Analyzing')
h3.run()
#print('Generating table')
h3.generate_table()
#print('Generating form')
df3 = pd.DataFrame.from_records(h3.table)
return df3
def get_all_connection(self, df3, chain, index_donor, index_accept):
for index2, row2 in df3.iterrows():
if row2['donor_resnm'] == 'TIP3'and row2['acceptor_resnm'] != 'TIP3':
if row2['donor_atom'] == 'H1':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-1))
index_accept.append(row2['acceptor_resnm'] + '_' + chain[row2['acceptor_index']] + '_' + str(row2['acceptor_resid']))
if row2['donor_atom'] == 'H2':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-2))
index_accept.append(row2['acceptor_resnm'] + '_' + chain[row2['acceptor_index']] + '_' + str(row2['acceptor_resid']))
elif row2['acceptor_resnm'] == 'TIP3' and row2['donor_resnm'] != 'TIP3':
index_accept.append(row2['acceptor_resnm'] + '_' + str(row2['acceptor_index']))
index_donor.append(row2['donor_resnm'] + '_' + chain[row2['donor_index']] + '_' + str(row2['donor_resid']))
elif row2['acceptor_resnm'] == 'TIP3' and row2['donor_resnm'] == 'TIP3':
if row2['donor_atom'] == 'H1':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-1))
index_accept.append(row2['acceptor_resnm'] + '_' + str(row2['acceptor_index']))
if row2['donor_atom'] == 'H2':
index_donor.append(row2['donor_resnm'] + '_' + str(row2['donor_index']-2))
index_accept.append(row2['acceptor_resnm'] + '_' + str(row2['acceptor_index']))
else:
index_donor.append(row2['donor_resnm'] + '_' + chain[row2['donor_index']] + '_' + str(row2['donor_resid']))
index_accept.append(row2['acceptor_resnm'] + '_' + chain[row2['acceptor_index']] + '_' + str(row2['acceptor_resid']))
return
def divide_networks(self, hb_two, donor_residue, acceptor_residue, donor_residue2, acceptor_residue2):
#print('Divide networks')
for row in range(len(hb_two)):
if hb_two['donor_residue'][row][0:3] != 'TIP' and hb_two['acceptor_residue'][row][0:3] != 'TIP':
if hb_two['donor_residue'][row] == hb_two['acceptor_residue'][row]:
continue
else:
donor_residue.append(hb_two['donor_residue'][row])
acceptor_residue.append(hb_two['acceptor_residue'][row])
else:
if hb_two['donor_residue'][row] == hb_two['acceptor_residue'][row]:
continue
else:
donor_residue2.append(hb_two['donor_residue'][row])
acceptor_residue2.append(hb_two['acceptor_residue'][row])
return
def count_water_num(self, path, donor, accept, wat_num):
#print('Count number of water in paths')
for item in path:
donor_column = [item[0]]
accpt_column = []
count = 0
for r in range(1, len(item)):
if item[r][0:3] != 'TIP':
donor_column.append(item[r])
accpt_column.append(item[r])
wat_num.append(count)
count = 0
else:
count += 1
if len(donor_column) > len(accpt_column):
donor_column.pop()
else:
accpt_column.pop()
donor.extend(donor_column)
accept.extend(accpt_column)
return
#c = u.select_atoms("protein and prop z > 85 or around 3.0 protein and prop z > 85 ")
#c.write('/Users/zhangyingying/Dropbox (City College)/Yingying/large_file/new_trajectories_PSII_wt/cut_frame32_50_test.pdb')
def hb_analysis(self, frame, direct_connection, one_water_connection, two_water_connection, three_water_connection, four_water_connection):
chain = {}
graph = defaultdict(list)
pdb = MDAnalysis.Universe(frame)
self.get_chain(frame, chain)
df3 = self.MDtraj(pdb)
index_donor = []
index_accept = []
self.get_all_connection(df3, chain, index_donor, index_accept)
df3['donor_residue'] = index_donor
df3['acceptor_residue'] = index_accept
dic_hdonnor = {'ASP':['HD1', 'HD2'], 'ARG': ['HH11', 'HH12', 'HH21', 'HH22', 'HE'], 'GLU':['HE1', 'HE2'], 'HIS':['HD1', 'HE2'], 'HSD':['HD1', 'HE2'], 'HSE':['HD1', 'HE2'], 'HSP':['HD1', 'HE2'],
'SER':['HG'], 'THR':['HG1'], 'ASN':['HD21', 'HD22'], 'GLN':['HE21', 'HE22'], 'CYS':['HG'], 'TYR':['HH'], 'TRP':['HE1'], 'LYS':['HZ1', 'HZ2', 'HZ3'], 'TIP3':['H1', 'H2'], 'HOH':['1H', '2H']}
dic_accept = {'ASP':['OD1', 'OD2'], 'HCO': ['OC1', 'OC2'], 'ARG': ['NE', 'NH1', 'NH2'], 'GLU':['OE1', 'OE2'], 'HSD':['ND1', 'NE2'], 'HSE':['ND1', 'NE2'], 'HSP':['ND1', 'NE2'], 'HIS':['ND1', 'NE2'],
'SER':['OG'], 'THR':['OG1'], 'ASN':['OD1'], 'GLN':['OE1'], 'CYS':['SG'], 'TYR':['OH'], 'LYS':['NZ'], 'MET':['SD'], 'CLX':['CLX'], 'CLA':['CLA'], 'OX2':['OX2'], 'PL9':['O1', 'O2'], 'FX':['FX'], 'TIP3':['OH2'], 'HOH':['O'], 'MQ8':['O1', 'O2']}
donor_residue_pick = []
acceptor_residue_pick = []
for index, row in df3.iterrows():
if row['donor_resnm'] in dic_hdonnor.keys() and row['acceptor_resnm'] in dic_accept.keys():
if row['donor_atom'] in dic_hdonnor[row['donor_resnm']] and row['acceptor_atom'] in dic_accept[row['acceptor_resnm']]:
donor_residue_pick.append(row['donor_residue'])
acceptor_residue_pick.append(row['acceptor_residue'])
else:
continue
hb_two = pd.DataFrame({'donor_residue':donor_residue_pick, 'acceptor_residue':acceptor_residue_pick})
donor_residue = []
acceptor_residue = []
donor_residue2 = []
acceptor_residue2 = []
self.divide_networks(hb_two, donor_residue, acceptor_residue, donor_residue2, acceptor_residue2)
dire_con = pd.DataFrame({'donor_residue': donor_residue, 'acceptor_residue': acceptor_residue, 'wat_num': [0]*len(donor_residue)})
wat_con = pd.DataFrame({'donor_residue': donor_residue2, 'acceptor_residue': acceptor_residue2})
# connection via water
wat_con = wat_con.drop_duplicates()
wat_con.index = range(0, len(wat_con))
# direct connection
dire_con = dire_con.drop_duplicates()
dire_con.index = range(0, len(dire_con))
#wat_con.to_csv('/Users/zhangyingying/Dropbox (City College)/Yingying/PSII/quinone/hb_network/conncetion_hoh_frame32_50.csv')
#print('Generating graph')
for i in range(len(wat_con)):
self.addEdge(graph, wat_con['donor_residue'][i], wat_con['acceptor_residue'][i])
visited = []
path = []
#print('Finding all paths through water')
for res in range(len(wat_con)):
results = []
if wat_con['donor_residue'][res] not in visited and wat_con['donor_residue'][res][0:3] != 'TIP':
self.find_all_path(graph, wat_con['donor_residue'][res], [wat_con['donor_residue'][res]], results)
path = path + results
visited.append(wat_con['donor_residue'][res])
else:
continue
donor = []
accept = []
wat_num = []
self.count_water_num(path, donor, accept, wat_num)
# put all the connection together get the network
res_wat_res = pd.DataFrame({'donor_residue': donor, 'acceptor_residue': accept, 'wat_num': wat_num})
res_wat_res = res_wat_res.drop_duplicates()
hb_network = pd.concat([dire_con, res_wat_res])
hb_network.index = range(0, len(hb_network))
visited_1 = []
visited_2 = []
visited_3 = []
visited_4 = []
for i in range(0, len(hb_network)):
if hb_network['wat_num'][i] == 0:
new_row = pd.Series({'donor_residue': hb_network['donor_residue'][i], 'acceptor_residue': hb_network['acceptor_residue'][i]})
direct_connection = direct_connection.append(new_row, ignore_index=True)
if hb_network['wat_num'][i] <= 1 and [hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]] not in visited_1:
visited_1.append([hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]])
new_row = pd.Series({'donor_residue': hb_network['donor_residue'][i], 'acceptor_residue': hb_network['acceptor_residue'][i]})
one_water_connection = one_water_connection.append(new_row, ignore_index=True)
if hb_network['wat_num'][i] <= 2 and [hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]] not in visited_2:
visited_2.append([hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]])
new_row = pd.Series({'donor_residue': hb_network['donor_residue'][i], 'acceptor_residue': hb_network['acceptor_residue'][i]})
two_water_connection = two_water_connection.append(new_row, ignore_index=True)
if hb_network['wat_num'][i] <= 3 and [hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]] not in visited_3:
visited_3.append([hb_network['donor_residue'][i], hb_network['acceptor_residue'][i]])
new_row =
|
pd.Series({'donor_residue': hb_network['donor_residue'][i], 'acceptor_residue': hb_network['acceptor_residue'][i]})
|
pandas.Series
|
import pandas as pd
from root_pandas import read_root, to_root
from root_numpy import list_branches
from root_numpy import array2root
from pandas.util.testing import assert_frame_equal
import numpy as np
import ROOT
import os
import warnings
from nose.tools import assert_raises
def test_read_write():
df = pd.DataFrame({'x': [1, 2, 3]})
df.to_root('tmp.root')
df_ = read_root('tmp.root')
os.remove('tmp.root')
df.to_root('tmp.root', key='mykey')
df_ = read_root('tmp.root', key='mykey')
assert_frame_equal(df, df_)
os.remove('tmp.root')
tf = ROOT.TFile('tmp.root', 'recreate')
tt = ROOT.TTree("a", "a")
x = np.array([1])
x[0] = 42
tt.Branch('x', x, 'x/D')
tt.Fill()
x[0] = 1
tt.Fill()
tt.Write()
tf.Close()
# Read when no index is present
df = read_root('tmp.root', columns=['x'])
os.remove('tmp.root')
def test_ignore_columns():
df = pd.DataFrame({'x': [1, 2, 3], 'y1': [2, 3, 4], 'y2': [3, 4, 5]})
df.to_root('tmp.root')
df = read_root('tmp.root', ignore=['y1'])
assert(df.columns[0] == 'x' and df.columns[1] == 'y2')
df = read_root('tmp.root', ignore=['y*'])
assert(df.columns == ['x'])
# Test interaction with columns kwarg
df = read_root('tmp.root', columns=['y*'], ignore=['*1'])
assert(df.columns == ['y2'])
os.remove('tmp.root')
def test_persistent_index():
df = pd.DataFrame({'index': [42, 0, 1], 'x': [1, 2, 3]})
df = df.set_index('index')
df.index.name = 'MyAwesomeName'
df.to_root('tmp.root')
assert('__index__MyAwesomeName' in list_branches('tmp.root'))
df_ = read_root('tmp.root')
assert_frame_equal(df, df_)
os.remove('tmp.root')
# See what happens if the index has no name
df = pd.DataFrame({'x': [1, 2, 3]})
df.to_root('tmp.root')
df_ = read_root('tmp.root')
assert_frame_equal(df, df_)
os.remove('tmp.root')
def test_chunked_reading():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6]})
df.to_root('tmp.root')
count = 0
for df_ in read_root('tmp.root', chunksize=2):
assert(not df_.empty)
count += 1
assert count == 3
os.remove('tmp.root')
# Make sure that the default index counts up properly,
# even if the input is chunked
def test_chunked_reading_consistent_index():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6]})
df.to_root('tmp.root', store_index=False)
dfs = []
for df_ in read_root('tmp.root', chunksize=2):
dfs.append(df_)
assert(not df_.empty)
df_reconstructed = pd.concat(dfs)
|
assert_frame_equal(df, df_reconstructed)
|
pandas.util.testing.assert_frame_equal
|
#%%%%%%%%%%%%%%%%%%%%%%% Prepare for testing %%%%%%%%%%%%%%
import os
import backtest_pkg.backtest_portfolio as bt
import pandas as pd
from IPython.display import display
import importlib
os.chdir(r'M:\Share\Colleagues\Andy\Python Project\Backtest Module')
price_data = pd.read_csv('pkg_test/Adjusted_Price.csv', index_col=0, parse_dates=True)
#########################################################################
######## Portfolio construction ###############
#########################################################################
#%%%%%%%%%%%%%%%%%%%%%%% Portfolio from weight %%%%%%%%%%%%%%%
importlib.reload(bt)
# Small testing data:
small_price_data = price_data.iloc[:10, :5]
small_weight =
|
pd.DataFrame(data=[[1, 2, 3]], index=[price_data.index[0]], columns=price_data.columns[:3])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
#Basemap = object
from util import *
class trajectory_processor(pd.DataFrame):
def __init__(self, data = None, index = None, columns = None, dtype = None, copy = False, stamp=True):
super(trajectory_processor, self).__init__(data, index, columns, dtype, copy)
if "stamp" in self.columns:
self.sort("stamp", inplace=True)
elif stamp:
self['stamp'] = self.apply(lambda row: row.date + pd.Timedelta(row.time), axis=1)
self.drop(["date","time"], axis=1, inplace=True)
self.reset_index(drop=True, inplace=True)
def compute_steps(self):
""" compute time/dist/speed per sample; last row gets just 1 for all params and should be deleted later using clean_day_end """
self["time"] = [self.ix[ix+1, "stamp"] - self.ix[ix, "stamp"] for ix in range(len(self)-1)] + [pd.Timedelta("1h")]
self["dist"] = [equirectangular_approx_distance(self.ix[ix, "gps_lat"], self.ix[ix, "gps_long"], self.ix[ix+1, "gps_lat"], self.ix[ix+1, "gps_long"]) for ix in range(len(self)-1)] + [1]
self["speed"] = self.apply(lambda p: p["dist"] / (p["time"].total_seconds() / pd.Timedelta("1h").total_seconds()), axis=1)
#self.ix[len(self)-1, ["time", "dist", "speed"]] = np.NaN
return self
def compute_first_passage(self, radius, col_name=None, hard_max=3):
""" For each data point, compute the time delta until it first crosses the boundry of the *radius* circle around it """
if col_name is None:
col_name = "FPT_" + str(radius)
self[col_name] = 0
# TODO: find a better way of computing FPT! Can use geometric stuff to reduce time...
N = len(self)
for i in range(N):
j = i + 1
while j < N - 1:
d = equirectangular_approx_distance(self.ix[i ,"gps_lat"], self.ix[i, "gps_long"], self.ix[j, "gps_lat"], self.ix[j, "gps_long"])
if d >= radius:
self.ix[i, col_name] = self.ix[j, "stamp"] - self.ix[i, "stamp"]
break
j += 1
else:
# end of data
self.ix[i, col_name] = pd.Timedelta("{}h".format(hard_max))
self[col_name] = self[col_name].astype('timedelta64[s]') / 3600
self[col_name] = self[col_name].apply(lambda val: min(val, hard_max))
return self
def cluster(self, target, k=3):
data = self[target].values
data = data[np.logical_not(np.isnan(data))]
km = KMeans(n_clusters=k).fit(np.atleast_2d(data).T)
# We want the cluster index to be sorted by the values of the centroids (in order to compare runs)
km.cluster_centers_.ravel().sort()
self["cluster"] = [km.predict([val])[0] if not np.isnan(val) else val for val in self[target].values]
return self
def find_best_fpt(self, radii=None, plot=True, hard_max=3):
""" Use max variance criterion for best radius of FPT """
if radii is None:
radii = [.1, .5, 1, 2, 5, 10, 25]
# compute
for rad in radii:
print(rad)
self.compute_first_passage(rad, hard_max=hard_max)
        # Need to remove the last point of each day; otherwise (since there are no recordings at night) the FPT is
        # the time until the next day's first recording, and the variance is inflated.
self.clean_day_end()
# diagnostics
vars = [self["FPT_" + str(rad)].std() for rad in radii]
self._fpt_diag = zip(radii, vars)
if plot:
plt.plot(radii, vars, "x-", markersize=10)
plt.xlabel("radius [Km]", fontsize=24)
plt.ylabel("std(FPT) [h]", fontsize=24)
plt.show()
return self
def diluted(self, rad=1.0):
""" Return a diluted version of this trajectory --- good for plotting """
out = []
        last_lat, last_lon = 0, 0 # data is all far from this...
for i in range(len(self)):
if equirectangular_approx_distance(self.ix[i ,"gps_lat"], self.ix[i, "gps_long"], last_lat, last_lon) > rad:
last_lat, last_lon = self.ix[i ,"gps_lat"], self.ix[i, "gps_long"]
out.append((last_lon, last_lat))
return np.array(out)
def clean_day_end(self):
""" Remove the last point of each day
This is useful for cases where the dist/speed/etc. is computed based on the *next* point
which in this case doesn't exist (and is carried over to the next day).
"""
days = [s.date() for s in self["stamp"]]
ix = self["stamp"].groupby(days).apply(np.argmax).values
self.drop(ix, axis=0, inplace=True)
self.reset_index(drop=True, inplace=True)
return self
@classmethod
def stamp(Cls, file_path_in, columns, date_cols, file_path_out):
""" Convert date/time to stamp """
raw_data =
|
pd.DataFrame.from_csv(file_path_in, header=None, parse_dates=date_cols)
|
pandas.DataFrame.from_csv
|
from . import logging as logg
from .preprocessing.utils import set_initial_size
import os, re
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
from urllib.request import urlretrieve
from pathlib import Path
from scipy.sparse import issparse
from anndata import AnnData
from scanpy import read, read_loom
def load(filename, backup_url=None, header="infer", index_col="infer", **kwargs):
"""Load a csv, txt, tsv or npy file."""
numpy_ext = {"npy", "npz"}
pandas_ext = {"csv", "txt", "tsv"}
if not os.path.exists(filename) and backup_url is None:
raise FileNotFoundError(f"Did not find file {filename}.")
elif not os.path.exists(filename):
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
urlretrieve(backup_url, filename)
ext = Path(filename).suffixes[-1][1:]
if ext in numpy_ext:
return np.load(filename, **kwargs)
elif ext in pandas_ext:
df = pd.read_csv(
filename,
header=header,
index_col=None if index_col == "infer" else index_col,
**kwargs,
)
if index_col == "infer" and len(df.columns) > 1:
is_int_index = all(np.arange(0, len(df)) == df.iloc[:, 0])
is_str_index = isinstance(df.iloc[0, 0], str) and all(
[not isinstance(d, str) for d in df.iloc[0, 1:]]
)
if is_int_index or is_str_index:
df.set_index(df.columns[0], inplace=True)
return df
else:
raise ValueError(
f"'{filename}' does not end on a valid extension.\n"
f"Please, provide one of the available extensions.\n{numpy_ext | pandas_ext}\n"
)
read_csv = load
def clean_obs_names(data, base="[AGTCBDHKMNRSVWY]", ID_length=12, copy=False):
"""Clean up the obs_names.
    For example, an obs_name 'sample1_AGTCdate' is changed to 'AGTC' of the sample
    'sample1_date'. The sample name is then saved in obs['sample_batch'].
    The genetic codes are identified according to
https://www.neb.com/tools-and-resources/usage-guidelines/the-genetic-code.
Arguments
---------
    data: :class:`~anndata.AnnData`
Annotated data matrix.
base: `str` (default: `[AGTCBDHKMNRSVWY]`)
Genetic code letters to be identified.
ID_length: `int` (default: 12)
Length of the Genetic Codes in the samples.
copy: `bool` (default: `False`)
Return a copy instead of writing to adata.
Returns
-------
Returns or updates `adata` with the attributes
obs_names: list
updated names of the observations
sample_batch: `.obs`
names of the identified sample batches
"""
def get_base_list(name, base):
base_list = base
while re.search(base_list + base, name) is not None:
base_list += base
if len(base_list) == 0:
raise ValueError("Encountered an invalid ID in obs_names: ", name)
return base_list
adata = data.copy() if copy else data
names = adata.obs_names
base_list = get_base_list(names[0], base)
if len(np.unique([len(name) for name in adata.obs_names])) == 1:
start, end = re.search(base_list, names[0]).span()
newIDs = [name[start:end] for name in names]
start, end = 0, len(newIDs[0])
for i in range(end - ID_length):
if np.any([ID[i] not in base for ID in newIDs]):
start += 1
if np.any([ID[::-1][i] not in base for ID in newIDs]):
end -= 1
newIDs = [ID[start:end] for ID in newIDs]
prefixes = [names[i].replace(newIDs[i], "") for i in range(len(names))]
else:
prefixes, newIDs = [], []
for name in names:
match = re.search(base_list, name)
newID = (
re.search(get_base_list(name, base), name).group()
if match is None
else match.group()
)
newIDs.append(newID)
prefixes.append(name.replace(newID, ""))
adata.obs_names = newIDs
if len(prefixes[0]) > 0 and len(np.unique(prefixes)) > 1:
adata.obs["sample_batch"] = (
pd.Categorical(prefixes)
if len(np.unique(prefixes)) < adata.n_obs
else prefixes
)
adata.obs_names_make_unique()
return adata if copy else None
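# A minimal usage sketch for clean_obs_names (added for illustration, not part of the original
# module); it assumes two barcode-suffixed observation names, relies on the module-level
# numpy/AnnData imports above, and is wrapped in an uncalled helper so nothing runs at import time.
def _clean_obs_names_demo():
    demo = AnnData(np.zeros((2, 2)))
    demo.obs_names = ["sample1_AGTCAGTCAGTC", "sample2_TTTTGGGGCCCC"]
    clean_obs_names(demo)
    # obs_names are now the 12-letter barcodes; the stripped prefixes
    # ('sample1_', 'sample2_') end up in demo.obs['sample_batch'].
    return demo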
def merge(adata, ldata, copy=True):
"""Merges two annotated data matrices.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix (reference data set).
ldata: :class:`~anndata.AnnData`
Annotated data matrix (to be merged into adata).
Returns
-------
Returns a :class:`~anndata.AnnData` object
"""
adata.var_names_make_unique()
ldata.var_names_make_unique()
if (
"spliced" in ldata.layers.keys()
and "initial_size_spliced" not in ldata.obs.keys()
):
set_initial_size(ldata)
elif (
"spliced" in adata.layers.keys()
and "initial_size_spliced" not in adata.obs.keys()
):
set_initial_size(adata)
common_obs = pd.unique(adata.obs_names.intersection(ldata.obs_names))
common_vars = pd.unique(adata.var_names.intersection(ldata.var_names))
if len(common_obs) == 0:
clean_obs_names(adata)
clean_obs_names(ldata)
common_obs = adata.obs_names.intersection(ldata.obs_names)
if copy:
_adata = adata[common_obs].copy()
_ldata = ldata[common_obs].copy()
else:
adata._inplace_subset_obs(common_obs)
_adata, _ldata = adata, ldata[common_obs].copy()
_adata.var_names_make_unique()
_ldata.var_names_make_unique()
same_vars = len(_adata.var_names) == len(_ldata.var_names) and np.all(
_adata.var_names == _ldata.var_names
)
join_vars = len(common_vars) > 0
if join_vars and not same_vars:
_adata._inplace_subset_var(common_vars)
_ldata._inplace_subset_var(common_vars)
for attr in _ldata.obs.keys():
if attr not in _adata.obs.keys():
_adata.obs[attr] = _ldata.obs[attr]
for attr in _ldata.obsm.keys():
if attr not in _adata.obsm.keys():
_adata.obsm[attr] = _ldata.obsm[attr]
for attr in _ldata.uns.keys():
if attr not in _adata.uns.keys():
_adata.uns[attr] = _ldata.uns[attr]
if join_vars:
for attr in _ldata.layers.keys():
if attr not in _adata.layers.keys():
_adata.layers[attr] = _ldata.layers[attr]
if _adata.shape[1] == _ldata.shape[1]:
same_vars = len(_adata.var_names) == len(_ldata.var_names) and np.all(
_adata.var_names == _ldata.var_names
)
if same_vars:
for attr in _ldata.var.keys():
if attr not in _adata.var.keys():
_adata.var[attr] = _ldata.var[attr]
for attr in _ldata.varm.keys():
if attr not in _adata.varm.keys():
_adata.varm[attr] = _ldata.varm[attr]
else:
raise ValueError("Variable names are not identical.")
return _adata if copy else None
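# A minimal sketch of merge (illustrative only, not part of the original code): two toy AnnData
# objects sharing observation and variable names, so the merge keeps the common cells and copies
# the extra .obs column across. Wrapped in an uncalled helper; uses the module-level np/AnnData imports.
def _merge_demo():
    ref = AnnData(np.ones((3, 2)))
    ref.obs_names = ["c0", "c1", "c2"]
    ref.var_names = ["g0", "g1"]
    other = AnnData(np.zeros((2, 2)))
    other.obs_names = ["c1", "c2"]
    other.var_names = ["g0", "g1"]
    other.obs["batch"] = ["x", "y"]
    merged = merge(ref, other)  # copy=True returns a new object restricted to c1, c2
    return merged.obs["batch"]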
def obs_df(adata, keys, layer=None):
lookup_keys = [k for k in keys if k in adata.var_names]
if len(lookup_keys) < len(keys):
logg.warn(
f"Keys {[k for k in keys if k not in adata.var_names]} "
f"were not found in `adata.var_names`."
)
df = pd.DataFrame(index=adata.obs_names)
for l in lookup_keys:
df[l] = adata.obs_vector(l, layer=layer)
return df
def var_df(adata, keys, layer=None):
lookup_keys = [k for k in keys if k in adata.obs_names]
if len(lookup_keys) < len(keys):
logg.warn(
f"Keys {[k for k in keys if k not in adata.obs_names]} "
f"were not found in `adata.obs_names`."
)
df = pd.DataFrame(index=adata.var_names)
for l in lookup_keys:
df[l] = adata.var_vector(l, layer=layer)
return df
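# Illustrative sketch for obs_df/var_df (not in the original source): pull one existing gene and
# one missing key from a tiny AnnData, which exercises the warning branch. Uncalled helper.
def _obs_df_demo():
    demo = AnnData(np.arange(6, dtype=float).reshape(3, 2))
    demo.var_names = ["geneA", "geneB"]
    # 'missing' is not in var_names, so obs_df warns and returns only the geneA column
    return obs_df(demo, ["geneA", "missing"])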
def get_df(
data,
keys=None,
layer=None,
index=None,
columns=None,
sort_values=None,
dropna="all",
precision=None,
):
"""Get dataframe for a specified adata key.
Return values for specified key
(in obs, var, obsm, varm, obsp, varp, uns, or layers) as a dataframe.
Arguments
    ---------
    data
AnnData object or a numpy array to get values from.
keys
Keys from `.var_names`, `.obs_names`, `.var`, `.obs`,
`.obsm`, `.varm`, `.obsp`, `.varp`, `.uns`, or `.layers`.
layer
Layer of `adata` to use as expression values.
index
List to set as index.
columns
List to set as columns names.
sort_values
        Whether to sort values by the first column (sort_values=True) or by a specified column.
dropna
Drop columns/rows that contain NaNs in all ('all') or in any entry ('any').
precision
Set precision for pandas dataframe.
Returns
-------
A dataframe.
"""
if precision is not None:
pd.set_option("precision", precision)
if isinstance(data, AnnData):
keys, keys_split = (
keys.split("*") if isinstance(keys, str) and "*" in keys else (keys, None)
)
keys, key_add = (
keys.split("/") if isinstance(keys, str) and "/" in keys else (keys, None)
)
keys = [keys] if isinstance(keys, str) else keys
key = keys[0]
s_keys = ["obs", "var", "obsm", "varm", "uns", "layers"]
d_keys = [
data.obs.keys(),
data.var.keys(),
data.obsm.keys(),
data.varm.keys(),
data.uns.keys(),
data.layers.keys(),
]
if hasattr(data, "obsp") and hasattr(data, "varp"):
s_keys.extend(["obsp", "varp"])
d_keys.extend([data.obsp.keys(), data.varp.keys()])
if keys is None:
df = data.to_df()
elif key in data.var_names:
df = obs_df(data, keys, layer=layer)
elif key in data.obs_names:
df = var_df(data, keys, layer=layer)
else:
if keys_split is not None:
keys = [
k
for k in list(data.obs.keys()) + list(data.var.keys())
if key in k and keys_split in k
]
key = keys[0]
s_key = [s for (s, d_key) in zip(s_keys, d_keys) if key in d_key]
if len(s_key) == 0:
raise ValueError(f"'{key}' not found in any of {', '.join(s_keys)}.")
if len(s_key) > 1:
logg.warn(f"'{key}' found multiple times in {', '.join(s_key)}.")
s_key = s_key[-1]
df = getattr(data, s_key)[keys if len(keys) > 1 else key]
if key_add is not None:
df = df[key_add]
if index is None:
index = (
data.var_names
if s_key == "varm"
else data.obs_names
if s_key in {"obsm", "layers"}
else None
)
if index is None and s_key == "uns" and hasattr(df, "shape"):
key_cats = np.array(
[
key
for key in data.obs.keys()
if
|
is_categorical_dtype(data.obs[key])
|
pandas.api.types.is_categorical_dtype
|
#!/usr/bin/env python
####################################################################################################
# NAME
# <NAME> - contain common utility functions
#
# SYNOPSIS
# <NAME>
#
# AUTHOR
# Written by <NAME> (<EMAIL>).
#
# COPYRIGHT
# Copyright © 2013-2021 <NAME> <https://barras.io>.
# The MIT License (MIT) <https://opensource.org/licenses/MIT>.
####################################################################################################
import cProfile
import functools
import inspect
import json
import multiprocessing as mp
import numbers
import os
import pdb
import pstats
import random
import re
import string
import sys
import warnings
from calendar import monthrange
from collections import OrderedDict
from collections.abc import Iterable, MutableSet, Sequence, Set
from datetime import *
from distutils.util import *
from enum import Enum
from io import StringIO
from pstats import SortKey
from urllib.request import urlopen
import javaproperties as prop
import numpy as np
import pandas as pd
import validators
from dateutil import parser
from dateutil.relativedelta import relativedelta
from pandas.api.types import is_numeric_dtype
####################################################################################################
# COMMON SETTINGS
####################################################################################################
warnings.simplefilter(action='ignore', category=FutureWarning)
####################################################################################################
# COMMON ENUMS
####################################################################################################
__COMMON_ENUMS____________________________________ = ''
class Enum(Enum):
def __str__(self):
return str(self.value)
##################################################
class Environment(Enum):
DEV = 'dev'
TEST = 'test'
MODEL = 'model'
PROD = 'prod'
# • COLLECTION (LIST/DICT/DATAFRAME) ###############################################################
__COLLECTION_ENUMS________________________________ = ''
class Group(Enum):
COUNT = 'count'
FIRST = 'first'
LAST = 'last'
MIN = 'min'
MAX = 'max'
MEAN = 'mean'
MEDIAN = 'median'
STD = 'std'
VAR = 'var'
SUM = 'sum'
# • CONSOLE ########################################################################################
__CONSOLE_ENUMS___________________________________ = ''
class SeverityLevel(Enum):
FAIL = 0
ERROR = 1
WARN = 2
RESULT = 3
INFO = 4
TEST = 5
DEBUG = 6
TRACE = 7
# • DATE ###########################################################################################
__DATE_ENUMS______________________________________ = ''
class Frequency(Enum):
DAYS = 'D'
WEEKS = 'W'
MONTHS = 'M'
QUARTERS = 'Q'
SEMESTERS = 'S'
YEARS = 'Y'
####################################################################################################
# COMMON CLASSES
####################################################################################################
__COMMON_CLASSES__________________________________ = ''
class OrderedSet(MutableSet, Sequence):
def __init__(self, *args):
super().__init__()
self.elements = OrderedDict.fromkeys(to_collection(*args))
##############################################
# OPERATORS
##############################################
def __getitem__(self, index):
return self.to_list()[index]
def __iter__(self):
return self.elements.__iter__()
def __len__(self):
return self.elements.__len__()
##############################################
difference = property(lambda self: self.__sub__)
difference_update = property(lambda self: self.__isub__)
intersection = property(lambda self: self.__and__)
intersection_update = property(lambda self: self.__iand__)
issubset = property(lambda self: self.__le__)
issuperset = property(lambda self: self.__ge__)
symmetric_difference = property(lambda self: self.__xor__)
symmetric_difference_update = property(lambda self: self.__ixor__)
union = property(lambda self: self.__or__)
def __contains__(self, x):
return self.elements.__contains__(x)
def __le__(self, other):
return all(e in other for e in self)
def __lt__(self, other):
return self <= other and self != other
def __ge__(self, other):
return all(e in self for e in other)
def __gt__(self, other):
return self >= other and self != other
##############################################
def __repr__(self):
return 'OrderedSet([%s])' % (', '.join(map(repr, self.elements)))
def __str__(self):
return '{%s}' % (', '.join(map(repr, self.elements)))
##############################################
# CONVERTERS
##############################################
def to_list(self):
return to_list(self.elements)
##############################################
# PROCESSORS
##############################################
def add(self, element):
self.elements[element] = None
def discard(self, element):
self.elements.pop(element, None)
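# Small usage sketch for OrderedSet (added for illustration): duplicates collapse while insertion
# order is preserved, and the set-algebra aliases above delegate to the dunder operators. Wrapped
# in an uncalled helper because to_collection/to_list are only defined further down in this module.
def _ordered_set_demo():
    s = OrderedSet(3, 1, 2, 3)  # duplicates collapse to {3, 1, 2}
    s.add(5)
    s.discard(1)
    return list(s)  # [3, 2, 5] -- order of first insertion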
####################################################################################################
# COMMON CONSTANTS
####################################################################################################
__COMMON_CONSTANTS________________________________ = ''
CORE_COUNT = mp.cpu_count()
NA_NAME = 'NA'
# • DATE ###########################################################################################
__DATE_CONSTANTS__________________________________ = ''
# The default date format
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
# The default time format
DEFAULT_TIME_FORMAT = '%H:%M:%S'
# The default date-time format
DEFAULT_DATE_TIME_FORMAT = DEFAULT_DATE_FORMAT + ' ' + DEFAULT_TIME_FORMAT
# The default full date format
DEFAULT_FULL_DATE_FORMAT = '%B %e, %Y'
# The default month-year date format
DEFAULT_MONTH_YEAR_FORMAT = '%Y-%m'
# The default full month-year date format
DEFAULT_FULL_MONTH_YEAR_FORMAT = '%B %Y'
# The default month date format
DEFAULT_MONTH_FORMAT = '%b'
# The default full month date format
DEFAULT_FULL_MONTH_FORMAT = '%B'
#########################
# The default frequency
DEFAULT_FREQUENCY = Frequency.MONTHS
# The default group
DEFAULT_GROUP = Group.LAST
# The default period
DEFAULT_PERIOD = '1' + Frequency.YEARS.value
##################################################
# The time deltas
DAY = relativedelta(days=1)
WEEK = 7 * DAY
MONTH = relativedelta(months=1)
QUARTER = 3 * MONTH
SEMESTER = 6 * MONTH
YEAR = relativedelta(years=1)
FREQUENCY_DELTA = {
Frequency.DAYS: DAY,
Frequency.WEEKS: WEEK,
Frequency.MONTHS: MONTH,
Frequency.QUARTERS: QUARTER,
Frequency.SEMESTERS: SEMESTER,
Frequency.YEARS: YEAR
}
#########################
# The average number of days per year
DAYS_PER_YEAR = 365.25 # days
# The average number of trading days per year
TRADING_DAYS_PER_YEAR = 253 # days
# The average number of weeks per year
WEEKS_PER_YEAR = DAYS_PER_YEAR / 7 # weeks
# The number of months per year
MONTHS_PER_YEAR = 12 # months
# The number of quarters per year
QUARTERS_PER_YEAR = 4 # quarters
# The number of semesters per year
SEMESTERS_PER_YEAR = 2 # semesters
#########################
# The number of days per week
DAYS_PER_WEEK = 7 # days
# The average number of days per month
DAYS_PER_MONTH = DAYS_PER_YEAR / MONTHS_PER_YEAR # days
# The average number of days per quarter
DAYS_PER_QUARTER = DAYS_PER_YEAR / QUARTERS_PER_YEAR # days
# The average number of days per semester
DAYS_PER_SEMESTER = DAYS_PER_YEAR / SEMESTERS_PER_YEAR # days
#########################
FREQUENCY_DAY_COUNT = {
Frequency.DAYS: 1,
Frequency.WEEKS: DAYS_PER_WEEK,
Frequency.MONTHS: DAYS_PER_MONTH,
Frequency.QUARTERS: DAYS_PER_QUARTER,
Frequency.SEMESTERS: DAYS_PER_SEMESTER,
Frequency.YEARS: DAYS_PER_YEAR
}
DAY_COUNT_FREQUENCY = {FREQUENCY_DAY_COUNT[k]: k for k in FREQUENCY_DAY_COUNT}
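# Note (added for illustration): the two tables above are inverses of each other, e.g.
# FREQUENCY_DAY_COUNT[Frequency.MONTHS] is ~30.44 days and
# DAY_COUNT_FREQUENCY[FREQUENCY_DAY_COUNT[Frequency.MONTHS]] maps back to Frequency.MONTHS.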
#########################
# The weekdays
MO, TU, WE, TH, FR, SA, SU = WEEKDAYS = tuple(i for i in range(7))
# • FILE ###########################################################################################
__FILE_CONSTANTS__________________________________ = ''
# The default root
DEFAULT_ROOT = None
# The default resources directory
DEFAULT_RES_DIR = 'resources'
# • NUMBER #########################################################################################
__NUMBER_CONSTANTS________________________________ = ''
EPS = np.finfo(float).eps
INF = np.inf
NAN = np.nan
####################################################################################################
# COMMON VERIFIERS
####################################################################################################
__COMMON_VERIFIERS________________________________ = ''
def is_iterable(x):
return isinstance(x, Iterable)
def is_sequence(x):
return isinstance(x, Sequence)
def is_tuple(x):
return isinstance(x, tuple)
#########################
def is_null(x):
return x is None or is_nan(x)
def is_all_null(*args):
return all([is_null(arg) for arg in to_collection(*args)])
def is_all_not_null(*args):
return not is_any_null(*args)
def is_any_null(*args):
return any([is_null(arg) for arg in to_collection(*args)])
def is_any_not_null(*args):
return not is_all_null(*args)
#########################
def is_empty(x):
return is_null(x) or \
(is_collection(x) and len(x) == 0 or is_frame(x) and count_cols(x) == 0) or \
str(x) == ''
def is_all_empty(*args):
return all([is_empty(arg) for arg in to_collection(*args)])
def is_all_not_empty(*args):
return not is_any_empty(*args)
def is_any_empty(*args):
return any([is_empty(arg) for arg in to_collection(*args)])
def is_any_not_empty(*args):
return not is_all_empty(*args)
#########################
def is_all_value(value, *args):
return all([value == arg for arg in to_collection(*args)])
def is_all_not_value(value, *args):
return not is_any_value(value, *args)
def is_any_value(value, *args):
return any([value == arg for arg in to_collection(*args)])
def is_any_not_value(value, *args):
return not is_all_value(value, *args)
##################################################
def exists(x):
return x in globals() or x in locals() or x in dir(__builtins__)
# • ARRAY ##########################################################################################
__ARRAY_VERIFIERS_________________________________ = ''
def is_array(x):
return isinstance(x, np.ndarray)
# • COLLECTION (LIST/DICT/DATAFRAME) ###############################################################
__COLLECTION_VERIFIERS____________________________ = ''
def is_collection(x):
return is_iterable(x) and not is_string(x) and not is_tuple(x)
# • DATAFRAME ######################################################################################
__DATAFRAME_VERIFIERS_____________________________ = ''
def is_table(x):
return is_series(x) or is_frame(x)
def is_series(x):
return isinstance(x, pd.Series) or isinstance(x, pd.core.groupby.generic.SeriesGroupBy)
def is_frame(x):
return isinstance(x, pd.DataFrame) or isinstance(x, pd.core.groupby.generic.DataFrameGroupBy)
def is_group(x):
return isinstance(x, pd.core.groupby.generic.SeriesGroupBy) or \
isinstance(x, pd.core.groupby.generic.DataFrameGroupBy)
# • DATE ###########################################################################################
__DATE_VERIFIERS__________________________________ = ''
def is_date(x):
return isinstance(x, date)
def is_datetime(x):
return isinstance(x, datetime)
def is_timestamp(x):
return isinstance(x, pd.Timestamp)
def is_stamp(x):
return is_number(x)
#########################
def is_business_day(d):
if is_string(d):
d = parse_datetime(d)
return date.weekday(d) < 5
# • DICT ###########################################################################################
__DICT_VERIFIERS__________________________________ = ''
def is_dict(x):
return isinstance(x, dict)
# • FILE ###########################################################################################
__FILE_VERIFIERS__________________________________ = ''
def is_dir(path):
return os.path.isdir(path)
def is_file(path):
return os.path.isfile(path)
#########################
def is_root(path):
return os.path.dirname(path) == path
# • LIST ###########################################################################################
__LIST_VERIFIERS__________________________________ = ''
def is_list(x):
return isinstance(x, list)
# • NUMBER #########################################################################################
__NUMBER_VERIFIERS________________________________ = ''
def is_nan(x):
return x is pd.NA or x is pd.NaT or (is_number(x) and str(x) == 'nan')
#########################
def is_number(x):
return isinstance(x, numbers.Number)
def is_bool(x):
return isinstance(x, bool)
def is_int(x):
return isinstance(x, int)
def is_float(x):
return isinstance(x, float)
#########################
def equals(x, y):
return is_null(x) and is_null(y) or x == y
# • SET ############################################################################################
__SET_VERIFIERS___________________________________ = ''
def is_set(x):
return isinstance(x, Set)
def is_ordered_set(x):
return isinstance(x, OrderedSet)
# • STRING #########################################################################################
__STRING_VERIFIERS________________________________ = ''
def is_string(x):
return isinstance(x, str)
####################################################################################################
# FILE FUNCTIONS
####################################################################################################
__FILE____________________________________________ = ''
def get_path(path='.'):
return os.path.abspath(path)
#########################
def get_dir(path='.', parent=None):
path = get_path(path)
if is_null(parent):
parent = not is_dir(path)
return os.path.dirname(get_path(path) + ('/' if not parent else ''))
def get_filename(path='.'):
path = get_path(path)
return os.path.basename(path)
def get_extension(path='.'):
path = get_path(path)
return os.path.splitext(path)[1][1:]
##################################################
def find_path(filename, dir=None, subdir=None):
if is_null(dir):
dir = get_dir(get_path())
while not is_file(format_dir(dir) + format_dir(subdir) + filename) and not is_root(dir):
dir = get_dir(dir, parent=True)
elif is_file(dir):
dir = get_dir(dir)
return format_dir(dir) + format_dir(subdir) + filename
#########################
def format_dir(dir):
if is_null(dir):
return ''
if dir[-1] == '/' or dir[-1] == '\\':
dir = dir[:-1]
return dir + '/'
#########################
def read(path, encoding=None):
with open(path, mode='r', encoding=encoding) as f:
return f.read()
def read_bytes(path, encoding=None):
with open(path, mode='rb', encoding=encoding) as f:
return f.read()
def read_json(path, encoding=None):
if validators.url(path):
with urlopen(path) as f:
return json.load(f)
with open(path, encoding=encoding) as f:
return json.load(f)
def read_csv(path, encoding=None,
delimiter=',', dtype=None,
na_values=None, keep_default_na=False, na_filter=True,
parse_dates=True, date_parser=None, infer_datetime_format=True, keep_date_col=True,
verbose=False):
if is_null(na_values):
na_values = ['']
return pd.read_csv(path, encoding=encoding,
delimiter=delimiter, dtype=dtype,
na_values=na_values, keep_default_na=keep_default_na, na_filter=na_filter,
parse_dates=parse_dates, date_parser=date_parser,
infer_datetime_format=infer_datetime_format, keep_date_col=keep_date_col,
verbose=verbose)
#########################
def write(path, content, encoding=None):
with open(path, mode='w', encoding=encoding) as f:
return f.write(content)
def write_bytes(path, content, encoding=None):
with open(path, mode='wb', encoding=encoding) as f:
return f.write(content)
####################################################################################################
# COMMON PROPERTIES
####################################################################################################
__COMMON_PROPERTIES_______________________________ = ''
def load_props(filename, dir=DEFAULT_ROOT, subdir=DEFAULT_RES_DIR):
"""Returns the properties with the specified filename in the specified directory."""
with open(find_path(filename + '.properties', dir=dir, subdir=subdir), 'r') as f:
return prop.load(f)
#########################
# The properties
PROPS = load_props('common')
def get_prop(name, default=None):
"""Returns the property with the specified name."""
try:
return PROPS[name]
except Exception as ex:
return default
def get_bool_prop(name, default=None):
prop = get_prop(name, default=default)
if is_null(prop):
return prop
elif is_bool(prop):
return prop
return strtobool(str(prop))
def get_float_prop(name, default=None):
prop = get_prop(name, default=default)
if is_null(prop):
return prop
elif is_float(prop):
return prop
return float(prop)
def get_int_prop(name, default=None):
prop = get_prop(name, default=default)
if is_null(prop):
return prop
elif is_int(prop):
return prop
return int(prop)
##################################################
# The flag specifying whether to assert
ASSERT = get_bool_prop('assert', True)
# The environment
ENV = Environment(get_prop('env', 'prod'))
# • CONSOLE ########################################################################################
__CONSOLE_PROPERTIES______________________________ = ''
# The severity level
SEVERITY_LEVEL = SeverityLevel(get_int_prop('severityLevel', 4))
# The flag specifying whether to enable the verbose mode
VERBOSE = get_bool_prop('verbose', True)
# • DATE ###########################################################################################
__DATE_PROPERTIES_________________________________ = ''
# The date format
DATE_FORMAT = get_prop('dateFormat', DEFAULT_DATE_FORMAT)
# The time format
TIME_FORMAT = get_prop('timeFormat', DEFAULT_TIME_FORMAT)
# The date-time format
DATE_TIME_FORMAT = DATE_FORMAT + ' ' + TIME_FORMAT
#########################
# The frequency
FREQUENCY = Frequency(get_prop('frequency', DEFAULT_FREQUENCY.value))
# The group
GROUP = Group(get_prop('group', DEFAULT_GROUP.value))
# The period
PERIOD = get_prop('period', DEFAULT_PERIOD)
####################################################################################################
# COMMON ACCESSORS
####################################################################################################
__COMMON_ACCESSORS________________________________ = ''
def get_exec_info():
return sys.exc_info()[0]
#########################
def get_stack(level):
return inspect.stack()[level + 1]
def get_script_name(level):
return get_filename(get_stack(level + 1)[1])
def get_function_name(level):
return get_stack(level + 1)[3]
def get_line_number(level):
return get_stack(level + 1)[2]
#########################
def get_module_name(obj):
return obj.__class__.__module__
def get_class_name(obj):
return obj.__class__.__name__
def get_full_class_name(obj):
module_name = get_module_name(obj)
if is_null(module_name) or module_name == get_module_name(str):
return get_class_name(obj)
return collapse(module_name, '.', get_class_name(obj))
def get_attributes(obj):
return [a for a in vars(obj) if not a.startswith('_')]
def get_all_attributes(obj):
return [a for a in dir(obj) if not a.startswith('_')]
# • COLLECTION (LIST/DICT/DATAFRAME) ###############################################################
__COLLECTION_ACCESSORS____________________________ = ''
def get(c, index=0, axis=0):
if not is_collection(c):
return c
if is_null(axis):
return simplify(flatten(c, axis=axis)[index])
if is_table(c) or is_array(c):
if axis == 0:
return simplify(get_row(c, index))
return simplify(get_col(c, index))
elif is_dict(c):
return simplify(c[get_keys(c)[index]])
return simplify(c[index])
def get_first(c, axis=0):
return get(c, index=0, axis=axis)
def get_last(c, axis=0):
return get(c, index=-1, axis=axis)
def get_next(c):
if not is_collection(c):
return c
return next(iter(c))
#########################
def get_name(c, inclusion=None, exclusion=None):
return simplify(get_names(c, inclusion=inclusion, exclusion=exclusion))
def get_names(c, inclusion=None, exclusion=None):
"""Returns the names of the specified collection."""
if is_empty(c):
return []
if is_group(c):
c = c.obj if c.axis == 0 else c.groups
if is_table(inclusion):
inclusion = get_names(inclusion)
if is_table(exclusion):
exclusion = get_names(exclusion)
if hasattr(c, 'names'):
c = c.names
elif hasattr(c, 'name'):
c = c.name
elif not is_table(c) and not is_dict(c):
c = range(len(c))
return filter_list(c, inclusion=inclusion, exclusion=exclusion)
def get_key(c, inclusion=None, exclusion=None):
return simplify(get_keys(c, inclusion=inclusion, exclusion=exclusion))
def get_keys(c, inclusion=None, exclusion=None):
"""Returns the keys (indices/keys/names) of the specified collection that are in the specified
inclusive list and are not in the specified exclusive list."""
if is_empty(c):
return OrderedSet()
if is_group(c):
c = c.obj if c.axis == 0 else c.groups
if is_table(inclusion):
inclusion = get_keys(inclusion)
if is_table(exclusion):
exclusion = get_keys(exclusion)
if is_series(c):
c = c.index
elif not is_table(c) and not is_dict(c):
c = range(len(c))
return filter_ordered_set(c, inclusion=inclusion, exclusion=exclusion)
def get_all_common_keys(*args, inclusion=None, exclusion=None):
return reduce(lambda c1, c2: get_common_keys(c1, c2, inclusion=inclusion, exclusion=exclusion),
*args)
def get_common_keys(c1, c2, inclusion=None, exclusion=None):
"""Returns the common keys (indices/keys/names) of the specified collections that are in the
specified inclusive list and are not in the specified exclusive list."""
return get_keys(c1, inclusion=include_list(get_keys(c2), inclusion), exclusion=exclusion)
def get_index(c, inclusion=None, exclusion=None):
"""Returns the index (indices/keys/index) of the specified collection that are in the
specified inclusive list and are not in the specified exclusive list."""
if is_empty(c):
return []
if is_group(c):
c = c.groups if c.axis == 0 else c.obj
if is_table(inclusion):
inclusion = get_index(inclusion)
if is_table(exclusion):
exclusion = get_index(exclusion)
if is_table(c):
return filter_list(c.index, inclusion=inclusion, exclusion=exclusion)
return get_keys(c, inclusion=inclusion, exclusion=exclusion)
def get_all_common_index(*args, inclusion=None, exclusion=None):
return reduce(lambda c1, c2: get_common_index(c1, c2, inclusion=inclusion, exclusion=exclusion),
*args)
def get_common_index(c1, c2, inclusion=None, exclusion=None):
"""Returns the common index (indices/keys/index) of the specified collections that are in the
specified inclusive list and are not in the specified exclusive list."""
return get_index(c1, inclusion=include_list(get_keys(c2), inclusion), exclusion=exclusion)
def get_item(c, inclusion=None, exclusion=None):
return simplify(get_items(c, inclusion=inclusion, exclusion=exclusion))
def get_items(c, inclusion=None, exclusion=None):
"""Returns the items (values/entries/columns) of the specified collection whose keys
(indices/keys/names) are in the specified inclusive list and are not in the specified exclusive
list."""
if is_empty(c):
return []
if is_null(inclusion) and is_empty(exclusion):
if is_table(c) or is_dict(c):
return c.items()
keys = get_keys(c, inclusion=inclusion, exclusion=exclusion)
if is_group(c):
if c.axis == 0:
return [(k, include(v, keys)) for k, v in c]
return [(k, v) for k, v in c if k in keys]
return [(k, c[k]) for k in keys]
def get_value(c, inclusion=None, exclusion=None):
return simplify(get_values(c, inclusion=inclusion, exclusion=exclusion))
def get_values(c, inclusion=None, exclusion=None):
"""Returns the values (values/values/columns) of the specified collection whose keys
(indices/keys/names) are in the specified inclusive list and are not in the specified exclusive
list."""
if is_empty(c):
return np.array([])
elif not is_collection(c):
return to_array(c)
keys = get_keys(c, inclusion=inclusion, exclusion=exclusion)
if is_group(c):
if c.axis == 0:
return to_array([include(v, keys).values for k, v in c])
return to_array([v.values for k, v in c if k in keys])
elif is_table(c):
return include(c, keys).values
elif is_array(c):
return c[to_list(keys)]
return to_array([c[k] for k in keys])
##################################################
def set_names(c, new_names):
"""Sets the names of the specified collection."""
if is_empty(c):
return c
if is_group(c):
c = c.obj if c.axis == 0 else c.groups
if is_table(new_names):
new_names = get_names(new_names)
else:
new_names = to_list(new_names)
if is_frame(c):
c.columns = new_names
elif is_series(c):
c.name = simplify(new_names)
else:
set_keys(c, new_names)
return c
def set_keys(c, new_keys, inclusion=None, exclusion=None):
"""Sets the keys (indices/keys/names) of the specified collection that are in the specified
inclusive list and are not in the specified exclusive list."""
if is_empty(c):
return c
if is_group(c):
c = c.obj if c.axis == 0 else c.groups
if is_table(new_keys):
new_keys = get_keys(new_keys)
else:
new_keys = to_ordered_set(new_keys)
keys = get_keys(c, inclusion=inclusion, exclusion=exclusion)
if is_frame(c):
c.loc[:, keys].columns = new_keys
elif is_series(c):
set_index(c, new_keys, inclusion=inclusion, exclusion=exclusion)
elif is_dict(c):
d = c.copy()
for key, new_key in zip(keys, new_keys):
d[new_key] = c.pop(key)
update(c, d, inclusion=new_keys)
else:
l = c.copy()
for key, new_key in zip(keys, new_keys):
l[new_key] = c[key]
update(c, l, inclusion=new_keys)
return c
def set_index(c, new_index, inclusion=None, exclusion=None):
"""Sets the index (indices/keys/index) of the specified collection that are in the specified
inclusive list and are not in the specified exclusive list."""
if is_empty(c):
return c
if is_group(c):
c = c.groups if c.axis == 0 else c.obj
if is_table(new_index):
new_index_names = get_names(new_index.index)
new_index = get_index(new_index)
else:
new_index_names = get_names(new_index)
new_index = to_list(new_index)
if is_table(c):
if not is_empty(new_index) and is_tuple(new_index[0]):
c.index = pd.MultiIndex.from_tuples(new_index, names=new_index_names)
else:
index = get_index(c, inclusion=inclusion, exclusion=exclusion)
rename(c, index=dict(zip(index, new_index)))
else:
set_keys(c, new_index, inclusion=inclusion, exclusion=exclusion)
return c
def set_values(c, new_values, inclusion=None, exclusion=None):
"""Sets the values (values/values/columns) of the specified collection whose keys
(indices/keys/names) are in the specified inclusive list and are not in the specified exclusive
list."""
if is_empty(c):
return c
if is_group(c):
c = c.obj if c.axis == 0 else c.groups
if is_table(new_values):
new_values = get_values(new_values)
else:
new_values = to_list(new_values)
keys = get_keys(c, inclusion=inclusion, exclusion=exclusion)
if is_frame(c):
chained_assignment = pd.options.mode.chained_assignment
pd.options.mode.chained_assignment = None
c.loc[:, keys] = new_values
pd.options.mode.chained_assignment = chained_assignment
elif is_series(c):
chained_assignment = pd.options.mode.chained_assignment
pd.options.mode.chained_assignment = None
c.loc[keys] = new_values
pd.options.mode.chained_assignment = chained_assignment
elif is_array(c):
c[to_list(keys)] = new_values
else:
for i in range(len(keys)):
c[keys[i]] = new_values[i]
# • DATAFRAME ######################################################################################
__DATAFRAME_ACCESSORS_____________________________ = ''
def get_row(df, i=0):
"""Returns the row of the specified dataframe at the specified index."""
if is_group(df):
df = get_values(df)
elif is_table(df):
return df.iloc[i:] if i == -1 else df.iloc[i:i + 1]
return df[i]
def get_first_row(df):
"""Returns the first row of the specified dataframe."""
return get_row(df, 0)
def get_last_row(df):
"""Returns the last row of the specified dataframe."""
return get_row(df, -1)
def get_rows(df):
"""Returns the rows of the specified dataframe."""
if is_frame(df):
return [row for _, row in df.iterrows()]
elif is_series(df):
return [row for _, row in df.items()]
return [get_row(df, i) for i in range(count_rows(df))]
#########################
def get_col(df, j=0):
"""Returns the column of the specified dataframe at the specified index."""
if is_group(df):
df = get_values(df)
elif is_frame(df):
return df.iloc[:, j]
elif is_series(df):
return df.iloc[:]
return df[:, j]
def get_first_col(df):
"""Returns the first column of the specified dataframe."""
return get_col(df, 0)
def get_last_col(df):
"""Returns the last column of the specified dataframe."""
return get_col(df, -1)
def get_cols(df):
"""Returns the columns of the specified dataframe."""
if is_frame(df):
return [col for _, col in df.items()]
elif is_series(df):
return [df]
return [get_col(df, j) for j in range(count_cols(df))]
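# Illustrative sketch (not part of the original module) of the row/column accessors on a plain
# DataFrame: get_row returns a one-row frame, get_col a Series, and get_rows/get_cols plain lists.
# Wrapped in an uncalled helper so nothing runs at import time.
def _df_accessors_demo():
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    first = get_first_row(df)    # DataFrame holding only the first row
    last_col = get_last_col(df)  # Series 'b'
    return first, last_col, len(get_rows(df)), len(get_cols(df))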
# • DATE ###########################################################################################
__DATE_ACCESSORS__________________________________ = ''
def get_date():
return date.today()
def get_date_string():
return format_date(get_date())
def get_datetime():
return datetime.now()
def get_datetime_string(fmt=DATE_TIME_FORMAT):
return format_datetime(get_datetime(), fmt=fmt)
def get_time_string():
return format_time(get_datetime())
def get_datestamp():
return to_datestamp(get_date())
def get_timestamp():
return to_timestamp(get_datetime())
def get_stamp():
return to_stamp(get_datetime())
#########################
def get_day(d=get_datetime(), week=False, year=False):
if is_string(d):
d = parse_datetime(d)
if is_date(d):
return d.weekday() if week else d.timetuple().tm_yday if year else d.day
return d
def get_days(c, week=False, year=False):
if is_table(c):
index = to_timestamp(get_index(c))
return index.weekday if week else index.dayofyear if year else index.day
elif is_dict(c):
return get_days(get_index(c), week=week, year=year)
return collection_to_type([get_day(d, week=week, year=year) for d in c], c)
def get_week(d=get_datetime()):
if is_string(d):
d = parse_datetime(d)
if is_date(d):
return d.isocalendar()[1]
return d
def get_weeks(c):
if is_table(c):
return to_timestamp(get_index(c)).week
elif is_dict(c):
return get_weeks(get_index(c))
return collection_to_type([get_week(d) for d in c], c)
def get_year_week(d=get_datetime()):
if is_string(d):
d = parse_datetime(d)
if is_date(d):
iso_cal = d.isocalendar()
return iso_cal[0], iso_cal[1]
return d
def get_year_weeks(c):
if is_table(c):
return pd.MultiIndex.from_tuples(get_year_weeks(get_index(c)), names=['year', 'week'])
elif is_dict(c):
return get_year_weeks(get_index(c))
return collection_to_type([get_year_week(d) for d in c], c)
def get_month(d=get_datetime()):
if is_string(d):
d = parse_datetime(d)
if is_date(d):
return d.month
return d
def get_months(c):
if is_table(c):
return to_timestamp(get_index(c)).month
elif is_dict(c):
return get_months(get_index(c))
return collection_to_type([get_month(d) for d in c], c)
def get_quarter(d=get_datetime()):
if is_string(d):
d = parse_datetime(d)
if is_date(d):
return ceil(d.month / 3)
return d
def get_quarters(c):
if is_table(c):
return to_timestamp(get_index(c)).quarter
elif is_dict(c):
return get_quarters(get_index(c))
return collection_to_type([get_quarter(d) for d in c], c)
def get_semester(d=get_datetime()):
if is_string(d):
d = parse_datetime(d)
if is_date(d):
return ceil(d.month / 6)
return d
def get_semesters(c):
if is_table(c):
return ceil(get_months(c) / 6)
elif is_dict(c):
return get_semesters(get_index(c))
return collection_to_type([get_semester(d) for d in c], c)
def get_year(d=get_datetime()):
if is_string(d):
d = parse_datetime(d)
if is_date(d):
return d.year
return d
def get_years(c):
if is_table(c):
return to_timestamp(get_index(c)).year
elif is_dict(c):
return get_years(get_index(c))
return collection_to_type([get_year(d) for d in c], c)
#########################
def get_business_day(d=get_datetime(), prev=True):
if is_string(d):
d = parse_datetime(d)
if not is_business_day(d):
return get_prev_business_day(d) if prev else get_next_business_day(d)
return d
def get_prev_business_day(d=get_datetime()):
if is_string(d):
d = parse_datetime(d)
day = date.weekday(d)
if day is MO: # Monday
return d - 3 * DAY
elif day is SU: # Sunday
return d - 2 * DAY
return d - DAY
def get_next_business_day(d=get_datetime()):
if is_string(d):
d = parse_datetime(d)
day = date.weekday(d)
if day is FR: # Friday
return d + 3 * DAY
elif day is SA: # Saturday
return d + 2 * DAY
return d + DAY
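# Small sketch (illustrative, not from the original code) of the business-day helpers: a Saturday
# rolls back to the previous Friday by default and forward to Monday with prev=False. Wrapped in
# an uncalled helper so nothing runs at import time.
def _business_day_demo():
    saturday = datetime(2021, 1, 2)                    # a Saturday
    back = get_business_day(saturday)                  # Friday 2021-01-01
    forward = get_business_day(saturday, prev=False)   # Monday 2021-01-04
    return back, forward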
#########################
def get_month_range(d=get_datetime()):
if is_string(d):
d = parse_datetime(d)
return monthrange(d.year, d.month)
def get_month_weekday(year, month):
return monthrange(year, month)[0]
def get_month_days(year, month):
return monthrange(year, month)[1]
#########################
def get_month_start(d=get_datetime()):
if is_collection(d):
return apply(get_month_start, d)
elif is_string(d):
d = parse_datetime(d)
return reset_time(d.replace(day=1))
def get_month_end(d=get_datetime()):
if is_collection(d):
return apply(get_month_end, d)
elif is_string(d):
d = parse_datetime(d)
return reset_time(d.replace(day=get_month_days(d.year, d.month)))
def get_prev_month_start(d=get_datetime()):
if is_collection(d):
return apply(get_prev_month_start, d)
elif is_string(d):
d = parse_datetime(d)
if d.month == 1:
year = d.year - 1
month = 12
else:
year = d.year
month = d.month - 1
return reset_time(d.replace(year=year, month=month, day=1))
def get_prev_month_end(d=get_datetime()):
if is_collection(d):
return apply(get_prev_month_end, d)
elif is_string(d):
d = parse_datetime(d)
if d.month == 1:
year = d.year - 1
month = 12
else:
year = d.year
month = d.month - 1
return reset_time(d.replace(year=year, month=month, day=get_month_days(year, month)))
def get_next_month_start(d=get_datetime()):
if is_collection(d):
return apply(get_next_month_start, d)
elif is_string(d):
d = parse_datetime(d)
if d.month == 12:
year = d.year + 1
month = 1
else:
year = d.year
month = d.month + 1
return reset_time(d.replace(year=year, month=month, day=1))
def get_next_month_end(d=get_datetime()):
if is_collection(d):
return apply(get_next_month_end, d)
elif is_string(d):
d = parse_datetime(d)
if d.month == 12:
year = d.year + 1
month = 1
else:
year = d.year
month = d.month + 1
return reset_time(d.replace(year=year, month=month, day=get_month_days(year, month)))
#########################
def get_quarter_start(d=get_datetime()):
if is_collection(d):
return apply(get_quarter_start, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 3:
month = 1
elif 4 <= d.month <= 6:
month = 4
elif 7 <= d.month <= 9:
month = 7
else:
month = 10
return reset_time(d.replace(month=month, day=1))
def get_quarter_end(d=get_datetime()):
if is_collection(d):
return apply(get_quarter_end, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 3:
month = 3
elif 4 <= d.month <= 6:
month = 6
elif 7 <= d.month <= 9:
month = 9
else:
month = 12
return reset_time(d.replace(month=month, day=get_month_days(d.year, month)))
def get_prev_quarter_start(d=get_datetime()):
if is_collection(d):
return apply(get_prev_quarter_start, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 3:
year = d.year - 1
month = 10
elif 4 <= d.month <= 6:
year = d.year
month = 1
elif 7 <= d.month <= 9:
year = d.year
month = 4
else:
year = d.year
month = 7
return reset_time(d.replace(year=year, month=month, day=1))
def get_prev_quarter_end(d=get_datetime()):
if is_collection(d):
return apply(get_prev_quarter_end, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 3:
year = d.year - 1
month = 10
elif 4 <= d.month <= 6:
year = d.year
month = 1
elif 7 <= d.month <= 9:
year = d.year
month = 4
else:
year = d.year
month = 7
return reset_time(d.replace(year=year, month=month, day=get_month_days(year, month)))
def get_next_quarter_start(d=get_datetime()):
if is_collection(d):
return apply(get_next_quarter_start, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 3:
year = d.year
month = 4
elif 4 <= d.month <= 6:
year = d.year
month = 7
elif 7 <= d.month <= 9:
year = d.year
month = 10
else:
year = d.year + 1
month = 1
return reset_time(d.replace(year=year, month=month, day=1))
def get_next_quarter_end(d=get_datetime()):
if is_collection(d):
return apply(get_next_quarter_end, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 3:
year = d.year
month = 4
elif 4 <= d.month <= 6:
year = d.year
month = 7
elif 7 <= d.month <= 9:
year = d.year
month = 10
else:
year = d.year + 1
month = 1
return reset_time(d.replace(year=year, month=month, day=get_month_days(year, month)))
#########################
def get_semester_start(d=get_datetime()):
if is_collection(d):
return apply(get_semester_start, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 6:
month = 1
else:
month = 7
return reset_time(d.replace(month=month, day=1))
def get_semester_end(d=get_datetime()):
if is_collection(d):
return apply(get_semester_end, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 6:
month = 6
else:
month = 12
return reset_time(d.replace(month=month, day=get_month_days(d.year, month)))
def get_prev_semester_start(d=get_datetime()):
if is_collection(d):
return apply(get_prev_semester_start, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 6:
year = d.year - 1
month = 7
else:
year = d.year
month = 1
return reset_time(d.replace(year=year, month=month, day=1))
def get_prev_semester_end(d=get_datetime()):
if is_collection(d):
return apply(get_prev_semester_end, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 6:
year = d.year - 1
month = 12
else:
year = d.year
month = 6
return reset_time(d.replace(year=year, month=month, day=get_month_days(year, month)))
def get_next_semester_start(d=get_datetime()):
if is_collection(d):
return apply(get_next_semester_start, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 6:
year = d.year
month = 7
else:
year = d.year + 1
month = 1
return reset_time(d.replace(year=year, month=month, day=1))
def get_next_semester_end(d=get_datetime()):
if is_collection(d):
return apply(get_next_semester_end, d)
elif is_string(d):
d = parse_datetime(d)
if 1 <= d.month <= 6:
year = d.year
month = 12
else:
year = d.year + 1
month = 6
return reset_time(d.replace(year=year, month=month, day=get_month_days(year, month)))
#########################
def get_year_start(d=get_datetime()):
if is_collection(d):
return apply(get_year_start, d)
elif is_string(d):
d = parse_datetime(d)
return reset_time(d.replace(month=1, day=1))
def get_year_end(d=get_datetime()):
if is_collection(d):
return apply(get_year_end, d)
elif is_string(d):
d = parse_datetime(d)
return reset_time(d.replace(month=12, day=31))
def get_prev_year_start(d=get_datetime()):
if is_collection(d):
return apply(get_prev_year_start, d)
elif is_string(d):
d = parse_datetime(d)
return reset_time(d.replace(year=d.year - 1, month=1, day=1))
def get_prev_year_end(d=get_datetime()):
if is_collection(d):
return apply(get_prev_year_end, d)
elif is_string(d):
d = parse_datetime(d)
return reset_time(d.replace(year=d.year - 1, month=12, day=31))
def get_next_year_start(d=get_datetime()):
if is_collection(d):
return apply(get_next_year_start, d)
elif is_string(d):
d = parse_datetime(d)
return reset_time(d.replace(year=d.year + 1, month=1, day=1))
def get_next_year_end(d=get_datetime()):
if is_collection(d):
return apply(get_next_year_end, d)
elif is_string(d):
d = parse_datetime(d)
return reset_time(d.replace(year=d.year + 1, month=12, day=31))
#########################
def get_start_period(y, s=None, q=None, m=None, w=None, d=None):
if is_all_not_null(y, m, d):
return create_datetime(y, m, d)
elif is_all_not_null(y, w):
return datetime.fromisocalendar(y, w, 1)
elif is_all_not_null(y, m):
return create_datetime(y, m, 1)
elif is_all_not_null(y, q):
return create_datetime(y, 1 + 3 * (q - 1), 1)
elif is_all_not_null(y, s):
return create_datetime(y, 1 + 6 * (s - 1), 1)
return create_datetime(y, 1, 1)
def get_end_period(y, s=None, q=None, m=None, w=None, d=None):
if is_all_not_null(y, m, d):
return create_datetime(y, m, d)
elif is_all_not_null(y, w):
return datetime.fromisocalendar(y, w, 7)
elif is_all_not_null(y, m):
return create_datetime(y, m, monthrange(y, m)[1])
elif is_all_not_null(y, q):
return create_datetime(y, 3 + 3 * (q - 1), monthrange(y, 3 + 3 * (q - 1))[1])
elif is_all_not_null(y, s):
return create_datetime(y, 6 + 6 * (s - 1), monthrange(y, 6 + 6 * (s - 1))[1])
return create_datetime(y, 12, 31)
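# Worked example (illustrative only) of the period boundary helpers: quarter 3 of 2021 spans
# July 1st to September 30th, and ISO week 1 of 2021 runs Monday through Sunday. create_datetime
# is defined later in this module, so the helper stays uncalled.
def _period_boundaries_demo():
    q_start = get_start_period(2021, q=3)  # 2021-07-01
    q_end = get_end_period(2021, q=3)      # 2021-09-30
    w_start = get_start_period(2021, w=1)  # Monday of ISO week 1
    w_end = get_end_period(2021, w=1)      # Sunday of ISO week 1
    return q_start, q_end, w_start, w_end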
#########################
def get_start_date(d=get_datetime(), freq=FREQUENCY):
if freq is Frequency.WEEKS:
y, w = get_year_week(d)
return get_start_period(y=y, w=w)
elif freq is Frequency.MONTHS:
return get_start_period(y=get_year(d), m=get_month(d))
elif freq is Frequency.QUARTERS:
return get_start_period(y=get_year(d), q=get_quarter(d))
elif freq is Frequency.SEMESTERS:
return get_start_period(y=get_year(d), s=get_semester(d))
elif freq is Frequency.YEARS:
return get_start_period(y=get_year(d))
return to_datetime(d)
def get_end_date(d=get_datetime(), freq=FREQUENCY):
if freq is Frequency.WEEKS:
y, w = get_year_week(d)
return get_end_period(y=y, w=w)
elif freq is Frequency.MONTHS:
return get_end_period(y=get_year(d), m=get_month(d))
elif freq is Frequency.QUARTERS:
return get_end_period(y=get_year(d), q=get_quarter(d))
elif freq is Frequency.SEMESTERS:
return get_end_period(y=get_year(d), s=get_semester(d))
elif freq is Frequency.YEARS:
return get_end_period(y=get_year(d))
return to_datetime(d)
def get_start_datetime(d=get_datetime(), freq=FREQUENCY):
return to_datetime(get_start_date(d, freq=freq))
def get_end_datetime(d=get_datetime(), freq=FREQUENCY):
return to_datetime(get_end_date(d, freq=freq))
def get_start_timestamp(d=get_datetime(), freq=Frequency.DAYS):
return to_timestamp(get_start_date(d, freq=freq))
def get_end_timestamp(d=get_datetime(), freq=Frequency.DAYS):
return to_timestamp(get_end_date(d, freq=freq))
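# Illustrative examples (added; not part of the original source), assuming get_quarter and
# get_month behave as named:
#   get_start_date(date(2024, 8, 15), freq=Frequency.QUARTERS) -> 2024-07-01
#   get_end_date(date(2024, 8, 15), freq=Frequency.MONTHS)     -> 2024-08-31
#   get_end_timestamp(date(2024, 8, 15))                       -> pd.Timestamp('2024-08-15')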
#########################
def get_period_index(period=PERIOD):
period_length = to_period_length(period)
period_freq = to_period_freq(period)
if period_freq is Frequency.DAYS:
return period_length
elif period_freq is Frequency.WEEKS:
return period_length * DAYS_PER_WEEK
elif period_freq is Frequency.MONTHS:
return period_length * DAYS_PER_MONTH
elif period_freq is Frequency.QUARTERS:
return period_length * DAYS_PER_QUARTER
elif period_freq is Frequency.SEMESTERS:
return period_length * DAYS_PER_SEMESTER
elif period_freq is Frequency.YEARS:
return period_length * DAYS_PER_YEAR
def get_period_length(d=get_datetime(), period=PERIOD, freq=FREQUENCY):
return diff_date(subtract_period(d, period), d, freq=freq)
def get_period_days(d=get_datetime(), period=PERIOD):
if is_null(d):
period_length = to_period_length(period)
period_freq = to_period_freq(period)
return period_length * FREQUENCY_DAY_COUNT[period_freq]
return diff_days(subtract_period(d, period), d)
def get_period_weeks(d=get_datetime(), period=PERIOD):
if is_null(d):
return get_period_days(d, period=period) / DAYS_PER_WEEK
return diff_weeks(subtract_period(d, period), d)
def get_period_months(d=get_datetime(), period=PERIOD):
if is_null(d):
return get_period_days(d, period=period) / DAYS_PER_MONTH
return diff_months(subtract_period(d, period), d)
def get_period_quarters(d=get_datetime(), period=PERIOD):
if is_null(d):
return get_period_days(d, period=period) / DAYS_PER_QUARTER
return diff_quarters(subtract_period(d, period), d)
def get_period_semesters(d=get_datetime(), period=PERIOD):
if is_null(d):
return get_period_days(d, period=period) / DAYS_PER_SEMESTER
return diff_semesters(subtract_period(d, period), d)
def get_period_years(d=get_datetime(), period=PERIOD):
if is_null(d):
return get_period_days(d, period=period) / DAYS_PER_YEAR
return diff_years(subtract_period(d, period), d)
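# Illustrative examples (added; not part of the original source). With a null date the period
# is converted via the average day counts, otherwise via the exact calendar difference, e.g.
# (assuming Frequency('M') is Frequency.MONTHS and FREQUENCY_DAY_COUNT[Frequency.MONTHS]
# equals DAYS_PER_MONTH):
#   get_period_days(None, period='3M')   -> 3 * DAYS_PER_MONTH
#   get_period_months(None, period='3M') -> 3.0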
####################################################################################################
# COMMON CONVERTERS
####################################################################################################
__COMMON_CONVERTERS_______________________________ = ''
# • ARRAY ##########################################################################################
__ARRAY_CONVERTERS________________________________ = ''
def to_array(*args):
if len(args) == 1:
arg = args[0]
if is_array(arg):
return arg
elif is_collection(arg):
return np.array(arg)
return np.array(to_list(*args))
def unarray(a):
if is_array(a):
if len(a) == 1:
return a[0]
return tuple(a)
return a
# • COLLECTION (LIST/DICT/DATAFRAME) ###############################################################
__COLLECTION_CONVERTERS___________________________ = ''
def to_collection(*args):
if len(args) == 1:
arg = args[0]
if is_collection(arg):
return arg
return [arg]
return args
def uncollect(c):
if is_collection(c):
if len(c) == 1:
return c[0]
return tuple(c)
return c
#########################
def collection_to_type(c, x):
if is_frame(x):
return to_frame(c, names=get_names(x), index=get_index(x))
elif is_series(x):
return to_series(c, name=get_names(x), index=get_index(x))
elif is_dict(x):
return dict(zip(get_keys(x), c))
elif is_ordered_set(x):
return to_ordered_set(c)
elif is_set(x):
return to_set(c)
elif is_array(x):
return to_array(c)
return c
def collection_to_common_type(c, x, inclusion=None, exclusion=None):
c = include(c, get_common_keys(c, x, inclusion=inclusion, exclusion=exclusion))
if is_frame(x):
return to_frame(c)
elif is_series(x):
return to_series(c)
elif is_dict(x):
return to_dict(c)
elif is_ordered_set(x):
return to_ordered_set(c)
elif is_set(x):
return to_set(c)
elif is_array(x):
return to_array(c)
return c
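# Illustrative example (added; not part of the original source): collection_to_type() maps a
# plain collection back onto the container type of a reference object, e.g.
#   collection_to_type([1, 2, 3], np.array([0, 0, 0]))       -> np.array([1, 2, 3])
#   collection_to_type([1, 2, 3], {'a': 0, 'b': 0, 'c': 0})  -> {'a': 1, 'b': 2, 'c': 3}
# (keys are taken from the reference dict, assuming get_keys preserves insertion order)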
# • DATAFRAME ######################################################################################
__DATAFRAME_CONVERTERS____________________________ = ''
def to_series(data, name=None, index=None, type=None):
"""Converts the specified collection to a series."""
if is_empty(data):
data = []
type = object
elif is_group(data):
data = data.obj
if is_frame(data):
if count_cols(data) > 1:
return get_cols(data)
series = get_col(data) if not is_empty(data) else pd.Series(data=data, dtype=type)
elif is_series(data):
series = data
else:
series = pd.Series(data=data, dtype=type)
if not is_null(name):
set_names(series, name)
if not is_null(index):
set_index(series, index)
return series
def to_time_series(data, name=None, index=None, type=float):
"""Converts the specified collection to a time series."""
if not is_null(index):
index = to_timestamp(index)
return to_series(data, name=name, index=index, type=type)
def to_frame(data, names=None, index=None, type=None):
"""Converts the specified collection to a dataframe."""
if is_empty(data):
data = []
type = object
elif is_group(data):
data = data.obj
if is_frame(data):
frame = data
elif is_series(data):
frame = data.to_frame()
elif is_dict(data):
frame = pd.DataFrame.from_dict(data, orient='index', dtype=type)
else:
frame = pd.DataFrame(data=data, dtype=type)
if not is_null(names):
set_names(frame, names)
if not is_null(index):
set_index(frame, index)
return frame
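# Illustrative examples (added; not part of the original source), assuming set_names and
# set_index behave as named:
#   to_series([1, 2, 3], name='x')       -> pd.Series([1, 2, 3], name='x')
#   to_frame({'a': [1, 2], 'b': [3, 4]}) -> 2x2 pd.DataFrame with index ['a', 'b']
# (dict input goes through pd.DataFrame.from_dict(..., orient='index'))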
# • DATE ###########################################################################################
__DATE_CONVERTERS_________________________________ = ''
def to_date(x, fmt=DATE_FORMAT):
if is_null(x):
return None
elif is_collection(x):
return apply(to_date, x, fmt=fmt)
elif is_stamp(x):
x = parse_stamp(x)
return create_date(x.year, x.month, x.day)
elif is_timestamp(x):
x = x.to_pydatetime()
return create_date(x.year, x.month, x.day)
elif is_datetime(x):
return create_date(x.year, x.month, x.day)
elif is_date(x):
return x
    # drop the time component so the string branch also returns a date
    return datetime.strptime(x, fmt).date()
def to_datetime(x, fmt=DATE_TIME_FORMAT):
if is_null(x):
return None
elif is_collection(x):
return apply(to_datetime, x, fmt=fmt)
elif is_stamp(x):
return parse_stamp(x)
elif is_timestamp(x):
return x.to_pydatetime()
elif is_datetime(x):
return x
elif is_date(x):
return create_datetime(x.year, x.month, x.day)
return datetime.strptime(x, fmt)
def to_time(x, fmt=TIME_FORMAT):
if is_null(x):
return None
return to_datetime(x, fmt=fmt)
def to_datestamp(d):
if is_null(d):
return None
elif is_stamp(d):
d = parse_stamp(d)
return pd.to_datetime(d).floor('D')
def to_timestamp(d):
if is_null(d):
return None
elif is_stamp(d):
d = parse_stamp(d)
return pd.to_datetime(d)
def to_stamp(x):
if is_null(x):
return None
elif is_collection(x):
return apply(to_stamp, x)
elif is_stamp(x):
return x
return to_datetime(x).timestamp()
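# Illustrative round trip (added; not part of the original source), assuming DATE_FORMAT is
# an ISO-style pattern such as '%Y-%m-%d':
#   to_date('2024-08-15')           -> date(2024, 8, 15)
#   to_datetime(date(2024, 8, 15))  -> datetime(2024, 8, 15, 0, 0)
#   to_stamp(datetime(2024, 8, 15)) -> POSIX timestamp (float, local time zone)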
#########################
def timestamp_to_type(t, x):
"""Converts the specified timestamp to the type of the specified variable."""
if is_collection(t):
return apply(timestamp_to_type, t, x)
elif is_stamp(x):
return to_stamp(t)
elif is_timestamp(x):
return t
elif is_datetime(x):
return to_datetime(t)
elif is_date(x):
return to_date(t)
return t
#########################
def to_period(length, freq=FREQUENCY):
return str(length) + freq.value
def to_period_length(period):
return int(period[0:-1])
def to_period_freq(period):
return Frequency(period[-1].upper())
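# Illustrative example (added; not part of the original source), assuming
# Frequency.MONTHS.value is 'M': to_period(3, Frequency.MONTHS) -> '3M',
# to_period_length('3M') -> 3 and to_period_freq('3M') -> Frequency.MONTHS,
# so the three helpers round-trip a period string.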
# • DICT ###########################################################################################
__DICT_CONVERTERS_________________________________ = ''
def to_dict(c):
"""Converts the specified collection to a dictionary."""
if is_empty(c):
return {}
elif is_table(c):
return c.to_dict()
elif is_dict(c):
return c
return {k: v for k, v in enumerate(c)}
# • LIST ###########################################################################################
__LIST_CONVERTERS_________________________________ = ''
def to_list(*args):
if len(args) == 1:
arg = args[0]
if is_list(arg):
return arg
elif is_collection(arg):
return list(arg)
return [arg]
return list(args)
def unlist(l):
if is_list(l):
if len(l) == 1:
return l[0]
return tuple(l)
return l
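# Illustrative examples (added; not part of the original source):
#   to_list(1, 2, 3)   -> [1, 2, 3]
#   to_list((1, 2, 3)) -> [1, 2, 3]
#   unlist([1])        -> 1
#   unlist([1, 2])     -> (1, 2)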
# • NUMBER #########################################################################################
__NUMBER_CONVERTERS_______________________________ = ''
def to_bool(x):
if is_null(x):
return NAN
elif is_collection(x):
return apply(to_bool, x)
return strtobool(str(x))
def to_int(x):
if is_null(x):
return NAN
elif is_collection(x):
return apply(to_int, x)
return int(x)
def to_float(x):
if is_null(x):
return NAN
elif is_collection(x):
return apply(to_float, x)
return float(x)
# • SET ############################################################################################
__SET_CONVERTERS__________________________________ = ''
def to_set(*args):
if len(args) == 1:
arg = args[0]
if is_set(arg):
return arg
elif is_collection(arg):
return set(arg)
return {arg}
return set(args)
def unset(s):
if is_set(s):
if len(s) == 1:
return get_next(s)
return tuple(s)
return s
##################################################
def to_ordered_set(*args):
if len(args) == 1:
arg = args[0]
if is_ordered_set(arg):
return arg
return OrderedSet(*args)
# • STRING #########################################################################################
__STRING_CONVERTERS_______________________________ = ''
def to_string(x, delimiter=','):
if is_null(x):
return None
elif is_collection(x):
return collapse(x, delimiter=delimiter)
return str(x)
####################################################################################################
# COMMON GENERATORS
####################################################################################################
__COMMON_GENERATORS_______________________________ = ''
# • DATE ###########################################################################################
__DATE_GENERATORS_________________________________ = ''
def create_date(y, m, d):
return date(int(y), int(m), int(d))
def create_datetime(y, m, d):
return datetime(int(y), int(m), int(d))
def create_timestamp(y, m, d):
return pd.Timestamp(int(y), int(m), int(d))
def create_stamp(y, m, d):
return to_stamp(create_datetime(y, m, d))
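# Illustrative examples (added; not part of the original source):
#   create_date(2024, 8, 15)      -> date(2024, 8, 15)
#   create_datetime(2024, 8, 15)  -> datetime(2024, 8, 15, 0, 0)
#   create_timestamp(2024, 8, 15) -> pd.Timestamp('2024-08-15 00:00:00')
#   create_stamp(2024, 8, 15)     -> the corresponding POSIX timestamp (float)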
#########################
def create_date_range(date_from, date_to, periods=None, freq=FREQUENCY, group=GROUP):
if not is_null(periods):
return to_date(pd.date_range(date_from, date_to, periods=periods))
if freq is Frequency.SEMESTERS:
months = [1, 7] if group is Group.FIRST else [6, 12]
return filter_with(create_date_sequence(date_from, date_to, freq=Frequency.QUARTERS,
group=group),
f=lambda d: get_month(d) in months)
f = freq.value
if group is Group.FIRST:
if freq is Frequency.DAYS:
pass
elif freq is Frequency.WEEKS:
f += '-MON'
else:
f += 'S'
return
|
pd.date_range(date_from, date_to, freq=f)
|
pandas.date_range
|
import socket
import logging
from os import mkdir, path
from sys import exc_info, getsizeof
from traceback import extract_tb
import json
import pandas as pd
from datetime import datetime
UDP_SERVER_PORT = 4040
UDP_SERVER_IP = "0.0.0.0"
LOG_FOLDERNAME = 'log'
LOG_FILENAME = 'log.log'
LOG_FILEMODE = 'a'
LOG_FORMAT = '%(asctime)-15s %(levelname)-8s - %(message)s'
LOG_DATEFMT = '%Y-%m-%d %H:%M:%S'
LOG_LEVEL = logging.DEBUG
# create the folder for the log file if it does not exist yet
if not path.exists(LOG_FOLDERNAME):
mkdir(LOG_FOLDERNAME)
# create the dataframe CSV if it does not exist yet
if not path.isfile('dataframe.csv'):
df =
|
pd.DataFrame(columns=['datetime', 'plant', 'temperature', 'air-humidity', 'soil-humidity'])
|
pandas.DataFrame
|