import itertools
import os
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import gridspec
import warnings
import itertools
import re
from matplotlib import pyplot as plt
from natsort import natsorted
from scipy import optimize as optimization
from sklearn.metrics import roc_auc_score
from sklearn.metrics._ranking import _binary_clf_curve
from sklearn.exceptions import UndefinedMetricWarning
from sklearn.utils import resample
def fourPL(x, A, B, C, D):
    """4 parameter logistic function.

    A: response at zero concentration, B: Hill slope,
    C: inflection point (EC50), D: response at infinite concentration.
    """
    return (A - D) / (1.0 + (x / C) ** B) + D
def fit2df(df, model, serum_group='serum ID'):
"""fit model to x, y data in dataframe.
Return a dataframe with fit x, y for plotting
"""
sera = df[serum_group].unique()
antigens = df['antigen'].unique()
secondaries = df['secondary ID'].unique()
plate_ids = df['plate ID'].unique()
prnt = df['PRNT'].unique()
keys = itertools.product(sera, antigens, secondaries, plate_ids, prnt)
df_fit = pd.DataFrame(columns=df.columns)
for serum, antigen, secondary, plate_id, prnt in keys:
print('Fitting {}, {}...'.format(serum, antigen))
sec_dilu_df = df[(df[serum_group] == serum) &
(df['antigen'] == antigen) &
(df['secondary ID'] == secondary) &
(df['plate ID'] == plate_id) &
(df['PRNT'] == prnt)]
sec_dilutions = sec_dilu_df['secondary dilution'].unique()
for sec_dilution in sec_dilutions:
sub_df = sec_dilu_df[(sec_dilu_df['secondary dilution'] == sec_dilution)].reset_index(drop=True)
df_fit_temp = pd.DataFrame()
guess = [0, 1, 5e-4, 1]
xdata = sub_df['serum dilution'].to_numpy()
ydata = sub_df['OD'].to_numpy()
ydata = ydata[xdata > 0]
xdata = xdata[xdata > 0] # concentration has to be positive
params, params_covariance = optimization.curve_fit(model, xdata, ydata, guess, bounds=(0, np.inf), maxfev=1e5)
x_input = np.logspace(np.log10(np.min(xdata)), np.log10(np.max(xdata)), 50)
#y_fit = fivePL(x_input,*params)
y_fit = fourPL(x_input, *params)
df_fit_temp['serum dilution'] = x_input
df_fit_temp['OD'] = y_fit
df_fit_temp['b'] = params[1]
df_fit_temp['c'] = params[2]
df_fit_temp['d'] = params[3]
sub_df_expand = pd.concat(
[sub_df.loc[[0], [serum_group,
'antigen',
'serum type',
'serum cat',
'secondary ID',
'secondary dilution',
'pipeline',
'PRNT',
'plate ID']]] * len(df_fit_temp.index), axis=0).reset_index(drop=True)
df_fit_temp = pd.concat([df_fit_temp, sub_df_expand], axis=1)
            df_fit = pd.concat([df_fit, df_fit_temp])
print('4PL fitting finished')
return df_fit
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int or str, default=None
The label of the positive class.
When ``pos_label=None``, if y_true is in {-1, 1} or {0, 1},
``pos_label`` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute the area under the ROC curve
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
    .. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition
Letters, 2006, 27(8):861-874.
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([0. , 0. , 0.5, 0.5, 1. ])
>>> tpr
array([0. , 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([1.8 , 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
# Add an extra threshold position
# to make sure that the curve starts at (0, 0)
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] * 1.01, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def roc_ci(df, ci):
"""Helper function to compute mean and confidence intervals
from the bootstrapped distribution"""
tpr_mean = df['tpr'].mean()
cis = sns.utils.ci(df['tpr'], ci).tolist()
return pd.Series([tpr_mean] + cis, ['True positive rate', 'ci_low', 'ci_high'])
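# Note: newer seaborn releases no longer provide `sns.utils.ci`, which roc_ci above relies on.
# A minimal drop-in alternative using numpy percentiles (an assumption, not part of the
# original code; ignores NaNs differently than seaborn's nan-aware version):
def roc_ci_np(df, ci):
    """Same output as roc_ci, but computes the interval with numpy percentiles."""
    tpr_mean = df['tpr'].mean()
    low = np.percentile(df['tpr'], 50 - ci / 2)
    high = np.percentile(df['tpr'], 50 + ci / 2)
    return pd.Series([tpr_mean, low, high],
                     ['True positive rate', 'ci_low', 'ci_high'])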
def roc_from_df(df, ci=None):
"""
Helper function to compute ROC curves using pandas.groupby(). Confidence intervals
are computed using bootstrapping with stratified resampling
:param dataframe df: dataframe containing serum OD info
:param int or None ci: Confidence interval of the ROC curves in the unit of percent
(95 would be 95%). If None, confidence intervals are not computed.
:return dataframe rate_df: dataframe contains ROC curves for each condition
"""
aucs = []
n_btstp = 1000
s = {}
fprs = []
tprs = []
thrs = []
y_test = df['serum type'] == 'positive'
y_prob = df['OD']
s['False positive rate'], s['True positive rate'], s['threshold'] = \
roc_curve(y_test, y_prob, pos_label=1, drop_intermediate=False)
try:
s['AUC'] = [roc_auc_score(y_test, y_prob)] * len(s['False positive rate'])
except ValueError as err:
print('antigen {} only has {} serum type. {}'.
format(df['antigen'].unique()[0], y_test.unique()[0], err))
s['AUC'] = [np.nan] * len(s['False positive rate'])
if ci is None:
return pd.Series(s)
else:
for i in range(n_btstp):
df_rsmpl = resample(df, n_samples=len(df), stratify=df['serum type'])
y_test = df_rsmpl['serum type'] == 'positive'
y_prob = df_rsmpl['OD']
fpr_tmp, tpr_tmp, thr_tmp = \
roc_curve(y_test, y_prob, pos_label=1, drop_intermediate=False)
fprs += fpr_tmp.tolist()
tprs += tpr_tmp.tolist()
thrs += thr_tmp.tolist()
try:
aucs.append(roc_auc_score(y_test, y_prob))
except ValueError as err:
aucs.append(np.nan)
        rate_df = pd.DataFrame({'False positive rate': fprs, 'tpr': tprs})
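# Hypothetical continuation (an assumption; the original snippet is cut off here): because
# stratified resampling keeps the class counts fixed, the bootstrapped fpr values fall on a
# common grid, so the curves could be aggregated per fpr value with roc_ci to get a mean tpr
# and its confidence band, e.g.:
#
#   rate_df = rate_df.groupby('False positive rate').apply(roc_ci, ci=ci).reset_index()
#   rate_df['AUC'] = np.nanmean(aucs)
#   return rate_df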
from . import common
import pandas as pd
import matplotlib.pyplot as plt
from skbio.stats.ordination import OrdinationResults
from qiime2 import Artifact
def beta_3d_plot(
artifact, metadata=None, hue=None, azim=-60, elev=30, s=80, ax=None,
figsize=None, hue_order=None
):
"""
Create a 3D scatter plot from PCoA results.
+---------------------+---------------------------------------------------+
| q2-diversity plugin | Example |
+=====================+===================================================+
| QIIME 2 CLI | qiime diversity pcoa [OPTIONS] |
+---------------------+---------------------------------------------------+
| QIIME 2 API | from qiime2.plugins.diversity.methods import pcoa |
+---------------------+---------------------------------------------------+
Parameters
----------
artifact : str or qiime2.Artifact
Artifact file or object from the q2-diversity plugin with the
semantic type ``PCoAResults`` or
``PCoAResults % Properties('biplot')``.
metadata : str or qiime2.Metadata, optional
Metadata file or object.
hue : str, optional
Grouping variable that will produce points with different colors.
azim : int, default: -60
Azimuthal viewing angle.
elev : int, default: 30
Elevation viewing angle.
s : float, default: 80.0
Marker size.
ax : matplotlib.axes.Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
hue_order : list, optional
Specify the order of categorical levels of the 'hue' semantic.
Returns
-------
matplotlib.axes.Axes
Axes object with the plot drawn onto it.
See Also
--------
dokdo.api.ordinate
dokdo.api.beta_2d_plot
dokdo.api.beta_scree_plot
dokdo.api.beta_parallel_plot
dokdo.api.addbiplot
Examples
--------
Below is a simple example:
.. code:: python3
import dokdo
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
qza_file = '/Users/sbslee/Desktop/dokdo/data/moving-pictures-tutorial/unweighted_unifrac_pcoa_results.qza'
metadata_file = '/Users/sbslee/Desktop/dokdo/data/moving-pictures-tutorial/sample-metadata.tsv'
dokdo.beta_3d_plot(qza_file,
metadata_file,
'body-site',
figsize=(8, 8))
plt.tight_layout()
.. image:: images/beta_3d_plot-1.png
We can control the camera angle with ``elev`` and ``azim``:
.. code:: python3
fig = plt.figure(figsize=(14, 7))
ax1 = fig.add_subplot(1, 2, 1, projection='3d')
ax2 = fig.add_subplot(1, 2, 2, projection='3d')
dokdo.beta_3d_plot(qza_file,
metadata_file,
ax=ax1,
hue='body-site',
elev=15)
dokdo.beta_3d_plot(qza_file,
metadata_file,
ax=ax2,
hue='body-site',
azim=70)
plt.tight_layout()
.. image:: images/beta_3d_plot-2.png
"""
if isinstance(artifact, str):
_pcoa_results = Artifact.load(artifact)
else:
_pcoa_results = artifact
ordination_results = _pcoa_results.view(OrdinationResults)
df = ordination_results.samples.iloc[:, :3]
df.columns = ['A1', 'A2', 'A3']
props = ordination_results.proportion_explained
if metadata is None:
df = df
else:
mf = common.get_mf(metadata)
        df = pd.concat([df, mf], axis=1, join='inner')
# Download Census population data by tract
## Upload population data and census boundary files to S3
import numpy as np
import pandas as pd
import geopandas as gpd
import intake
import boto3
import census
from us import states
# Set env
# Can't figure out how to read the API key from env
c = census.Census('2dacc2d1fe8ae85c99e2f934a70576d6f731bb0f', year = 2017)
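# A minimal sketch of reading the key from an environment variable instead of hard-coding it
# (the CENSUS_API_KEY variable name is an assumption, not part of the original pipeline):
#
#   import os
#   c = census.Census(os.environ.get('CENSUS_API_KEY'), year=2017)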
catalog = intake.open_catalog('./catalogs/*.yml')
s3 = boto3.client('s3')
#----------------------------------------------------------------#
# Download 2017 ACS 5-year population data
#----------------------------------------------------------------#
# 2018 is not available for ACS 5-year
# ACS 1-year doesn't have tract-level data
raw = c.acs5.state_county_tract('B01003_001E', states.CA.fips, '037', census.ALL)
df = pd.DataFrame(raw)
# Subset for LA County
df['GEOID'] = df.state + df.county + df.tract
df = df[['GEOID', 'B01003_001E']]
df.rename(columns = {'B01003_001E': 'pop'}, inplace = True)
df = df.sort_values('GEOID', ascending = True)
df.to_parquet('s3://public-health-dashboard/data/raw/pop_by_tract2017.parquet')
"""
# The syntax from censusdata is more similar to the R packages
# But, cleaning GEOID from censusdata package is difficult
pop = pd.DataFrame()
for y in range(2017, 2018):
data = censusdata.download('acs5', y,
censusdata.censusgeo([('state', '06'), ('county', '037'), ('tract', '*')]),
['B01003_001E'])
data['year'] = y
pop = pop.append(data)
"""
#----------------------------------------------------------------#
# Import census tracts and clip to City of LA
#----------------------------------------------------------------#
tract = gpd.read_file('s3://public-health-dashboard/gis/raw/tl_2019_06_tract/').to_crs({'init':'epsg:2229'})
city_boundary = catalog.city_boundary.read().to_crs({'init':'epsg:2229'})
# Number of square feet in one square mile
sqft_to_sqmi = 2.788e+7
# Subset to LA County
tract = tract[tract.COUNTYFP == '037']
# Clip tracts to City of LA. Keep if centroid falls within boundary.
centroids = tract.centroid
centroids = pd.DataFrame(centroids)
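# Hypothetical continuation (an assumption; the original snippet stops here). One way to
# keep only the tracts whose centroid falls within the city boundary is a spatial join
# between the centroid points and the boundary polygon, e.g.:
#
#   centroids_gdf = gpd.GeoDataFrame(geometry=tract.centroid, crs=tract.crs)
#   in_city = gpd.sjoin(centroids_gdf, city_boundary, how='inner', op='intersects')
#   tract_la = tract[tract.index.isin(in_city.index)]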
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import time, os
import matplotlib.pylab as plt
import matplotlib
LINEWIDTH=0.5
c1 = plt.rcParams['axes.prop_cycle'].by_key()['color'][0]
c2 = plt.rcParams['axes.prop_cycle'].by_key()['color'][1]
matplotlib.rcParams.update({
'font.family' :'Myriad Pro',
'font.size' :7,
'axes.labelsize' :'large',
'axes.titlesize' :'large',
'xtick.labelsize':'medium',
'ytick.labelsize':'medium',
'legend.fontsize':'medium',
'axes.linewidth' :LINEWIDTH,
'axes.grid' :False,
'xtick.major.size':2,
'xtick.minor.size':0,
'ytick.major.size':0,
'ytick.minor.size':0,
'legend.numpoints':1,
'legend.frameon':False,
'patch.facecolor':.4*np.ones(3),
'patch.edgecolor':'None',
})
import analysis_util
metadata_names = {'origin1':'Region', 'origin2':'Country', 'genre1':'Genre Class', 'genre2': 'Genre', 'era1': 'Era', 'chordRoot':'Chord Root'}
def plot_metadata_valence(metadata_df, lyric_info_df, plot_col, connectline=False, rotate=False):
#most_popular = global_popular_vals[plot_col] #
most_popular = analysis_util.get_most_popular(metadata_df, plot_col)
overall_happiness, oh_SE = analysis_util.get_sentiment_values(c_lyrics_all, plot_col)
print ('Valence values (sorted)')
print (overall_happiness.sort_values())
print ()
print ()
if plot_col not in ['year','decade', 'era1']:
pdf = overall_happiness.sort_values()
else:
pdf = overall_happiness # .iloc[::-1]
analysis_util.bar_plot(pdf, 1.96*oh_SE, False, connectline, rotate=rotate)
#plt.grid(False)
xlabel, ylabel = 'Valence', metadata_names[plot_col]
if not rotate:
plt.xlabel(xlabel)
plt.ylabel('')
plt.title('')
else:
plt.xlabel(ylabel)
plt.ylabel(xlabel)
plt.title('')
def titlecase(s):
return s[0].upper() + s[1:]
def do_wordshift(catdf, allwords_df, numtop=10, grpby='word'):
mean_happiness = allwords_df.happiness.mean()
avgprevalence = allwords_df.word.value_counts() / len(allwords_df)
word_happiness = pd.DataFrame.from_records(list(analysis_util.happiness_dict.items()), columns=['word','happiness']).set_index('word').happiness
catprevalence = catdf.word.value_counts() / len(catdf)
cat_mean_happiness = catdf.happiness.mean()
#catprevalence - avgprevalence
wordshift = (100.0/np.abs(cat_mean_happiness - mean_happiness)) * (word_happiness - mean_happiness) * (catprevalence - avgprevalence)
wordshift_df = pd.DataFrame({'ws':wordshift, 'wsabs':np.abs(wordshift)})
#print (' sum:', wordshift_df.ws.sum())
#print ('cat meanhappiness vs meanhappiness: %0.4f vs %.4f' % ( cat_mean_happiness , mean_happiness ))
topvals = wordshift_df.sort_values(by='wsabs', ascending=False).iloc[numtop:0:-1]
c1 = 'orange'
c2 = 'blue'
lbls = {}
colors = {}
for cword in topvals.index.values:
# up_or_down = '\\uparrow' if catprevalence.loc[cword]>avgprevalence.loc[cword] else '\\downarrow'
# plus_or_minus = '+' if word_happiness.loc[cword]>mean_happiness else '-'
# spacer = '\\!\\!\\!\\!\\!'
# if topvals.loc[cword].ws > 0:
# lbltext = cword + '$' + up_or_down + spacer + plus_or_minus + '$'
# else:
# lbltext = '$' + up_or_down + spacer + plus_or_minus + '$' + cword
up_or_down = '$\\!\\!\\%s\\!\\!$' % ('uparrow' if catprevalence.loc[cword]>avgprevalence.loc[cword] else 'downarrow')
plus_or_minus = '+' if word_happiness.loc[cword]>mean_happiness else '-'
if topvals.loc[cword].ws > 0:
lbltext = cword + up_or_down + plus_or_minus
else:
lbltext = up_or_down + plus_or_minus + ' ' + cword
lbls[cword] = lbltext
colors[cword] = c1 if word_happiness.loc[cword]>mean_happiness else c2
topvals['lbl'] = pd.Series(lbls)
    topvals['cols'] = pd.Series(colors)
# coding: utf-8
# # CareerCon 2019 - Help Navigate Robots
# ## Robots are smart… by design !!
#
# 
#
# ---
#
# Robots are smart… by design. To fully understand and properly navigate a task, however, they need input about their environment.
#
# In this competition, you’ll help robots recognize the floor surface they’re standing on using data collected from Inertial Measurement Units (IMU sensors).
#
# We’ve collected IMU sensor data while driving a small mobile robot over different floor surfaces on the university premises. The task is to predict which one of the nine floor types (carpet, tiles, concrete) the robot is on using sensor data such as acceleration and velocity. Succeed and you'll help improve the navigation of robots without assistance across many different surfaces, so they won’t fall down on the job.
#
# ### Its a golden chance to help humanity, by helping Robots !
#
# <br>
# <img src="https://media2.giphy.com/media/EizPK3InQbrNK/giphy.gif" border="1" width="400" height="300">
# <br>
# # DATA
# **X_[train/test].csv** - the input data, covering 10 sensor channels and 128 measurements per time series plus three ID columns:
#
# - ```row_id```: The ID for this row.
#
# - ```series_id```: ID number for the measurement series. Foreign key to y_train/sample_submission.
#
# - ```measurement_number```: Measurement number within the series.
#
# The orientation channels encode the current angles how the robot is oriented as a quaternion (see Wikipedia). Angular velocity describes the angle and speed of motion, and linear acceleration components describe how the speed is changing at different times. The 10 sensor channels are:
#
# ```
# orientation_X
#
# orientation_Y
#
# orientation_Z
#
# orientation_W
#
# angular_velocity_X
#
# angular_velocity_Y
#
# angular_velocity_Z
#
# linear_acceleration_X
#
# linear_acceleration_Y
#
# linear_acceleration_Z
# ```
#
# **y_train.csv** - the surfaces for training set.
#
# - ```series_id```: ID number for the measurement series.
#
# - ```group_id```: ID number for all of the measurements taken in a recording session. Provided for the training set only, to enable more cross validation strategies.
#
# - ```surface```: the target for this competition.
#
# **sample_submission.csv** - a sample submission file in the correct format.
# ### Load packages
# In[1]:
import numpy as np
import pandas as pd
import os
from time import time
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from matplotlib import rcParams
get_ipython().run_line_magic('matplotlib', 'inline')
le = preprocessing.LabelEncoder()
from numba import jit
import itertools
from seaborn import countplot, lineplot, barplot
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from scipy.stats import randint as sp_randint
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
import matplotlib.style as style
style.use('ggplot')
import warnings
warnings.filterwarnings('ignore')
import gc
gc.enable()
get_ipython().system('ls ../input/')
get_ipython().system('ls ../input/robots-best-submission')
print ("Ready !")
# ### Load data
# In[2]:
data = pd.read_csv('../input/career-con-2019/X_train.csv')
tr = pd.read_csv('../input/career-con-2019/X_train.csv')
sub = pd.read_csv('../input/career-con-2019/sample_submission.csv')
test = pd.read_csv('../input/career-con-2019/X_test.csv')
target = pd.read_csv('../input/career-con-2019/y_train.csv')
print ("Data is ready !!")
# # Data exploration
# In[3]:
data.head()
# In[4]:
test.head()
# In[5]:
target.head()
# In[6]:
len(data.measurement_number.value_counts())
# Each series has 128 measurements.
#
# **1 series = 128 measurements**.
#
# For example, the series with series_id=0 has surface = *fine_concrete* and 128 measurements.
# ### describe (basic stats)
# In[7]:
data.describe()
# In[8]:
test.describe()
# In[9]:
target.describe()
# ### There is missing data in test and train data
# In[10]:
totalt = data.isnull().sum().sort_values(ascending=False)
percent = (data.isnull().sum()/data.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([totalt, percent], axis=1, keys=['Total', 'Percent'])
print ("Missing Data at Training")
missing_data.tail()
# In[11]:
totalt = test.isnull().sum().sort_values(ascending=False)
percent = (test.isnull().sum()/test.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([totalt, percent], axis=1, keys=['Total', 'Percent'])
print ("Missing Data at Test")
missing_data.tail()
# In[12]:
print ("Test has ", (test.shape[0]-data.shape[0])/128, "series more than Train (later I will prove it) = 768 registers")
dif = test.shape[0]-data.shape[0]
print ("Let's check this extra 6 series")
test.tail(768).describe()
# If we look at the features: orientation, angular velocity and linear acceleration, we can see big differences between **max** and **min** from entire test vs 6 extra test's series (see **linear_acceleration_Z**).
#
# Obviously we are comparing 3810 series vs 6 series so this is not a big deal.
# ### goup_id will be important !!
# In[13]:
target.groupby('group_id').surface.nunique().max()
# In[14]:
target['group_id'].nunique()
# **73 groups**
# **Each group_id is a unique recording session and has only one surface type**
# In[15]:
sns.set(style='darkgrid')
sns.countplot(y = 'surface',
data = target,
order = target['surface'].value_counts().index)
plt.show()
# ### Target feature - surface and group_id distribution
# Let's show now the distribution of target feature - surface and group_id.
# by @gpreda.
# In[16]:
fig, ax = plt.subplots(1,1,figsize=(26,8))
tmp = pd.DataFrame(target.groupby(['group_id', 'surface'])['series_id'].count().reset_index())
m = tmp.pivot(index='surface', columns='group_id', values='series_id')
s = sns.heatmap(m, linewidths=.1, linecolor='black', annot=True, cmap="YlGnBu")
s.set_title('Number of surface category per group_id', size=16)
plt.show()
# We need to classify on which surface our robot is standing.
#
# Multi-class classification
#
# 9 classes (surface)
# In[17]:
plt.figure(figsize=(23,5))
sns.set(style="darkgrid")
countplot(x="group_id", data=target, order = target['group_id'].value_counts().index)
plt.show()
# **So, we have 3810 train series, and 3816 test series.
# Let's engineer some features!**
#
# ## Example: Series 1
#
# Let's have a look at the values of features in a single time-series, for example series 1 ```series_id=0```
#
# Click to see all measurements of the **first series**
# In[18]:
serie1 = tr.head(128)
serie1.head()
# In[19]:
serie1.describe()
# In[20]:
plt.figure(figsize=(26, 16))
for i, col in enumerate(serie1.columns[3:]):
plt.subplot(3, 4, i + 1)
plt.plot(serie1[col])
plt.title(col)
# In this example, we can see quite interesting behavior:
# 1. Orientation X increases
# 2. Orientation Y decreases
# 3. We don't see any kind of pattern except for linear_acceleration_Y
#
# And we know that in this series, the robot moved through "fine_concrete".
# In[21]:
target.head(1)
# In[22]:
del serie1
gc.collect()
# ## Visualizing Series
#
# Earlier, I showed series 1 as an example.
#
# **This code allows you to visualize any series.**
#
# From: *Code Snippet For Visualizing Series Id by @shaz13*
# In[23]:
series_dict = {}
for series in (data['series_id'].unique()):
series_dict[series] = data[data['series_id'] == series]
# In[24]:
def plotSeries(series_id):
style.use('ggplot')
plt.figure(figsize=(28, 16))
print(target[target['series_id'] == series_id]['surface'].values[0].title())
for i, col in enumerate(series_dict[series_id].columns[3:]):
if col.startswith("o"):
color = 'red'
elif col.startswith("a"):
color = 'green'
else:
color = 'blue'
if i >= 7:
i+=1
plt.subplot(3, 4, i + 1)
plt.plot(series_dict[series_id][col], color=color, linewidth=3)
plt.title(col)
# **Now, let's plot series 15 (as an example; try whichever series you want)**
# In[25]:
id_series = 15
plotSeries(id_series)
# In[26]:
del series_dict
gc.collect()
# <br>
# ### Correlations (Part I)
# In[27]:
f,ax = plt.subplots(figsize=(8, 8))
sns.heatmap(tr.iloc[:,3:].corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
# **Correlations test (click "code")**
# In[28]:
f,ax = plt.subplots(figsize=(8, 8))
sns.heatmap(test.iloc[:,3:].corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
# Well, this is important: there is a **strong correlation** between:
# - angular_velocity_Z and angular_velocity_Y
# - orientation_X and orientation_Y
# - orientation_Y and orientation_Z
#
# Moreover, test has different correlations than training, for example:
#
# - angular_velocity_Z and orientation_X: -0.1 (training) and 0.1 (test). Anyway, it is too small in both cases, so it should not be a problem.
# ## Fourier Analysis
#
# My hope was that different surface types yield (visible) differences in the frequency spectrum of the sensor measurements.
#
# Machine learning techniques might learn frequency filters on their own, but why not give the machine a little head start? So I computed the cyclic FFT for the angular velocity and linear acceleration sensors and plotted the mean and standard deviation of the absolute values of the frequency components per training surface category (leaving out frequency 0, i.e. constants like sensor bias, earth gravity, ...).
#
# The sensors show some different frequency characteristics (see plots below), but unfortunately the surface categories all have similar (to the human eye) shapes, varying mostly in total power, and the standard deviations are high (compared to the differences in the means). So there are no nice strong characteristic peaks for surface types. But that does not mean that there is nothing detectable by more sophisticated statistical methods.
#
# This article http://www.kaggle.com/christoffer/establishing-sampling-frequency makes a convincing case that the sampling frequency is around 400 Hz, so according to that you would see the frequency range of 3-200 Hz in the diagrams (and aliased higher frequencies).
#
# by [@trohwer64](https://www.kaggle.com/trohwer64)
# In[29]:
get_ipython().system('ls ../input')
# In[30]:
train_x = pd.read_csv('../input/career-con-2019/X_train.csv')
train_y = pd.read_csv('../input/career-con-2019/y_train.csv')
# In[31]:
import math
def prepare_data(t):
def f(d):
d=d.sort_values(by=['measurement_number'])
return pd.DataFrame({
'lx':[ d['linear_acceleration_X'].values ],
'ly':[ d['linear_acceleration_Y'].values ],
'lz':[ d['linear_acceleration_Z'].values ],
'ax':[ d['angular_velocity_X'].values ],
'ay':[ d['angular_velocity_Y'].values ],
'az':[ d['angular_velocity_Z'].values ],
})
t= t.groupby('series_id').apply(f)
def mfft(x):
return [ x/math.sqrt(128.0) for x in np.absolute(np.fft.fft(x)) ][1:65]
t['lx_f']=[ mfft(x) for x in t['lx'].values ]
t['ly_f']=[ mfft(x) for x in t['ly'].values ]
t['lz_f']=[ mfft(x) for x in t['lz'].values ]
t['ax_f']=[ mfft(x) for x in t['ax'].values ]
t['ay_f']=[ mfft(x) for x in t['ay'].values ]
t['az_f']=[ mfft(x) for x in t['az'].values ]
return t
# In[32]:
t=prepare_data(train_x)
t=pd.merge(t,train_y[['series_id','surface','group_id']],on='series_id')
t=t.rename(columns={"surface": "y"})
# In[33]:
def aggf(d, feature):
va= np.array(d[feature].tolist())
mean= sum(va)/va.shape[0]
var= sum([ (va[i,:]-mean)**2 for i in range(va.shape[0]) ])/va.shape[0]
dev= [ math.sqrt(x) for x in var ]
return pd.DataFrame({
'mean': [ mean ],
'dev' : [ dev ],
})
display={
'hard_tiles_large_space':'r-.',
'concrete':'g-.',
'tiled':'b-.',
'fine_concrete':'r-',
'wood':'g-',
'carpet':'b-',
'soft_pvc':'y-',
'hard_tiles':'r--',
'soft_tiles':'g--',
}
# In[34]:
import matplotlib.pyplot as plt
plt.figure(figsize=(14, 8*7))
#plt.margins(x=0.0, y=0.0)
#plt.tight_layout()
# plt.figure()
features=['lx_f','ly_f','lz_f','ax_f','ay_f','az_f']
count=0
for feature in features:
stat= t.groupby('y').apply(aggf,feature)
stat.index= stat.index.droplevel(-1)
b=[*range(len(stat.at['carpet','mean']))]
count+=1
plt.subplot(len(features)+1,1,count)
for i,(k,v) in enumerate(display.items()):
plt.plot(b, stat.at[k,'mean'], v, label=k)
# plt.errorbar(b, stat.at[k,'mean'], yerr=stat.at[k,'dev'], fmt=v)
leg = plt.legend(loc='best', ncol=3, mode="expand", shadow=True, fancybox=True)
plt.title("sensor: " + feature)
plt.xlabel("frequency component")
plt.ylabel("amplitude")
count+=1
plt.subplot(len(features)+1,1,count)
k='concrete'
v=display[k]
feature='lz_f'
stat= t.groupby('y').apply(aggf,feature)
stat.index= stat.index.droplevel(-1)
b=[*range(len(stat.at['carpet','mean']))]
plt.errorbar(b, stat.at[k,'mean'], yerr=stat.at[k,'dev'], fmt=v)
plt.title("sample for error bars (lz_f, surface concrete)")
plt.xlabel("frequency component")
plt.ylabel("amplitude")
plt.show()
# In[35]:
del train_x, train_y
gc.collect()
# ## Is it an Humanoid Robot instead of a car?
#
# 
#
# **Acceleration**
# - X (mean at 0)
# - Y axis is centered at a value which shows us the (straight) movement.
# - Z axis is centered at 10 (+/- 9.8), which is gravity!! You can see how the robot bounces.
#
# Angular velocity (X, Y, Z) has mean (0, 0, 0), so there is no net rotation about those axes (measured with an encoder or potentiometer).
#
# **Fourier**
#
# At a frequency of about 3 Hz we can see an acceleration peak; I think that acceleration represents one step.
# Maybe we can suppose that every step is caused by many different movements, which is why there are accelerations at different frequencies.
#
# Angular velocity represents spins.
# Every time the engine/servo spins, the robot takes a step, which links the acceleration to the angular velocity.
# ---
#
# # Feature Engineering
# In[36]:
def plot_feature_distribution(df1, df2, label1, label2, features,a=2,b=5):
i = 0
sns.set_style('whitegrid')
plt.figure()
fig, ax = plt.subplots(a,b,figsize=(17,9))
for feature in features:
i += 1
plt.subplot(a,b,i)
sns.kdeplot(df1[feature], bw=0.5,label=label1)
sns.kdeplot(df2[feature], bw=0.5,label=label2)
plt.xlabel(feature, fontsize=9)
locs, labels = plt.xticks()
plt.tick_params(axis='x', which='major', labelsize=8)
plt.tick_params(axis='y', which='major', labelsize=8)
plt.show();
# In[37]:
features = data.columns.values[3:]
plot_feature_distribution(data, test, 'train', 'test', features)
# Good news: our basic features have the **same distribution (normal) on test and training**. There are some differences between *orientation_X*, *orientation_Y* and *linear_acceleration_Y*.
#
# I will try **StandardScaler** to fix this, and remember: orientation, angular velocity and linear acceleration are measured in different units, so scaling might be a good choice.
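# A minimal sketch of that scaling idea (an assumption; the notebook does not actually run
# this step): fit a StandardScaler on the train sensor columns only and apply the same
# transform to test, so both share zero mean and unit variance.
#
#   sensor_cols = data.columns[3:13]
#   scaler = StandardScaler().fit(data[sensor_cols])
#   data[sensor_cols] = scaler.transform(data[sensor_cols])
#   test[sensor_cols] = scaler.transform(test[sensor_cols])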
# In[38]:
def plot_feature_class_distribution(classes,tt, features,a=5,b=2):
i = 0
sns.set_style('whitegrid')
plt.figure()
fig, ax = plt.subplots(a,b,figsize=(16,24))
for feature in features:
i += 1
plt.subplot(a,b,i)
for clas in classes:
ttc = tt[tt['surface']==clas]
sns.kdeplot(ttc[feature], bw=0.5,label=clas)
plt.xlabel(feature, fontsize=9)
locs, labels = plt.xticks()
plt.tick_params(axis='x', which='major', labelsize=8)
plt.tick_params(axis='y', which='major', labelsize=8)
plt.show();
# In[39]:
classes = (target['surface'].value_counts()).index
aux = data.merge(target, on='series_id', how='inner')
plot_feature_class_distribution(classes, aux, features)
# **Normal distribution**
#
# There are obviously differences between *surfaces*, and that's good; we will focus on that in order to classify them better.
#
# Knowing these differences, and that the variables follow a normal distribution (in most cases), we need to add new features like ```mean, std, median, range ...``` (for each variable).
#
# However, I will try to fix *orientation_X* and *orientation_Y* as I explained before, by scaling and normalizing the data.
#
# ---
#
# ### Now with a new scale (more more precision)
# In[40]:
plt.figure(figsize=(26, 16))
for i,col in enumerate(aux.columns[3:13]):
ax = plt.subplot(3,4,i+1)
ax = plt.title(col)
for surface in classes:
surface_feature = aux[aux['surface'] == surface]
sns.kdeplot(surface_feature[col], label = surface)
# ### Histogram for main features
# In[41]:
plt.figure(figsize=(26, 16))
for i, col in enumerate(data.columns[3:]):
ax = plt.subplot(3, 4, i + 1)
sns.distplot(data[col], bins=100, label='train')
sns.distplot(test[col], bins=100, label='test')
ax.legend()
# ## Step 0 : quaternions
# Orientation - quaternion coordinates
# You could notice that there are 4 coordinates: X, Y, Z, W.
#
# Usually we have X, Y, Z - Euler Angles. But Euler Angles are limited by a phenomenon called "gimbal lock," which prevents them from measuring orientation when the pitch angle approaches +/- 90 degrees. Quaternions provide an alternative measurement technique that does not suffer from gimbal lock. Quaternions are less intuitive than Euler Angles and the math can be a little more complicated.
#
# Here are some articles about it:
#
# http://www.chrobotics.com/library/understanding-quaternions
#
# http://www.tobynorris.com/work/prog/csharp/quatview/help/orientations_and_quaternions.htm
#
# Basically 3D coordinates are converted to 4D vectors.
# In[42]:
# https://stackoverflow.com/questions/53033620/how-to-convert-euler-angles-to-quaternions-and-get-the-same-euler-angles-back-fr?rq=1
def quaternion_to_euler(x, y, z, w):
import math
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
X = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
Y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
Z = math.atan2(t3, t4)
return X, Y, Z
# In[43]:
def fe_step0 (actual):
# https://www.mathworks.com/help/aeroblks/quaternionnorm.html
# https://www.mathworks.com/help/aeroblks/quaternionmodulus.html
# https://www.mathworks.com/help/aeroblks/quaternionnormalize.html
# Spoiler: you don't need this ;)
actual['norm_quat'] = (actual['orientation_X']**2 + actual['orientation_Y']**2 + actual['orientation_Z']**2 + actual['orientation_W']**2)
actual['mod_quat'] = (actual['norm_quat'])**0.5
actual['norm_X'] = actual['orientation_X'] / actual['mod_quat']
actual['norm_Y'] = actual['orientation_Y'] / actual['mod_quat']
actual['norm_Z'] = actual['orientation_Z'] / actual['mod_quat']
actual['norm_W'] = actual['orientation_W'] / actual['mod_quat']
return actual
#
# > *Are there any reasons to not automatically normalize a quaternion? And if there are, what quaternion operations do result in non-normalized quaternions?*
#
# Any operation that produces a quaternion will need to be normalized because floating-point precision errors will cause it to not be unit length.
# I would advise against standard routines performing normalization automatically for performance reasons.
# Any competent programmer should be aware of the precision issues and be able to normalize the quantities when necessary - and it is not always necessary to have a unit length quaternion.
# The same is true for vector operations.
#
# source: https://stackoverflow.com/questions/11667783/quaternion-and-normalization
# In[44]:
data = fe_step0(data)
test = fe_step0(test)
print(data.shape)
data.head()
# In[45]:
fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, figsize=(18, 5))
ax1.set_title('quaternion X')
sns.kdeplot(data['norm_X'], ax=ax1, label="train")
sns.kdeplot(test['norm_X'], ax=ax1, label="test")
ax2.set_title('quaternion Y')
sns.kdeplot(data['norm_Y'], ax=ax2, label="train")
sns.kdeplot(test['norm_Y'], ax=ax2, label="test")
ax3.set_title('quaternion Z')
sns.kdeplot(data['norm_Z'], ax=ax3, label="train")
sns.kdeplot(test['norm_Z'], ax=ax3, label="test")
ax4.set_title('quaternion W')
sns.kdeplot(data['norm_W'], ax=ax4, label="train")
sns.kdeplot(test['norm_W'], ax=ax4, label="test")
plt.show()
# ## Step 1: (x, y, z, w) -> (x,y,z) quaternions to euler angles
# In[46]:
def fe_step1 (actual):
"""Quaternions to Euler Angles"""
x, y, z, w = actual['norm_X'].tolist(), actual['norm_Y'].tolist(), actual['norm_Z'].tolist(), actual['norm_W'].tolist()
nx, ny, nz = [], [], []
for i in range(len(x)):
xx, yy, zz = quaternion_to_euler(x[i], y[i], z[i], w[i])
nx.append(xx)
ny.append(yy)
nz.append(zz)
actual['euler_x'] = nx
actual['euler_y'] = ny
actual['euler_z'] = nz
return actual
# In[47]:
data = fe_step1(data)
test = fe_step1(test)
print (data.shape)
data.head()
# 
# In[48]:
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(15, 5))
ax1.set_title('Roll')
sns.kdeplot(data['euler_x'], ax=ax1, label="train")
sns.kdeplot(test['euler_x'], ax=ax1, label="test")
ax2.set_title('Pitch')
sns.kdeplot(data['euler_y'], ax=ax2, label="train")
sns.kdeplot(test['euler_y'], ax=ax2, label="test")
ax3.set_title('Yaw')
sns.kdeplot(data['euler_z'], ax=ax3, label="train")
sns.kdeplot(test['euler_z'], ax=ax3, label="test")
plt.show()
# **Euler angles** are really important, and we have a problem with Z.
#
# ### Why Orientation_Z (euler angle Z) is so important?
#
# We have a robot moving around; imagine a robot moving straight over different surfaces (each with different features), for example concrete and hard tile floor. Our robot can **bounce** or **balance** itself a little bit if the surface is not flat and smooth; that's why we need to work with quaternions and take care of orientation_Z.
#
# 
# In[49]:
data.head()
# ## Step 2: + Basic features
# In[50]:
def feat_eng(data):
df = pd.DataFrame()
data['totl_anglr_vel'] = (data['angular_velocity_X']**2 + data['angular_velocity_Y']**2 + data['angular_velocity_Z']**2)** 0.5
data['totl_linr_acc'] = (data['linear_acceleration_X']**2 + data['linear_acceleration_Y']**2 + data['linear_acceleration_Z']**2)**0.5
data['totl_xyz'] = (data['orientation_X']**2 + data['orientation_Y']**2 + data['orientation_Z']**2)**0.5
data['acc_vs_vel'] = data['totl_linr_acc'] / data['totl_anglr_vel']
def mean_change_of_abs_change(x):
return np.mean(np.diff(np.abs(np.diff(x))))
for col in data.columns:
if col in ['row_id','series_id','measurement_number']:
continue
df[col + '_mean'] = data.groupby(['series_id'])[col].mean()
df[col + '_median'] = data.groupby(['series_id'])[col].median()
df[col + '_max'] = data.groupby(['series_id'])[col].max()
df[col + '_min'] = data.groupby(['series_id'])[col].min()
df[col + '_std'] = data.groupby(['series_id'])[col].std()
df[col + '_range'] = df[col + '_max'] - df[col + '_min']
df[col + '_maxtoMin'] = df[col + '_max'] / df[col + '_min']
df[col + '_mean_abs_chg'] = data.groupby(['series_id'])[col].apply(lambda x: np.mean(np.abs(np.diff(x))))
df[col + '_mean_change_of_abs_change'] = data.groupby('series_id')[col].apply(mean_change_of_abs_change)
df[col + '_abs_max'] = data.groupby(['series_id'])[col].apply(lambda x: np.max(np.abs(x)))
df[col + '_abs_min'] = data.groupby(['series_id'])[col].apply(lambda x: np.min(np.abs(x)))
df[col + '_abs_avg'] = (df[col + '_abs_min'] + df[col + '_abs_max'])/2
return df
# In[51]:
get_ipython().run_cell_magic('time', '', 'data = feat_eng(data)\ntest = feat_eng(test)\nprint ("New features: ",data.shape)')
# In[52]:
data.head()
# ## New advanced features
# **Useful functions**
# In[53]:
from scipy.stats import kurtosis
from scipy.stats import skew
def _kurtosis(x):
return kurtosis(x)
def CPT5(x):
den = len(x)*np.exp(np.std(x))
return sum(np.exp(x))/den
def skewness(x):
return skew(x)
def SSC(x):
x = np.array(x)
x = np.append(x[-1], x)
x = np.append(x,x[1])
xn = x[1:len(x)-1]
xn_i2 = x[2:len(x)] # xn+1
xn_i1 = x[0:len(x)-2] # xn-1
ans = np.heaviside((xn-xn_i1)*(xn-xn_i2),0)
return sum(ans[1:])
def wave_length(x):
x = np.array(x)
x = np.append(x[-1], x)
x = np.append(x,x[1])
xn = x[1:len(x)-1]
xn_i2 = x[2:len(x)] # xn+1
return sum(abs(xn_i2-xn))
def norm_entropy(x):
tresh = 3
return sum(np.power(abs(x),tresh))
def SRAV(x):
SRA = sum(np.sqrt(abs(x)))
return np.power(SRA/len(x),2)
def mean_abs(x):
return sum(abs(x))/len(x)
def zero_crossing(x):
x = np.array(x)
x = np.append(x[-1], x)
x = np.append(x,x[1])
xn = x[1:len(x)-1]
xn_i2 = x[2:len(x)] # xn+1
return sum(np.heaviside(-xn*xn_i2,0))
# These advanced features are based on robust statistics.
# In[54]:
def fe_advanced_stats(data):
df = pd.DataFrame()
for col in data.columns:
if col in ['row_id','series_id','measurement_number']:
continue
if 'orientation' in col:
continue
print ("FE on column ", col, "...")
df[col + '_skew'] = data.groupby(['series_id'])[col].skew()
df[col + '_mad'] = data.groupby(['series_id'])[col].mad()
df[col + '_q25'] = data.groupby(['series_id'])[col].quantile(0.25)
df[col + '_q75'] = data.groupby(['series_id'])[col].quantile(0.75)
df[col + '_q95'] = data.groupby(['series_id'])[col].quantile(0.95)
df[col + '_iqr'] = df[col + '_q75'] - df[col + '_q25']
df[col + '_CPT5'] = data.groupby(['series_id'])[col].apply(CPT5)
df[col + '_SSC'] = data.groupby(['series_id'])[col].apply(SSC)
df[col + '_skewness'] = data.groupby(['series_id'])[col].apply(skewness)
df[col + '_wave_lenght'] = data.groupby(['series_id'])[col].apply(wave_length)
df[col + '_norm_entropy'] = data.groupby(['series_id'])[col].apply(norm_entropy)
df[col + '_SRAV'] = data.groupby(['series_id'])[col].apply(SRAV)
df[col + '_kurtosis'] = data.groupby(['series_id'])[col].apply(_kurtosis)
df[col + '_zero_crossing'] = data.groupby(['series_id'])[col].apply(zero_crossing)
return df
# - Frequency of the max value
# - Frequency of the min value
# - Count Positive values
# - Count Negative values
# - Count zeros
# In[55]:
basic_fe = ['linear_acceleration_X','linear_acceleration_Y','linear_acceleration_Z',
'angular_velocity_X','angular_velocity_Y','angular_velocity_Z']
# In[56]:
def fe_plus (data):
aux = pd.DataFrame()
for serie in data.index:
#if serie%500 == 0: print ("> Serie = ",serie)
aux = X_train[X_train['series_id']==serie]
for col in basic_fe:
data.loc[serie,col + '_unq'] = aux[col].round(3).nunique()
data.loc[serie,col + 'ratio_unq'] = aux[col].round(3).nunique()/18
try:
data.loc[serie,col + '_freq'] = aux[col].value_counts().idxmax()
except:
data.loc[serie,col + '_freq'] = 0
data.loc[serie,col + '_max_freq'] = aux[aux[col] == aux[col].max()].shape[0]
data.loc[serie,col + '_min_freq'] = aux[aux[col] == aux[col].min()].shape[0]
data.loc[serie,col + '_pos_freq'] = aux[aux[col] >= 0].shape[0]
data.loc[serie,col + '_neg_freq'] = aux[aux[col] < 0].shape[0]
data.loc[serie,col + '_nzeros'] = (aux[col]==0).sum(axis=0)
# ### Important !
# As you can see in this kernel https://www.kaggle.com/anjum48/leakage-within-the-train-dataset
#
# As discussed in the discussion forums (https://www.kaggle.com/c/career-con-2019/discussion/87239#latest-508136) it looks as if each series is part of longer acquisition periods that have been cut up into chunks of 128 samples.
#
# This means that each series is not truly independent and there is leakage between them via the orientation data. Therefore if you have any features that use orientation, you will get a very high CV score due to this leakage in the train set.
#
# [This kernel](https://www.kaggle.com/anjum48/leakage-within-the-train-dataset) will show you how it is possible to get a CV score of 0.992 using only the **orientation data**.
#
# ---
#
# **So I recommend not to use orientation information**
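# A minimal sketch of acting on that recommendation (an assumption; the notebook below still
# keeps every feature): drop all engineered columns derived from the orientation / quaternion /
# Euler channels before training.
#
#   orientation_like = [c for c in data.columns
#                       if any(k in c for k in ('orientation', 'norm_', 'euler_', 'quat'))]
#   data_no_orient = data.drop(columns=orientation_like)
#   test_no_orient = test.drop(columns=[c for c in orientation_like if c in test.columns])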
# ## Correlations (Part II)
# In[57]:
#https://stackoverflow.com/questions/17778394/list-highest-correlation-pairs-from-a-large-correlation-matrix-in-pandas
corr_matrix = data.corr().abs()
raw_corr = data.corr()
sol = (corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
.stack()
.sort_values(ascending=False))
top_corr = pd.DataFrame(sol).reset_index()
top_corr.columns = ["var1", "var2", "abs corr"]
# with .abs() we lost the sign, and it's very important.
for x in range(len(top_corr)):
var1 = top_corr.iloc[x]["var1"]
var2 = top_corr.iloc[x]["var2"]
corr = raw_corr[var1][var2]
top_corr.at[x, "raw corr"] = corr
# In[58]:
top_corr.head(15)
# ### Filling missing NAs and infinite data ∞ by zeroes 0
# In[59]:
data.fillna(0,inplace=True)
test.fillna(0,inplace=True)
data.replace(-np.inf,0,inplace=True)
data.replace(np.inf,0,inplace=True)
test.replace(-np.inf,0,inplace=True)
test.replace(np.inf,0,inplace=True)
# ## Label encoding
# In[60]:
target.head()
# In[61]:
target['surface'] = le.fit_transform(target['surface'])
# In[62]:
target['surface'].value_counts()
# In[63]:
target.head()
# # Run Model
# **Use random_state with Random Forest**
#
# If you don't use random_state you will get a different solution every time; sometimes you will be lucky, but other times you will waste time comparing runs.
# **Validation Strategy: Stratified KFold**
# In[64]:
folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=59)
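# Given the leakage discussion above, a grouped split is a reasonable alternative (a sketch,
# not what this notebook uses): GroupKFold on group_id keeps whole recording sessions out of
# the validation fold, so orientation leakage cannot inflate the CV score.
#
#   group_folds = GroupKFold(n_splits=5)
#   group_splits = group_folds.split(data.values, target['surface'].values,
#                                    groups=target['group_id'].values)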
# In[65]:
predicted = np.zeros((test.shape[0],9))
measured= np.zeros((data.shape[0]))
score = 0
# In[66]:
for times, (trn_idx, val_idx) in enumerate(folds.split(data.values,target['surface'].values)):
model = RandomForestClassifier(n_estimators=500, n_jobs = -1)
#model = RandomForestClassifier(n_estimators=500, max_depth=10, min_samples_split=5, n_jobs=-1)
model.fit(data.iloc[trn_idx],target['surface'][trn_idx])
measured[val_idx] = model.predict(data.iloc[val_idx])
predicted += model.predict_proba(test)/folds.n_splits
score += model.score(data.iloc[val_idx],target['surface'][val_idx])
print("Fold: {} score: {}".format(times,model.score(data.iloc[val_idx],target['surface'][val_idx])))
importances = model.feature_importances_
indices = np.argsort(importances)
features = data.columns
if model.score(data.iloc[val_idx],target['surface'][val_idx]) > 0.92000:
hm = 30
plt.figure(figsize=(7, 10))
plt.title('Feature Importances')
plt.barh(range(len(indices[:hm])), importances[indices][:hm], color='b', align='center')
plt.yticks(range(len(indices[:hm])), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show()
gc.collect()
# In[67]:
print('Avg Accuracy RF', score / folds.n_splits)
# In[68]:
confusion_matrix(measured,target['surface'])
# ### Confusion Matrix Plot
# In[69]:
# https://www.kaggle.com/artgor/where-do-the-robots-drive
def plot_confusion_matrix(truth, pred, classes, normalize=False, title=''):
cm = confusion_matrix(truth, pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.figure(figsize=(10, 10))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix', size=15)
plt.colorbar(fraction=0.046, pad=0.04)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.grid(False)
plt.tight_layout()
# In[70]:
plot_confusion_matrix(target['surface'], measured, le.classes_)
# ### Submission (Part I)
# In[71]:
sub['surface'] = le.inverse_transform(predicted.argmax(axis=1))
sub.to_csv('submission.csv', index=False)
sub.head()
# ### Best Submission
# In[72]:
best_sub = pd.read_csv('../input/robots-best-submission/final_submission.csv')
best_sub.to_csv('best_submission.csv', index=False)
best_sub.head(10)
# ## References
#
# [1] https://www.kaggle.com/vanshjatana/help-humanity-by-helping-robots-4e306b
#
# [2] https://www.kaggle.com/artgor/where-do-the-robots-drive
#
# [3] https://www.kaggle.com/gpreda/robots-need-help
#
# [4] https://www.kaggle.com/vanshjatana/help-humanity-by-helping-robots-4e306b by [@vanshjatana](https://www.kaggle.com/vanshjatana)
# # ABOUT Submissions & Leaderboard
# This kernel [distribution hack](https://www.kaggle.com/donkeys/distribution-hack) by [@donkeys](https://www.kaggle.com/donkeys) simply produces 9 output files, one for each target category.
# I submitted each of these to the competition to see how much of each target type exists in the test set distribution. Results:
#
# - carpet 0.06
# - concrete 0.16
# - fine concrete 0.09
# - hard tiles 0.06
# - hard tiles large space 0.10
# - soft pvc 0.17
# - soft tiles 0.23
# - tiled 0.03
# - wood 0.06
#
# Also posted a discussion [thread](https://www.kaggle.com/c/career-con-2019/discussion/85204)
#
#
# **by [@ninoko](https://www.kaggle.com/ninoko)**
#
# I've probed the public leaderboard and this is what I got:
# There are far fewer surfaces like wood or tiled, and far more soft and hard tiles in the public leaderboard. This may be why CV and LB results differ so strangely.
#
# 
# **I will analyze my best submissions in order to find something interesting.**
#
# Please, feel free to optimize this code.
# In[73]:
sub073 = pd.read_csv('../input/robots-best-submission/mybest0.73.csv')
sub072 = pd.read_csv('../input/robots-best-submission/sub_0.72.csv')
sub072_2 = pd.read_csv('../input/robots-best-submission/sub_0.72_2.csv')
import numpy as np
import pandas as pd
import json
from collections import defaultdict
import spacy
def sentence_segment(text):
nlp = spacy.load("en_core_web_sm")
nlp_text = nlp(text)
return {'named_ents': [ent.string.strip() for ent in nlp_text.ents], 'sents': [sent.text for sent in nlp_text.sents]}
def main():
with open("/Users/tkrollins/OneDrive/Courses/capstone/question-answering/data/raw/train-v2.0.json") as data:
squad = json.load(data)['data']
articles = []
questions = defaultdict(list)
for i, article in enumerate(squad):
paragraphs = []
for j, paragraph in enumerate(article['paragraphs']):
for Q in paragraph['qas']:
questions['question_text'].append(Q['question'])
questions['is_impossible'].append(Q['is_impossible'])
questions['article'].append(i)
questions['paragraph'].append(j)
paragraphs.append(sentence_segment(paragraph['context']))
articles.append(np.array(paragraphs))
print(f'Article {i}')
articles = np.array(articles)
print(articles.shape)
np.save('../../data/interim/article_data.npy', articles)
    questions = pd.DataFrame(questions)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import netCDF4 as nc
from netCDF4 import Dataset
import itertools
import datetime
from scipy.stats import ks_2samp
import matplotlib.colors as colors
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import matplotlib.cm as cm
import os
#------------------------------------------------------------------------------
# Motivación codigo -----------------------------------------------------------
"Codigo para encntrar los porcentajes a partir de los cuales se consida un dato de nubosidad"
"tomando enc uenta los cambios en los datos de piranómetros, así encontrar un umbral promedio"
"apartir del cual se considere un porcentaje nublado en los datos de las Fisheye. Los datos"
"se trabajaran a resolucion minutal."
################################################################################################################
## -------------------------LECTURA DE LOS DATOS DE COBERTURA DE NUBES FISH EYE-------------------------------##
################################################################################################################
df_cloud_TS = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Fish_Eye/Totales/Total_Timeseries_FishEye_TS.csv', sep=',')
df_cloud_TS.columns = ['fecha_hora', 'Porcentaje']
df_cloud_TS.index = df_cloud_TS['fecha_hora']
df_cloud_TS = df_cloud_TS.drop(['fecha_hora'], axis =1)
df_cloud_TS.index = pd.to_datetime(df_cloud_TS.index, format="%Y-%m-%d %H:%M", errors='coerce')
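# A possible next step implied by the motivation above (an assumption, not code from the
# original script): drop rows whose timestamp failed to parse and bring the cloud-cover
# series onto a strict one-minute grid.
#
#   df_cloud_TS = df_cloud_TS[df_cloud_TS.index.notnull()]
#   df_cloud_TS = df_cloud_TS.resample('1T').mean()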
"""
Functions for writing to .csv
September 2020
Written by <NAME>
"""
import os
import pandas as pd
import datetime
def define_deciles(regions):
"""
Allocate deciles to regions.
"""
regions = regions.sort_values(by='population_km2', ascending=True)
regions['decile'] = regions.groupby([
'GID_0',
'scenario',
'strategy',
'confidence'
], as_index=True).population_km2.apply( #cost_per_sp_user
pd.qcut, q=11, precision=0,
labels=[100,90,80,70,60,50,40,30,20,10,0],
duplicates='drop') # [0,10,20,30,40,50,60,70,80,90,100]
return regions
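# Illustrative example of what the qcut call above does (made-up numbers, not project data):
# eleven equally sized population-density bins, labelled from 100 for the least dense decile
# down to 0 for the most dense.
#
#   s = pd.Series(range(1, 23))
#   pd.qcut(s, q=11, precision=0, labels=[100, 90, 80, 70, 60, 50, 40, 30, 20, 10, 0])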
def write_demand(regional_annual_demand, folder):
"""
Write all annual demand results.
"""
print('Writing annual_mno_demand')
regional_annual_demand = pd.DataFrame(regional_annual_demand)
regional_annual_demand = regional_annual_demand.loc[
regional_annual_demand['scenario'] == 'baseline_10_10_10']
# regional_annual_mno_demand = regional_annual_demand[[
# 'GID_0', 'GID_id', 'scenario', 'strategy',
# 'confidence', 'year', 'population', 'area_km2', 'population_km2',
# 'geotype', 'arpu_discounted_monthly',
# 'penetration', 'population_with_phones','phones_on_network',
# 'smartphone_penetration', 'smartphones_on_network', 'revenue'
# ]]
# filename = 'regional_annual_mno_demand.csv'
# path = os.path.join(folder, filename)
# regional_annual_mno_demand.to_csv(path, index=False)
print('Writing annual_market_demand')
regional_annual_market_demand = regional_annual_demand[[
'GID_0', 'GID_id', 'scenario', 'strategy',
'confidence', 'year', 'population',
# 'population_f_over_10', 'population_m_over_10',
'area_km2', 'population_km2',
'geotype', 'arpu_discounted_monthly',
# 'penetration_female',
# 'penetration_male',
'penetration',
'population_with_phones',
# 'population_with_phones_f_over_10',
# 'population_with_phones_m_over_10',
'smartphone_penetration',
'population_with_smartphones',
# 'population_with_smartphones_f_over_10',
# 'population_with_smartphones_m_over_10',
'revenue'
]]
filename = 'regional_annual_market_demand.csv'
path = os.path.join(folder, filename)
regional_annual_market_demand.to_csv(path, index=False)
def write_results(regional_results, folder, metric):
"""
Write all results.
"""
print('Writing national MNO results')
national_results = pd.DataFrame(regional_results)
national_results = national_results[[
'GID_0', 'scenario', 'strategy', 'confidence', 'population_total', 'area_km2',
'phones_on_network', 'smartphones_on_network', 'total_estimated_sites',
'existing_mno_sites', 'upgraded_mno_sites', 'new_mno_sites',
'total_mno_revenue', 'total_mno_cost',
]]
national_results = national_results.drop_duplicates()
national_results = national_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence'], as_index=True).sum()
national_results['cost_per_network_user'] = (
national_results['total_mno_cost'] / national_results['phones_on_network'])
national_results['cost_per_smartphone_user'] = (
national_results['total_mno_cost'] / national_results['smartphones_on_network'])
path = os.path.join(folder,'national_mno_results_{}.csv'.format(metric))
national_results.to_csv(path, index=True)
print('Writing national cost composition results')
national_cost_results = pd.DataFrame(regional_results)
national_cost_results = national_cost_results[[
'GID_0', 'scenario', 'strategy', 'confidence', 'population_total',
'phones_on_network', 'smartphones_on_network', 'total_mno_revenue',
'ran', 'backhaul_fronthaul', 'civils', 'core_network',
'administration', 'spectrum_cost', 'tax', 'profit_margin',
'total_mno_cost', 'available_cross_subsidy', 'deficit',
'used_cross_subsidy', 'required_state_subsidy',
]]
national_cost_results = national_cost_results.drop_duplicates()
national_cost_results = national_cost_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence'], as_index=True).sum()
national_cost_results['cost_per_network_user'] = (
national_cost_results['total_mno_cost'] /
national_cost_results['phones_on_network'])
national_cost_results['cost_per_smartphone_user'] = (
national_cost_results['total_mno_cost'] /
national_cost_results['smartphones_on_network'])
#Calculate private, govt and societal costs
national_cost_results['private_cost'] = national_cost_results['total_mno_cost']
national_cost_results['government_cost'] = (
national_cost_results['required_state_subsidy'] -
(national_cost_results['spectrum_cost'] + national_cost_results['tax']))
national_cost_results['societal_cost'] = (
national_cost_results['private_cost'] + national_cost_results['government_cost'])
path = os.path.join(folder,'national_mno_cost_results_{}.csv'.format(metric))
national_cost_results.to_csv(path, index=True)
print('Writing general decile results')
decile_results = pd.DataFrame(regional_results)
decile_results = define_deciles(decile_results)
decile_results = decile_results[[
'GID_0', 'scenario', 'strategy', 'decile', 'confidence',
'population_total', 'area_km2', 'phones_on_network',
'smartphones_on_network', 'total_estimated_sites',
'existing_mno_sites', 'upgraded_mno_sites', 'new_mno_sites',
'total_mno_revenue', 'total_mno_cost',
]]
decile_results = decile_results.drop_duplicates()
decile_results = decile_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence', 'decile'], as_index=True).sum()
decile_results['population_km2'] = (
decile_results['population_total'] / decile_results['area_km2'])
decile_results['phone_density_on_network_km2'] = (
decile_results['phones_on_network'] / decile_results['area_km2'])
decile_results['sp_density_on_network_km2'] = (
decile_results['smartphones_on_network'] / decile_results['area_km2'])
decile_results['total_estimated_sites_km2'] = (
decile_results['total_estimated_sites'] / decile_results['area_km2'])
decile_results['existing_mno_sites_km2'] = (
decile_results['existing_mno_sites'] / decile_results['area_km2'])
decile_results['cost_per_network_user'] = (
decile_results['total_mno_cost'] / decile_results['phones_on_network'])
decile_results['cost_per_smartphone_user'] = (
decile_results['total_mno_cost'] / decile_results['smartphones_on_network'])
path = os.path.join(folder,'decile_mno_results_{}.csv'.format(metric))
decile_results.to_csv(path, index=True)
print('Writing cost decile results')
decile_cost_results = pd.DataFrame(regional_results)
decile_cost_results = define_deciles(decile_cost_results)
decile_cost_results = decile_cost_results[[
'GID_0', 'scenario', 'strategy', 'decile', 'confidence',
'population_total', 'area_km2', 'phones_on_network', 'smartphones_on_network',
'total_mno_revenue', 'ran', 'backhaul_fronthaul', 'civils', 'core_network',
'administration', 'spectrum_cost', 'tax', 'profit_margin', 'total_mno_cost',
'available_cross_subsidy', 'deficit', 'used_cross_subsidy',
'required_state_subsidy',
]]
decile_cost_results = decile_cost_results.drop_duplicates()
decile_cost_results = decile_cost_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence', 'decile'], as_index=True).sum()
decile_cost_results['cost_per_network_user'] = (
decile_cost_results['total_mno_cost'] / decile_cost_results['phones_on_network'])
decile_cost_results['cost_per_smartphone_user'] = (
decile_cost_results['total_mno_cost'] / decile_cost_results['smartphones_on_network'])
path = os.path.join(folder,'decile_mno_cost_results_{}.csv'.format(metric))
decile_cost_results.to_csv(path, index=True)
print('Writing regional results')
regional_mno_results = pd.DataFrame(regional_results)
regional_mno_results = define_deciles(regional_mno_results)
regional_mno_results = regional_mno_results[[
'GID_0', 'GID_id', 'scenario', 'strategy', 'decile',
'confidence', 'population_total', 'area_km2',
'phones_on_network', 'smartphones_on_network',
'total_estimated_sites', 'existing_mno_sites',
'upgraded_mno_sites', 'new_mno_sites',
'total_mno_revenue', 'total_mno_cost',
]]
regional_mno_results = regional_mno_results.drop_duplicates()
regional_mno_results['cost_per_network_user'] = (
regional_mno_results['total_mno_cost'] / regional_mno_results['phones_on_network'])
regional_mno_results['cost_per_smartphone_user'] = (
regional_mno_results['total_mno_cost'] / regional_mno_results['smartphones_on_network'])
path = os.path.join(folder,'regional_mno_results_{}.csv'.format(metric))
regional_mno_results.to_csv(path, index=True)
print('Writing national market results')
national_results = pd.DataFrame(regional_results)
national_results = national_results[[
'GID_0', 'scenario', 'strategy', 'confidence',
'population_total', 'area_km2',
'total_phones', 'total_smartphones',
'total_estimated_sites',
'total_upgraded_sites',
'total_new_sites',
'total_market_revenue', 'total_market_cost',
'total_spectrum_cost', 'total_tax',
'total_required_state_subsidy',
]]
national_results = national_results.drop_duplicates()
national_results = national_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence'], as_index=True).sum()
national_results['cost_per_network_user'] = (
national_results['total_market_cost'] / national_results['total_phones'])
national_results['cost_per_smartphone_user'] = (
national_results['total_market_cost'] / national_results['total_smartphones'])
national_results['private_cost'] = (
national_results['total_market_cost'])
national_results['government_cost'] = (
national_results['total_required_state_subsidy'] -
(national_results['total_spectrum_cost'] + national_results['total_tax']))
national_results['social_cost'] = (
national_results['private_cost'] + national_results['government_cost'])
path = os.path.join(folder,'national_market_results_{}.csv'.format(metric))
national_results.to_csv(path, index=True)
#=cost / market share * 100
print('Writing national market cost composition results')
national_cost_results = pd.DataFrame(regional_results)
national_cost_results = national_cost_results[[
'GID_0', 'scenario', 'strategy', 'confidence', 'population_total',
'total_phones', 'total_smartphones',
'total_market_revenue', 'total_ran', 'total_backhaul_fronthaul',
'total_civils', 'total_core_network',
'total_administration', 'total_spectrum_cost',
'total_tax', 'total_profit_margin',
'total_market_cost', 'total_available_cross_subsidy',
'total_deficit', 'total_used_cross_subsidy',
'total_required_state_subsidy',
]]
national_cost_results = national_cost_results.drop_duplicates()
national_cost_results = national_cost_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence'], as_index=True).sum()
national_cost_results['cost_per_network_user'] = (
national_cost_results['total_market_cost'] / national_cost_results['total_phones'])
national_cost_results['cost_per_smartphone_user'] = (
national_cost_results['total_market_cost'] / national_cost_results['total_smartphones'])
#Calculate private, govt and societal costs
national_cost_results['private_cost'] = (
national_cost_results['total_market_cost'])
national_cost_results['government_cost'] = (
national_cost_results['total_required_state_subsidy'] -
(national_cost_results['total_spectrum_cost'] + national_cost_results['total_tax']))
national_cost_results['societal_cost'] = (
national_cost_results['private_cost'] + national_cost_results['government_cost'])
path = os.path.join(folder,'national_market_cost_results_{}.csv'.format(metric))
national_cost_results.to_csv(path, index=True)
print('Writing general decile results')
decile_results = pd.DataFrame(regional_results)
decile_results = define_deciles(decile_results)
decile_results = decile_results[[
'GID_0', 'scenario', 'strategy', 'decile', 'confidence',
'population_total', 'area_km2', 'total_phones', 'total_smartphones',
'total_market_revenue', 'total_market_cost',
]]
decile_results = decile_results.drop_duplicates()
decile_results = decile_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence', 'decile'], as_index=True).sum()
decile_results['population_km2'] = (
decile_results['population_total'] / decile_results['area_km2'])
decile_results['cost_per_network_user'] = (
decile_results['total_market_cost'] / decile_results['total_phones'])
decile_results['cost_per_smartphone_user'] = (
decile_results['total_market_cost'] / decile_results['total_smartphones'])
path = os.path.join(folder,'decile_market_results_{}.csv'.format(metric))
decile_results.to_csv(path, index=True)
print('Writing cost decile results')
decile_cost_results = pd.DataFrame(regional_results)
decile_cost_results = define_deciles(decile_cost_results)
decile_cost_results = decile_cost_results[[
'GID_0', 'scenario', 'strategy', 'decile', 'confidence',
'population_total', 'area_km2', 'total_phones', 'total_smartphones',
'total_market_revenue', 'total_ran', 'total_backhaul_fronthaul',
'total_civils', 'total_core_network',
'total_administration', 'total_spectrum_cost', 'total_tax',
'total_profit_margin', 'total_market_cost',
'total_available_cross_subsidy', 'total_deficit',
'total_used_cross_subsidy', 'total_required_state_subsidy'
]]
decile_cost_results = decile_cost_results.drop_duplicates()
decile_cost_results = decile_cost_results.groupby([
'GID_0', 'scenario', 'strategy', 'confidence', 'decile'], as_index=True).sum()
decile_cost_results['cost_per_network_user'] = (
decile_cost_results['total_market_cost'] /
decile_cost_results['total_phones'])
decile_cost_results['cost_per_smartphone_user'] = (
decile_cost_results['total_market_cost'] /
decile_cost_results['total_smartphones'])
path = os.path.join(folder,'decile_market_cost_results_{}.csv'.format(metric))
decile_cost_results.to_csv(path, index=True)
print('Writing regional results')
regional_market_results = | pd.DataFrame(regional_results) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 30 13:43:12 2021
@author: @hk_nien
"""
from multiprocessing import Pool, cpu_count
import random
import pandas as pd
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
import numpy as np
from nl_regions import get_holiday_regions_by_ggd
import nlcovidstats as nlcs
import casus_analysis as ca
from tools import set_xaxis_dateformat
def invert_mapping(m):
"""Invert mapping k->[v0, ...] to Series with v->k."""
mi = {}
for k, vs in m.items():
for v in vs:
mi[v] = k
return pd.Series(mi)
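# Tiny usage sketch added for clarity; not part of the original script.
def _invert_mapping_example():
    """
    Illustrate invert_mapping() with made-up GGD and holiday-region names:
    the result is a Series indexed by GGD name with the region as value.
    """
    return invert_mapping({'Noord': ['GGD Groningen'], 'Zuid': ['GGD Zeeland']})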
def _get_summary_1date(date):
"""Return summary dict for casus data for specified date (yyyy-mm-dd)."""
print('.', end='', flush=True)
df = ca.load_casus_data(date)
# invert mapping
regions_ggd2hol = invert_mapping(get_holiday_regions_by_ggd())
age_group_agg = invert_mapping({
'0-19': ['0-9', '10-19'],
'20+': ['20-29', '30-39', '40-49', '50-59', '60-69', '70-79', '80-89', '90+'],
'Other': ['<50', 'Unknown']
})
df['Holiday_region'] = regions_ggd2hol[df['Municipal_health_service']].values
missing = df['Holiday_region'].isna()
if np.any(missing):
missing = df.loc[missing, 'Municipal_health_service'].unique()
msg = f'Unrecognized municipal health service {missing}'
raise KeyError(msg)
df['Agegroup_agg'] = age_group_agg[df['Agegroup']].values
summary = {'Date_file': df.iloc[-1]['Date_file']}
for (hr, aga), df1 in df.groupby(['Holiday_region', 'Agegroup_agg']):
summary[f'{hr}:{aga}'] = len(df1)
summary = {k: summary[k] for k in sorted(summary)}
return summary
def get_summary_hr_age_by_date_range(
date_a='2020-07-01', date_b='2099-01-01',
csv_fn='data/cum_cases_by_holiday_region_and_age.csv'
):
"""Return summary DataFrame for date range. Parallel processing.
Parameters:
- date_a: lowest date ('yyyy-mm-dd')
- date_b: highest date ('yyyy-mm-dd')
      (or far in the future to use whatever is available).
- csv_fn: optional csv filename to load data from. Data not in CSV
will be regenerated (slow). File will be updated with any new data.
Set to None to skip.
If set but file is nonexistent, a new file will be created.
Return DataFrame:
    - index: Date_file as Timestamp (0:00:00 time of day) for each file date.
    - columns: '(Midden|Noord|Zuid):(0-19|20+|Other)'.
Values: total number of cases up to that date.
"""
date_a, date_b = [pd.to_datetime(d) for d in [date_a, date_b]]
if date_a < pd.to_datetime('2020-07-01'):
raise ValueError(f'date_a {date_a}: no data available.')
    if csv_fn:
        try:
            df_cached = pd.read_csv(csv_fn)
            df_cached['Date_file'] = pd.to_datetime(df_cached['Date_file'])
            df_cached.set_index('Date_file', inplace=True)
        except FileNotFoundError:
            print(f'Warning: no csv file {csv_fn!r}; will create new.')
            df_cached = pd.DataFrame()
    else:
        # No csv cache requested; start from an empty frame so the
        # membership check against df_cached.index below still works.
        df_cached = pd.DataFrame()
dates = []
date = date_a
while date <= date_b:
if date not in df_cached.index:
try:
ca._find_casus_fpath(date.strftime('%Y-%m-%d'))
except FileNotFoundError:
break
dates.append(date)
date += pd.Timedelta(1, 'd')
if len(dates) > 0:
print(f'Processing casus data for {len(dates)} dates. This may take a while.')
# shuffle order so that the progress indicator doesn't slow down
# towards the end when the data is large.
random.seed(1)
random.shuffle(dates)
ncpus = cpu_count()
print(f'({ncpus} workers)', end='', flush=True)
with Pool(ncpus) as p:
summaries = p.map(_get_summary_1date, dates)
print('done.')
df_new = pd.DataFrame.from_records(summaries).set_index('Date_file')
else:
df_new = | pd.DataFrame() | pandas.DataFrame |
#! /usr/bin/env python
#pylint: disable=invalid-name,too-many-arguments,too-many-locals; extension-pkg-whitelist=lxml
"""
Functions used by the other parts of the package
"""
from __future__ import print_function
from io import BytesIO
import base64
import os
import sys
import psutil
import matplotlib#pylint: disable=wrong-import-position
matplotlib.use('Agg')#pylint: disable=wrong-import-position
from Bio import Phylo as ph
from bs4 import BeautifulSoup as bs
from lxml import etree as et
from pandas import read_csv
from seaborn import heatmap
from tqdm import tqdm
import pylab
import numpy as np
import jinja2 as jj2
import pandas as pd
import requests as rq
import matplotlib.pyplot as plt#pylint: disable=ungrouped-imports
def get_dir_path(file_name=""):
"""
Find out what is the script system path and return its location. Optionally
put desired file name at the end of the path. Facilitates access to files
stored in the same directory as executed script. Requires the executed
script being added to the system path
Parameters
--------
file_name: str, default <"">
File name to put at the end of the path. Use empty string if want just
the directory.
Returns
--------
str
System path of the executable.
Examples
-------
>>> get_dir_path() # doctest: +SKIP
'/home/user/program/bin/'
>>> get_dir_path("foo") # doctest: +SKIP
'/home/user/program/bin/foo'
"""
# prog_path = sys.argv[0].replace(sys.argv[0].split("/")[-1],
# file_name)
prog_path = "/".join(sys.argv[0].split("/")[:-1] + [file_name])
return os.path.abspath(prog_path)
def path2name(path,
slash="/",
hid_char=".",
extension=False):
"""
Returns just filename with or without extension from the full path.
Parameters
-------
path: str
Input path.
slash: str
Slash to use. Backslash does NOT work properly yet. Default: </>.
hid_char: str
Character indicating that file is hidden. Default: <.>
extension: bool
        Return filename with extension if <True>. Remove extension
        otherwise. Default: <False>.
Returns
-------
str
Filename from the path.
Examples
-------
>>> path2name("/home/user/foo.bar")
'foo'
>>> path2name("/home/user/.foo.bar")
'foo'
>>> path2name("/home/user/foo.bar", extension=True)
'foo.bar'
>>> path2name("/home/user/.foo.bar", extension=True)
'foo.bar'
"""
if extension is True:
return str(path.split(slash)[-1].strip(hid_char))
return str(path.split(slash)[-1].strip(hid_char).split(".")[0])
def determine_cpus(memory_per_cpu=3):
"""
Returns the number of CPUS to use assuming there must be a minimal size of
RAM per CPU core.
memory_per_cpu: int
        Gigabytes of RAM that should be reserved for a single CPU core.
        Default: <3>.
"""
cpus = psutil.cpu_count()
mem = psutil.virtual_memory().total / 1024 ** 3
supp_cpus = int(mem / memory_per_cpu)
if supp_cpus > cpus:
return cpus
return supp_cpus
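# Worked illustration added for clarity; not part of the original module.
def _determine_cpus_example():
    """
    With, say, 16 logical CPUs and 24 GB of RAM at 3 GB per core, the cap
    works out to min(16, int(24 / 3)) == 8 workers. Those numbers are
    hypothetical; the call below uses whatever the current machine has.
    """
    return determine_cpus(memory_per_cpu=3)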
def load_template_file(template_file,
searchpath="/"):
"""
Load jinja2 template file. Search path starts from root directory so no
chroot.
Parameters
-------
template_file: str
Template file name.
searchpath: str, default </>
Root directory for template lookup.
Returns
-------
jinja2.Template
Examples
-------
>>> import jinja2
>>> lt = load_template_file("./tests/test.jj2", searchpath=".")
>>> isinstance(lt, jinja2.environment.Template)
True
"""
template_loader = jj2.FileSystemLoader(searchpath=searchpath)
template_env = jj2.Environment(loader=template_loader)
template = template_env.get_template(template_file)
return template
def render_template(template_loaded,
template_vars):
"""
Render jinja2.Template to unicode.
Parameters
-------
    template_loaded: jinja2.Template
Template to render.
template_vars: dict
Variables to be rendered with the template.
Returns
-------
unicode
Template content with passed variables.
Examples
-------
>>> lt = load_template_file("./tests/test.jj2",\
searchpath=".")
>>> vars = {"word1": "ipsum", "word2": "adipisicing", "word3": "tempor"}
>>> rt = render_template(lt, vars)
>>> str(rt)
'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt.'
"""
template_rendered = template_loaded.render(template_vars)
return template_rendered
def save_template(out_file_name,
template_rendered):
"""
Save rendered template to file.
Parameters
-------
out_file_name: str
Output file name.
template_rendered: unicode
        Template rendered to a unicode object.
"""
with open(out_file_name, "wb") as fout:
fout.write(template_rendered.encode("utf-8"))
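# End-to-end sketch added for clarity; not part of the original module.
def _template_roundtrip_example():
    """
    Tie the three template helpers together. The file names "example.jj2"
    and "example.out" and the "name" variable are made up for illustration.
    """
    with open("example.jj2", "w") as fh:
        fh.write("Hello {{ name }}!")
    loaded = load_template_file("example.jj2", searchpath=".")
    save_template("example.out", render_template(loaded, {"name": "world"}))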
def read_info_shared(input_file_name,#pylint: disable=dangerous-default-value,too-many-locals
min_fold=5,
cols={
'label': 'label',
'group': "Group",
'otu': "Otu",
'num': "numOtus",
},
sep="\t",
format_junk_grps=True):
"""
Extracts information from mothur's shared file.
Parameters
-------
input_file_name: str
Input file name.
min_fold: int
Fraction of mean group size below which groups will be removed before
analysis.
    cols: dict
        Mapping of column names in the shared file. Keys: <label> (label
        column), <group> (group column), <otu> (OTU column prefix) and
        <num> (number-of-OTUs column). Default: <{'label': 'label',
        'group': 'Group', 'otu': 'Otu', 'num': 'numOtus'}>.
sep: str, default <\t>
Delimiter to use for reading-in shared file.
format_junk_grps: bool, default <True>
Join names of groups to remove by <-> before passing to mothur.
Returns
-------
dict
Information about label, number of samples and groups to remove.
Examples
-------
>>> shared_info = read_info_shared(input_file_name="./tests/test.shared")
>>> shared_info["samples_number"]
9
>>> float(shared_info["label"])
0.03
>>> shared_info["junk_grps"]
'F3D141-F3D143-F3D144'
"""
dtypes = {cols['label']: "str"}
shared_df = | pd.read_csv(input_file_name, sep=sep, dtype=dtypes) | pandas.read_csv |
from contextlib import contextmanager
import pandas as pd
from dataviper.logger import IndentLogger
from dataviper.report.profile import Profile
from dataviper.source.datasource import DataSource
import pymysql
class MySQL(DataSource):
"""
class MySQL is a connection provider for MySQL
    and a query builder as well.
"""
def __init__(self, config={}, sigfig=4, logger=IndentLogger()):
self.config = config
self.sigfig = sigfig
self.logger = logger
@contextmanager
def connect(self, config=None):
config = config if config is not None else self.config
self.__conn = pymysql.connect(**config)
try:
yield
finally:
self.__conn.close()
def get_schema(self, table_name):
self.logger.enter("START: get_schema")
query = self.__get_schema_query(table_name)
schema_df = pd.read_sql(query, self.__conn)
schema_df = schema_df[['column_name', 'data_type']].set_index('column_name')
schema_df.index = schema_df.index.str.lower()
profile = Profile(table_name, schema_df)
profile = self.count_total(profile)
self.logger.exit("DONE: get_schema")
return profile
def count_total(self, profile):
self.logger.enter("START: count_total")
query = "SELECT COUNT(*) AS total FROM {}".format(profile.table_name)
df = pd.read_sql(query, self.__conn)
profile.total = int(df['total'][0])
self.logger.exit("DONE: count_total")
return profile
def __get_schema_query(self, table_name):
return '''
SELECT
COLUMN_NAME as column_name,
COLUMN_TYPE as data_type
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME='{}'
'''.format(table_name).strip()
def count_null(self, profile):
self.logger.enter("START: count_null")
if profile.total is None:
profile = self.count_total(profile)
query = self.__count_null_query(profile)
null_count_df = pd.read_sql(query, self.__conn)
null_count_df = null_count_df.T.rename(columns={0: 'null_count'})
null_count_df['null_%'] = round((null_count_df['null_count'] / profile.total) * 100, self.sigfig)
profile.schema_df = profile.schema_df.join(null_count_df)
self.logger.exit("DONE: count_null")
return profile
def __count_null_query(self, profile):
queries = []
for column_name in profile.schema_df.index:
queries += [self.__count_null_query_for_a_column(profile.table_name, column_name, profile.total)]
return 'SELECT\n{0}\nFROM {1}'.format(',\n'.join(queries), profile.table_name)
def __count_null_query_for_a_column(self, table_name, column_name, total):
"""
TODO: Don't use .format, use SQL placeholder and parameter markers.
See https://docs.microsoft.com/en-us/sql
/odbc/reference/develop-app/binding-parameter-markers?view=sql-server-2017
"""
return '{0} - COUNT({1}) AS {1}'.format(total, column_name)
def get_deviation(self, profile):
self.logger.enter("START: get_deviation")
devis = pd.DataFrame()
for column_name in profile.schema_df.index:
data_type = profile.schema_df.at[column_name, 'data_type']
df = self.__get_deviation_df_for_a_column(profile.table_name, column_name, data_type)
if df is not None:
devis = devis.append(df, sort=False)
profile.schema_df = profile.schema_df.join(devis, how='left')
self.logger.exit("DONE: get_deviation")
return profile
def __get_deviation_df_for_a_column(self, table_name, column_name, data_type='int'):
if all(not data_type.startswith(t) for t in (
'int', 'bigint', 'float', 'date', 'datetime', 'bit', 'varchar', 'nvarchar'
)):
self.logger.info("PASS:", column_name, "because it's {}".format(data_type))
return
try:
self.logger.enter("START:", column_name, data_type)
query = self.__get_deviation_query_for_a_column(table_name, column_name, data_type)
df = pd.read_sql(query, self.__conn)
df.index = [column_name]
return df
except Exception as e:
self.logger.error("get_deviation", e)
finally:
self.logger.exit("DONE:", column_name)
return None
def __get_deviation_query_for_a_column(self, table_name, column_name, data_type):
"""
TODO: Don't use .format, use SQL placeholder and parameter markers.
See https://docs.microsoft.com/en-us/sql
/odbc/reference/develop-app/binding-parameter-markers?view=sql-server-2017
"""
if any(data_type.startswith(t) for t in ('bigint', 'int', 'float', 'bit')):
return '''
SELECT
MIN({0}) as min,
MAX({0}) as max,
AVG({0}) as avg,
STD({0}) as std
FROM {1}
'''.format(column_name, table_name).strip()
        if any(data_type.startswith(t) for t in ('datetime',)):
return '''
SELECT
MIN({0}) as min,
MAX({0}) as max,
CAST(AVG({0}) AS DATETIME) as avg
FROM {1}
'''.format(column_name, table_name).strip()
        if any(data_type.startswith(t) for t in ('date',)):
            return '''
            SELECT
                MIN({0}) as min,
                MAX({0}) as max,
                CAST(AVG({0}) AS DATE) as avg
            FROM {1}
            '''.format(column_name, table_name).strip()
return '''
SELECT
MIN({0}) as min,
MAX({0}) as max
FROM {1}
'''.format(column_name, table_name).strip()
def count_unique(self, profile):
self.logger.enter("START: count_unique")
if profile.total is None:
profile = self.count_total(profile)
variations = pd.DataFrame()
for column_name in profile.schema_df.index:
self.logger.enter("START:", column_name)
df = self.__count_unique_df_for_a_column(profile.table_name, column_name)
variations = variations.append(df)
self.logger.exit("DONE:", column_name)
profile.schema_df = profile.schema_df.join(variations, how='left')
profile.schema_df['unique_%'] = round((profile.schema_df['unique_count'] / profile.total) * 100, self.sigfig)
self.logger.exit("DONE: count_unique")
return profile
def __count_unique_df_for_a_column(self, table_name, column_name):
query = self.__count_unique_query_for_a_column(table_name, column_name)
df = | pd.read_sql(query, self.__conn) | pandas.read_sql |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
import unittest
import numpy as np
import pandas as pd
from nimbusml import Pipeline, FileDataStream, BinaryDataStream
from nimbusml.datasets import get_dataset
from nimbusml.feature_extraction.categorical import OneHotVectorizer
from nimbusml.linear_model import FastLinearRegressor, OnlineGradientDescentRegressor
from nimbusml.preprocessing.normalization import MinMaxScaler
from nimbusml.preprocessing.schema import ColumnDropper
from sklearn.utils.testing import assert_true, assert_array_equal
# data input (as a FileDataStream)
path = get_dataset('infert').as_filepath()
data = FileDataStream.read_csv(
path,
sep=',',
numeric_dtype=np.float32) # Error with integer input
def is_nan(x):
return (x is np.nan or x != x)
def assert_2d_array_equal(actual, desired):
if len(actual) != len(desired):
assert_true(False, "arrays are of different lengths.")
for i in range(len(actual)):
if len(actual[i]) != len(desired[i]):
assert_true(False, "arrays are of different lengths.")
for y in range(len(actual[i])):
if is_nan(actual[i][y]) and is_nan(desired[i][y]):
continue
assert_true(actual[i][y] == desired[i][y])
def transform_data():
xf = MinMaxScaler(columns={'in': 'induced', 'sp': 'spontaneous'})
pipe = Pipeline([xf])
transformed_data = pipe.fit_transform(data, as_binary_data_stream=True)
transformed_data_df = pipe.fit_transform(data)
return transformed_data, transformed_data_df
class TestIdv(unittest.TestCase):
def test_fit_transform(self):
transformed_data, transformed_data_df = transform_data()
assert_true(isinstance(transformed_data, BinaryDataStream))
transformed_data_as_df = transformed_data.to_df()
assert_true(isinstance(transformed_data_df, pd.DataFrame))
assert_array_equal(
transformed_data_as_df.columns,
transformed_data_df.columns)
assert_2d_array_equal(
transformed_data_as_df.values,
transformed_data_df.values)
def test_predict(self):
transformed_data, transformed_data_df = transform_data()
fl = FastLinearRegressor(
feature=[
'parity',
'in',
'sp',
'stratum'],
label='age')
flpipe = Pipeline([fl])
flpipe.fit(transformed_data)
scores = flpipe.predict(transformed_data)
scores_df = flpipe.predict(transformed_data_df)
assert_array_equal(scores, scores_df)
def test_test(self):
transformed_data, transformed_data_df = transform_data()
fl = FastLinearRegressor(
feature=[
'parity',
'in',
'sp',
'stratum'],
label='age')
flpipe = Pipeline([fl])
flpipe.fit(transformed_data)
metrics, scores = flpipe.test(transformed_data, output_scores=True)
metrics_df, scores_df = flpipe.test(
transformed_data_df, output_scores=True)
assert_array_equal(scores, scores_df)
assert_array_equal(metrics, metrics_df)
flpipe.fit(
transformed_data_df.drop(
'age',
axis=1),
transformed_data_df['age'])
metrics, scores = flpipe.test(transformed_data, output_scores=True)
metrics_df, scores_df = flpipe.test(
transformed_data_df, output_scores=True)
assert_array_equal(scores, scores_df)
assert_array_equal(metrics, metrics_df)
def test_fit_predictor_with_idv(self):
train_data = {'c0': ['a', 'b', 'a', 'b'],
'c1': [1, 2, 3, 4],
'c2': [2, 3, 4, 5]}
train_df = pd.DataFrame(train_data).astype({'c1': np.float64,
'c2': np.float64})
test_data = {'c0': ['a', 'b', 'b'],
'c1': [1.5, 2.3, 3.7],
'c2': [2.2, 4.9, 2.7]}
test_df = | pd.DataFrame(test_data) | pandas.DataFrame |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 17 10:51:50 2018
@author: nmei
"""
import pandas as pd
import numpy as np
figure_dir = '../figures'
save_dir = '../results'
from utils import resample_ttest_2sample,MCPConverter
# exps
pos = pd.read_csv('../results/Pos.csv')
att = pd.read_csv('../results/ATT.csv')
# exp 1
results = dict(greater = [],
lesser = [],
ps_mean = [],
ps_std = [],
model = [],)
df = pos[(pos['window'] > 0) & (pos['window'] < 4)]
for model,df_sub in df.groupby('model'):
pairs = [['awareness','confidence'],
['awareness','correct'],
['confidence','correct']]
for pair in pairs:
a = df_sub[pair[0]].values
b = df_sub[pair[1]].values
if a.mean() < b.mean():
pair = [pair[1],pair[0]]
a = df_sub[pair[0]].values
b = df_sub[pair[1]].values
ps = resample_ttest_2sample(a,b,500,10000)
results['greater'].append(pair[0])
results['lesser'].append(pair[1])
results['ps_mean'].append(ps.mean())
results['ps_std'].append(ps.std())
results['model'].append(model)
results = pd.DataFrame(results)
temp = []
for model, df_sub in results.groupby('model'):
idx_sort = np.argsort(df_sub['ps_mean'].values)
df_sub = df_sub.iloc[idx_sort,:]
converter = MCPConverter(df_sub['ps_mean'].values)
d = converter.adjust_many()
df_sub['p_corrected'] = d['bonferroni'].values
temp.append(df_sub)
results = | pd.concat(temp) | pandas.concat |
from __future__ import annotations
from datetime import (
datetime,
time,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
tslib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
conversion,
fields,
get_resolution,
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
timezones,
to_offset,
tzconversion,
)
from pandas._typing import npt
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_inclusive
from pandas.core.dtypes.cast import astype_dt64_to_dt64tz
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_object_dtype,
is_period_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import (
ExtensionArray,
datetimelike as dtl,
)
from pandas.core.arrays._ranges import generate_regular_range
from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import (
BDay,
Day,
Tick,
)
if TYPE_CHECKING:
from pandas import DataFrame
from pandas.core.arrays import (
PeriodArray,
TimedeltaArray,
)
_midnight = time(0, 0)
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
Returns
-------
np.dtype or Datetime64TZDType
"""
if tz is None:
return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
def _field_accessor(name: str, field: str, docstring=None):
def f(self):
values = self._local_timestamps()
if field in self._bool_ops:
result: np.ndarray
if field.endswith(("start", "end")):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
values, field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_scalar_type = Timestamp
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops: list[str] = ["freq", "tz"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"week",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype | DatetimeTZDtype
_freq = None
def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy: bool = False):
values = extract_array(values, extract_numpy=True)
if isinstance(values, IntegerArray):
values = values.to_numpy("int64", na_value=iNaT)
inferred_freq = getattr(values, "_freq", None)
if isinstance(values, type(self)):
# validation
dtz = getattr(dtype, "tz", None)
if dtz and values.tz is None:
dtype = DatetimeTZDtype(tz=dtype.tz)
elif dtz and values.tz:
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
if freq is None:
freq = values.freq
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray, ndarray, or Series or Index containing one of those."
)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(DT64NS_DTYPE)
if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
if copy:
values = values.copy()
if freq:
freq = to_offset(freq)
if getattr(dtype, "tz", None):
# https://github.com/pandas-dev/pandas/issues/18595
# Ensure that we have a standard timezone for pytz objects.
# Without this, things like adding an array of timedeltas and
# a tz-aware Timestamp (with a tz specific to its datetime) will
# be incorrect(ish?) for the array as a whole
dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))
NDArrayBacked.__init__(self, values=values, dtype=dtype)
self._freq = freq
if inferred_freq is None and freq is not None:
type(self)._validate_frequency(self, freq)
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
) -> DatetimeArray:
assert isinstance(values, np.ndarray)
assert values.dtype == DT64NS_DTYPE
result = super()._simple_new(values, dtype)
result._freq = freq
return result
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_not_strict(
cls,
data,
dtype=None,
copy: bool = False,
tz=None,
freq=lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous="raise",
):
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
subarr, tz, inferred_freq = sequence_to_dt64ns(
data,
dtype=dtype,
copy=copy,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
if explicit_none:
freq = None
dtype = tz_to_dtype(tz)
result = cls._simple_new(subarr, freq=freq, dtype=dtype)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
cls._validate_frequency(result, freq, ambiguous=ambiguous)
elif freq_infer:
# Set _freq directly to bypass duplicative _validate_frequency
# check.
result._freq = to_offset(result.inferred_freq)
return result
@classmethod
def _generate_range(
cls,
start,
end,
periods,
freq,
tz=None,
normalize=False,
ambiguous="raise",
nonexistent="raise",
inclusive="both",
):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
left_inclusive, right_inclusive = validate_inclusive(inclusive)
start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start_tz = None if start is None else start.tz
end_tz = None if end is None else end.tz
start = _maybe_localize_point(
start, start_tz, start, freq, tz, ambiguous, nonexistent
)
end = _maybe_localize_point(
end, end_tz, end, freq, tz, ambiguous, nonexistent
)
if freq is not None:
# We break Day arithmetic (fixed 24 hour) here and opt for
# Day to mean calendar day (23/24/25 hour). Therefore, strip
# tz info from start and day to avoid DST arithmetic
if isinstance(freq, Day):
if start is not None:
start = start.tz_localize(None)
if end is not None:
end = end.tz_localize(None)
if isinstance(freq, Tick):
values = generate_regular_range(start, end, periods, freq)
else:
xdr = generate_range(start=start, end=end, periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
_tz = start.tz if start is not None else end.tz
values = values.view("M8[ns]")
index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))
if tz is not None and index.tz is None:
arr = tzconversion.tz_localize_to_utc(
index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
index = cls(arr)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent).asm8
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent).asm8
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
arr = (
np.linspace(0, end.value - start.value, periods, dtype="int64")
+ start.value
)
dtype = tz_to_dtype(tz)
arr = arr.astype("M8[ns]", copy=False)
index = cls._simple_new(arr, freq=None, dtype=dtype)
if start == end:
if not left_inclusive and not right_inclusive:
index = index[1:-1]
else:
if not left_inclusive or not right_inclusive:
if not left_inclusive and len(index) and index[0] == start:
index = index[1:]
if not right_inclusive and len(index) and index[-1] == end:
index = index[:-1]
dtype = tz_to_dtype(tz)
return cls._simple_new(index._ndarray, freq=freq, dtype=dtype)
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value, setitem=setitem)
return value.asm8
def _scalar_from_string(self, value) -> Timestamp | NaTType:
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
self._assert_tzawareness_compat(other)
if setitem:
# Stricter check for setitem vs comparison methods
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x) -> Timestamp | NaTType:
if isinstance(x, np.datetime64):
# GH#42228
# Argument 1 to "signedinteger" has incompatible type "datetime64";
# expected "Union[SupportsInt, Union[str, bytes], SupportsIndex]"
x = np.int64(x) # type: ignore[arg-type]
ts = Timestamp(x, tz=self.tz)
# Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if ts is not NaT: # type: ignore[comparison-overlap]
# GH#41586
# do this instead of passing to the constructor to avoid FutureWarning
ts._set_freq(self.freq)
return ts
@property
# error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
# incompatible with return type "ExtensionDtype" in supertype
# "ExtensionArray"
def dtype(self) -> np.dtype | DatetimeTZDtype: # type: ignore[override]
"""
The dtype for the DatetimeArray.
.. warning::
A future version of pandas will change dtype to never be a
``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
always be an instance of an ``ExtensionDtype`` subclass.
Returns
-------
numpy.dtype or DatetimeTZDtype
If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
is returned.
If the values are tz-aware, then the ``DatetimeTZDtype``
is returned.
"""
return self._dtype
@property
def tz(self) -> tzinfo | None:
"""
Return timezone, if any.
Returns
-------
datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
Returns None when the array is tz-naive.
"""
# GH 18595
return getattr(self.dtype, "tz", None)
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
@property
def tzinfo(self) -> tzinfo | None:
"""
Alias for tz attribute
"""
return self.tz
@property # NB: override with cache_readonly in immutable subclasses
def is_normalized(self) -> bool:
"""
Returns True if all of the dates are at midnight ("no time")
"""
return is_date_array_normalized(self.asi8, self.tz)
@property # NB: override with cache_readonly in immutable subclasses
def _resolution_obj(self) -> Resolution:
return get_resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
return super().__array__(dtype=dtype)
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
------
tstamp : Timestamp
"""
if self.ndim > 1:
for i in range(len(self)):
yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = (length // chunksize) + 1
with warnings.catch_warnings():
# filter out warnings about Timestamp.freq
warnings.filterwarnings("ignore", category=FutureWarning)
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = ints_to_pydatetime(
data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
)
yield from converted
def astype(self, dtype, copy: bool = True):
# We handle
# --> datetime
# --> period
# DatetimeLikeArrayMixin Super handles the rest.
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if copy:
return self.copy()
return self
elif is_datetime64_ns_dtype(dtype):
return astype_dt64_to_dt64tz(self, dtype, copy, via_utc=False)
elif self.tz is None and is_datetime64_dtype(dtype) and dtype != self.dtype:
# unit conversion e.g. datetime64[s]
return self._ndarray.astype(dtype)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
# -----------------------------------------------------------------
# Rendering Methods
@dtl.ravel_compat
def _format_native_types(
self, na_rep="NaT", date_format=None, **kwargs
) -> npt.NDArray[np.object_]:
from pandas.io.formats.format import get_format_datetime64_from_values
fmt = get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(
self.asi8, tz=self.tz, format=fmt, na_rep=na_rep
)
# -----------------------------------------------------------------
# Comparison Methods
def _has_same_tz(self, other) -> bool:
# vzone shouldn't be None if value is non-datetime like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
if not hasattr(other, "tzinfo"):
return False
other_tz = other.tzinfo
return timezones.tz_compare(self.tzinfo, other_tz)
def _assert_tzawareness_compat(self, other) -> None:
# adapted from _Timestamp._assert_tzawareness_compat
other_tz = getattr(other, "tzinfo", None)
other_dtype = getattr(other, "dtype", None)
if is_datetime64tz_dtype(other_dtype):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
if other is NaT:
# pd.NaT quacks both aware and naive
pass
elif self.tz is None:
if other_tz is not None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects."
)
elif other_tz is None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects"
)
# -----------------------------------------------------------------
# Arithmetic Methods
def _sub_datetime_arraylike(self, other):
"""subtract DatetimeArray/Index or ndarray[datetime64]"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
assert is_datetime64_dtype(other)
other = type(self)(other)
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
f"{type(self).__name__} subtraction must have the same "
"timezones or no timezones"
)
self_i8 = self.asi8
other_i8 = other.asi8
arr_mask = self._isnan | other._isnan
new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask)
if self._hasnans or other._hasnans:
np.putmask(new_values, arr_mask, iNaT)
return new_values.view("timedelta64[ns]")
def _add_offset(self, offset) -> DatetimeArray:
if self.ndim == 2:
return self.ravel()._add_offset(offset).reshape(self.shape)
assert not isinstance(offset, Tick)
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset._apply_array(values).view("M8[ns]")
result = DatetimeArray._simple_new(result)
result = result.tz_localize(self.tz)
except NotImplementedError:
warnings.warn(
"Non-vectorized DateOffset being applied to Series or DatetimeIndex.",
PerformanceWarning,
)
result = self.astype("O") + offset
if not len(self):
# GH#30336 _from_sequence won't be able to infer self.tz
return type(self)._from_sequence(result).tz_localize(self.tz)
return type(self)._from_sequence(result)
def _sub_datetimelike_scalar(self, other):
# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
assert isinstance(other, (datetime, np.datetime64))
assert other is not NaT
other = Timestamp(other)
# error: Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if other is NaT: # type: ignore[comparison-overlap]
return self - NaT
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
i8 = self.asi8
result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result)
return result.view("timedelta64[ns]")
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self) -> np.ndarray:
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
if self.tz is None or timezones.is_utc(self.tz):
return self.asi8
return tzconversion.tz_convert_from_utc(self.asi8, self.tz)
def tz_convert(self, tz) -> DatetimeArray:
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
Array or Index
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC.
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.date_range(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError(
"Cannot convert tz-naive timestamps, use tz_localize to localize"
)
# No conversion since timestamps are all UTC to begin with
dtype = tz_to_dtype(tz)
return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)
@dtl.ravel_compat
def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArray:
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
This method can also be used to do the inverse -- to create a time
zone unaware object from an aware object. To that end, pass `tz=None`.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times.
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq=None)
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0 2018-10-28 01:20:00+02:00
1 2018-10-28 02:36:00+02:00
2 2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backwards'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
if self.tz is not None:
if tz is None:
new_dates = tzconversion.tz_convert_from_utc(self.asi8, self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = tzconversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
new_dates = new_dates.view(DT64NS_DTYPE)
dtype = tz_to_dtype(tz)
freq = None
if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):
# we can preserve freq
# TODO: Also for fixed-offsets
freq = self.freq
elif tz is None and self.tz is None:
# no-op
freq = self.freq
return self._simple_new(new_dates, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self) -> npt.NDArray[np.object_]:
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects.
Returns
-------
datetimes : ndarray[object]
"""
return ints_to_pydatetime(self.asi8, tz=self.tz)
def normalize(self) -> DatetimeArray:
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
        00:00:00. This is useful in cases when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = normalize_i8_timestamps(self.asi8, self.tz)
return type(self)(new_values)._with_freq("infer").tz_localize(self.tz)
@dtl.ravel_compat
def to_period(self, freq=None) -> PeriodArray:
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]')
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn(
"Converting to PeriodArray/Index representation "
"will drop timezone information.",
UserWarning,
)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError(
"You must pass a freq argument as current index has none."
)
res = get_period_alias(freq)
# https://github.com/pandas-dev/pandas/issues/33358
if res is None:
res = freq
freq = res
return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)
def to_perioddelta(self, freq) -> TimedeltaArray:
"""
Calculate TimedeltaArray of difference between index
values and index converted to PeriodArray at specified
freq. Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
"""
        # Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
# stacklevel chosen to be correct for when called from DatetimeIndex
stacklevel=3,
)
from pandas.core.arrays.timedeltas import TimedeltaArray
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view("m8[ns]")
return TimedeltaArray(m8delta)
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None):
"""
Return the month names of the DateTimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "month_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
def day_name(self, locale=None):
"""
Return the day names of the DateTimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale.
Returns
-------
Index
Index of day names.
Examples
--------
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
>>> idx.day_name()
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
"""
values = self._local_timestamps()
        result = fields.get_date_name_field(values, "day_name", locale=locale)
        result = self._maybe_mask_results(result, fill_value=None)
        return result
import sys
import os
import pandas as pd
from numpy import floor, log10, isnan, nan, isinf
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QGridLayout, QLabel, QComboBox, QLineEdit, QPushButton, QCheckBox
from PyQt5.QtCore import pyqtSignal
from PyQt5 import QtCore
from PyQt5 import QtGui
from PyQt5.QtGui import QDoubleValidator
from IGM.rb_setline import read_line_list
LINELIST_DIR = os.path.dirname(os.path.abspath(__file__))
# This widget includes the primary linelist, redshift estimation
# and secondary linelists.
# Since this class has complicated structure in terms of widget composition,
# only import useful modules/functions to avoid slow-down running time
class LineListWidget(QWidget):
# Linelist constant
# only need to update this one
LINELISTS = ['NONE']
with open(LINELIST_DIR+'/gui_linelists.ascii') as f:
next(f)
for line in f:
LINELISTS.append(line.strip())
# check function _get_linelist_df if any error showed up
# Exporting signals
send_lineindex = pyqtSignal(int)
send_linelist = pyqtSignal(object)
send_more_linelist = pyqtSignal(object)
send_more_linelist_z = pyqtSignal(object)
send_data = pyqtSignal(object)
send_gauss_num = pyqtSignal(int)
send_message = pyqtSignal(str)
send_z_returnPressed = pyqtSignal(float)
send_linelists2multiG = pyqtSignal(list)
# initialization - widget layout
def __init__(self):
super().__init__()
#internal values
self.linelist = []
self.filename = ''
self.filenames = []
self.newz = []
# Main(Grand) widget layout
glayout = QVBoxLayout()
# Widget column names
layout = QGridLayout()
layout.addWidget(QLabel('LineList Name'), 0, 0)
layout.addWidget(QLabel('Ion Name'), 0, 1)
layout.addWidget(QLabel('#Gauss'), 0, 2)
layout.addWidget(QLabel('Estimated z'), 0, 3)
layout.addWidget(QLabel('z error'), 0, 4)
layout.addWidget(QLabel('Confidence'), 0, 5)
layout.addWidget(QLabel('Flag'), 0, 6)
# linelist combobox
self.l_lln = QComboBox()
self.l_lln.setFixedWidth(120)
self.l_lln.addItems(self.LINELISTS)
layout.addWidget(self.l_lln, 1, 0)
# selecting a linelist in linelists box triggers another action
self.l_lln.currentTextChanged.connect(self._linelist_changed)
# Ion Names in this selected line-list
# Note: 'ALL' is in index 0
self.l_combobox = QComboBox()
self.l_combobox.setFixedWidth(150)
layout.addWidget(self.l_combobox, 1, 1)
self.l_combobox.addItem('NONE')
self.l_combobox.setCurrentIndex(0)
# selecting an ion in a linelist triggers another action
self.l_combobox.currentIndexChanged.connect(self._index_changed)
#self.l_combobox.currentTextChanged.connect(self._text_changed)
# Number of Gaussian specified
self.gauss_num = QComboBox()
self.gauss_num.setFixedWidth(50)
self.gauss_num.addItems(['0', '1', '2', '3'])
self.gauss_num.setCurrentIndex(1)
# selecting a number of the box triggers another action
self.gauss_num.activated.connect(self._on_gauss_num_activated)
layout.addWidget(self.gauss_num, 1,2)
# User-input textboxes
# this validator only allows user to type numbers
self.onlyFloat = QDoubleValidator()
# Estimated redshift
self.estZ = QLineEdit()
self.estZ.setPlaceholderText('Guess redshift')
self.estZ.setMaximumWidth(100)
self.estZ.setValidator(self.onlyFloat)
# pressing return button triggers another action
self.estZ.returnPressed.connect(self._on_z_return_pressed)
# Errors in estimated redshift
self.estZstd = QLineEdit()
self.estZstd.setPlaceholderText('z Error')
self.estZstd.setMaximumWidth(100)
self.estZstd.setValidator(self.onlyFloat)
self.estZstd.setReadOnly(True)
# Confidence level
self.conf = QLineEdit()
self.conf.setPlaceholderText('[0, 1.]')
self.conf.setMaximumWidth(150)
self.conf_onlyFloat = QDoubleValidator(bottom=0.,
top=1.,
decimals=3,
notation=QDoubleValidator.StandardNotation)
self.conf.setValidator(self.conf_onlyFloat)
# Flags/Comments on this
self.flag = QLineEdit()
self.flag.setPlaceholderText('Additional Info?')
# Button to save current estimation to table/database
button = QPushButton('Add to Table below')
# clicking button triggers another action
button.clicked.connect(self._on_button_clicked)
layout.addWidget(self.estZ, 1,3)
layout.addWidget(self.estZstd, 1, 4)
layout.addWidget(self.conf, 1,5)
layout.addWidget(self.flag, 1,6)
layout.addWidget(button, 1,7)
# Secondary linelists
l_checkbox = QCheckBox('Add More Linelists to Examine..')
# only triggers to set up more secondary linelist when checked
l_checkbox.stateChanged.connect(self._intialize_more_linelist)
l_hlayout = QHBoxLayout()
# Number of secondary linelists needed
num_llists = 6
self.llists_2 = []
for i in range(num_llists):
self.llists_2.append(self.add_linelists())
#indices for llists_2:
# 0-layout, 1-linelist combobox, 2-z lineedit
l_hlayout.addLayout(self.llists_2[i][0])
glayout.addLayout(layout)
glayout.addWidget(l_checkbox)
glayout.addLayout(l_hlayout)
glayout.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
self.setLayout(glayout)
self.setFixedHeight(150)
# utility function to set up secondary linelist
def add_linelists(self):
ll_layout = QGridLayout()
l_combox = QComboBox()
l_combox.setFixedWidth(80)
z_edit = QLineEdit()
z_edit.setPlaceholderText('Guess z')
z_edit.setReadOnly(True)
z_edit.setMaximumWidth(60)
#ll_layout.addWidget(QLabel('Linelist'), 0,0)
#ll_layout.addWidget(QLabel('Guess z'), 0,1)
ll_layout.addWidget(l_combox, 0,0)
ll_layout.addWidget(z_edit, 0,1)
ll_layout.setAlignment(QtCore.Qt.AlignLeft)
return ll_layout, l_combox, z_edit
    # initialize secondary linelist styles
def _intialize_more_linelist(self, s):
# if the checkbox is checked, initialize secondary linelists
if s == QtCore.Qt.Checked:
# initialize more linelists for plotting
colors = ['#A52A2A', '#FF7F50', '#40E0D0', '#DAA520', '#008000', '#4B0082']
for i in range(len(self.llists_2)):
self.llists_2[i][1].addItems(self.LINELISTS)
t_color = 'QComboBox {color:' + colors[i] + '}'
self.llists_2[i][1].setStyleSheet(t_color)
# 2 parameters need to be passed: 1.selected linelist and 2.index of linelist widget changed
self.llists_2[i][1].currentTextChanged.connect(lambda s, idx=i: self._addtional_linelist(idx, s))
self.llists_2[i][2].setReadOnly(False)
# only the index of linelist widget passed
self.llists_2[i][2].returnPressed.connect(lambda idx=i: self._guess_z_return_pressed(idx))
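                # the idx=i default argument freezes the loop index at definition time;
                # a bare closure over i would late-bind and leave every lambda pointing at the last row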
else:
# if checkbox is unchecked, grey out all things
for i in range(len(self.llists_2)):
self.llists_2[i][1].clear()
self.llists_2[i][2].clear()
self.llists_2[i][2].setReadOnly(True)
# action to press return button on estZ LineEdit
def _on_z_return_pressed(self):
self.send_z_returnPressed.emit(float(self.estZ.text()))
if self.gauss_num.currentIndex() < 1:
self.estZstd.setText('nan')
        # this will not change the default self.newz values
# importing signal(linelist name) to slot
def on_linelist_name_slot(self, sent_linelist_name):
        self.l_lln.setCurrentText(sent_linelist_name)
def on_linelist_slot(self, sent_linelist):
self.linelist = sent_linelist
#self.l_combobox.addItems(['NONE', 'ALL'] + self.linelist['name'].tolist())
#print(self.linelist)
# ion line combobox events
def _index_changed(self, i): # i is an int
self.send_lineindex.emit(i)
def _text_changed(self, s): # s is a str
tmp_df = self.linelist.set_index('name')
# Display all available ions in a selected linelist
def _linelist_changed(self, s):
        if s in ('', 'NONE'):
self.send_linelist.emit(s)
self.l_combobox.clear()
self.l_combobox.addItem('NONE')
self.l_combobox.setCurrentIndex(0)
else:
llist = self._get_linelist_df(s)
self.linelist = llist
self.l_combobox.addItems(['ALL'] + self.linelist['name'].tolist())
self.send_linelist.emit(self.linelist)
self.l_combobox.setCurrentIndex(1)
# display estimated redshifts and error
# with specified significant figures
def _on_estZ_changed(self, newz):
show_sigfig = 5
self.newz = newz
if not isnan(float(self.newz[0])):
self.estZ.setText(str(self.round_to_sigfig(newz[0], show_sigfig)))
if self.gauss_num.currentIndex() > 0:
# Except for manually guessing z (i.e., gauss_num=0),
# sending out estZ to plot automatically without return pressed
self.send_z_returnPressed.emit(self.newz[0])
else:
self.estZ.setText('nan')
if not isnan(float(self.newz[1])):
self.estZstd.setText(str(self.round_to_sigfig(newz[1], show_sigfig)))
else:
self.estZstd.setText('nan')
# importing signal(filename) to slot
def _on_sent_filename(self, sent_filename):
self.filename = sent_filename
# importing signal(filenames) to slot
def _on_sent_filenames(self, sent_filenames):
self.filenames = sent_filenames
# importing signal(FitsObj) to slot
def _on_sent_fitsobj(self, sent_fitsobj):
self.fitsobj = sent_fitsobj
if self.fitsobj.z_guess is not None:
# replace estZ if z_guess is available
self.estZ.setText(str(self.round_to_sigfig(self.fitsobj.z_guess, 3)))
self.send_message.emit('Redshift posterior is found in the FITS file!')
# action to "Add to Table below"
# log available values to DataFrame
def _on_button_clicked(self, sfilename):
if len(self.estZ.text().strip()) < 1:
self.estZ.setText('nan')
if len(self.estZstd.text().strip()) < 1:
self.estZstd.setText('nan')
if len(self.conf.text().strip()) < 1:
self.conf.setText('0')
if len(self.flag.text().strip()) < 1:
self.flag.setText('No comments')
# prepare exporting data
data = {'Name': self.filename,
'z': self.newz[0], #float(self.estZ.text()),
'z_err': self.newz[1], #float(self.estZstd.text()),
'Confidence': float(self.conf.text()),
'Linelist': self.l_lln.currentText(),
'Flag': self.flag.text()}
        # add coordinates if they are available
if self.fitsobj.ra is not None:
data.update({'RA': self.fitsobj.ra,
'DEC': self.fitsobj.dec})
# add z_guess if it is available
if self.fitsobj.z_guess is not None:
data.update({'z_guess': self.fitsobj.z_guess})
# export data to DataFrame table
self.send_data.emit(data)
# importing data from table to slot
def _on_sent_dictdata(self, sent_dict):
#print(self.filename)
#print(sent_dict)
# if received data is non-empty
# add data back to corresponding LineEdit widgets
if len(sent_dict) > 0:
#print(sent_dict['z'])
if not isnan(float(sent_dict['z'])):
# extract z_estimated from z column
if len(self.newz) < 2:
self.newz.append(float(sent_dict['z']))
self.newz.append(float(sent_dict['z_err']))
else:
self.newz[0] = float(sent_dict['z'])
self.newz[1] = float(sent_dict['z_err'])
show_sigfig = 5
self.estZ.setText(str(self.round_to_sigfig(self.newz[0], show_sigfig)))
if not isnan(float(self.newz[1])):
self.estZstd.setText(str(self.round_to_sigfig(self.newz[1], show_sigfig)))
else:
self.estZstd.setText('nan')
self.send_message.emit('Found estimated z in table!')
elif not isnan(float(sent_dict['z_guess'])):
# extract z_estimated from z_guess column
if len(self.newz) < 2:
self.newz.append(float(sent_dict['z_guess']))
self.newz.append(0.)
else:
self.newz[0] = float(sent_dict['z_guess'])
self.newz[1] = nan
self.estZ.setText(str(self.newz[0]))
self.estZstd.setText(str(self.newz[1]))
self.conf.setText(str(sent_dict['Confidence']))
self.flag.setText(str(sent_dict['Flag']))
self.l_lln.setCurrentText(str(sent_dict['Linelist']))
self.send_message.emit("Can't find z in table! Use z_guess now..")
else:
            # sent_dict data is empty, reset everything
self.estZ.clear()
self.estZstd.clear()
self.newz = [nan, nan] # reset est_z and est_z_std back to nans
self.conf.clear()
self.flag.clear()
self.l_lln.setCurrentIndex(0)
# action to number of Gaussians selected
def _on_gauss_num_activated(self):
# exporting signals
self.send_gauss_num.emit(int(self.gauss_num.currentText()))
self.send_linelists2multiG.emit(self.LINELISTS)
# utility function to round values to desired significant figures
def round_to_sigfig(self, num=0., sigfig=1):
if num is not None:
tmp = log10(abs(num))
if isinf(tmp):
return 0
else:
return round(num, sigfig - int(floor(tmp)) - 1)
else:
return None
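    # Worked example: round_to_sigfig(0.0123456, sigfig=3) -> log10(|x|) ~ -1.91,
    # floor gives -2, so round(0.0123456, 3 - (-2) - 1) = round(0.0123456, 4) = 0.0123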
    # load the full contents of the selected secondary linelist
def _addtional_linelist(self, i, s):
# i = index of the widget passing linelist
# s = name of the linelist
        llist = pd.DataFrame(columns=['wave', 'name'])
# ###########################################################################
import os
import json
import requests
import pandas as pd
def load_california_electricity_demand(
filepath='data/demand.json',
api_key_env='EIA_API_KEY',
train_only=False):
data = read_or_download_data(filepath, api_key_env)
df = (
json_to_df(data)
.rename(columns={0: 'ds', 1: 'y'})
.assign(ds=utc_to_pst)
.assign(ds=lambda df: df.ds.dt.tz_localize(None))
.sort_values('ds')
)
if train_only:
df = remove_2019_and_later(df)
return df
def read_or_download_data(filepath, api_key_env):
if os.path.exists(filepath):
data = read_json(filepath)
else:
api_key = try_get_env(api_key_env)
response_json = fetch_california_demand(api_key)
write_json(response_json, filepath)
data = read_json(filepath)
return data
def read_json(file):
with open(file) as f:
data = json.load(f)
return data
def write_json(data, filepath):
with open(filepath, 'w') as file:
json.dump(data, file)
def try_get_env(api_key_env):
env = os.getenv(api_key_env)
if env:
return env
else:
print('Please provide a valid EIA_API_KEY environment variable.')
return None
def fetch_california_demand(api_key):
r = requests.get(
'http://api.eia.gov/series',
params={
'api_key': api_key,
'series_id': 'EBA.CAL-ALL.D.H',
'out': 'json'
}
)
return r.json()
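# Assumed shape of the (v1) EIA series response, inferred from the indexing in
# json_to_df below rather than from the API docs:
#   {"series": [{"series_id": ..., "data": [[timestamp, demand], ...]}]}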
def json_to_df(data):
    df = pd.DataFrame(data['series'][0]['data'])
    return df
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 22 04:30:07 2019
@author: akris
"""
import urllib.request
from bs4 import BeautifulSoup
import time
import pandas as pd
with open('PageCount-test.csv') as csv_file:
df_pageCount = pd.read_csv(csv_file)
words = []
links = []
for index, row in df_pageCount.iterrows():
alpha = row[0]
for page in range(1,row[1]+1):
time.sleep(3) #pause the code for 3 seconds
webURL = "https://www.merriam-webster.com/browse/dictionary/" + alpha + "/" + str(page)
#print(webURL)
while True:
try:
page = urllib.request.urlopen(webURL)
if page.getcode() == 200:
break
except Exception as inst:
print(inst)
soup = BeautifulSoup(page, 'html.parser')
s1 = soup.find('div',class_='entries')
for i in s1.find_all('a'):
words.append(i.find(text=True))
links.append(i.get('href'))
#end of for loop
#end of for loop
df = pd.DataFrame(words, columns=['Words'])
# Copyright (c) 2018 Copyright holder of the paper Generative Adversarial Model Learning
# submitted to NeurIPS 2019 for review
# All rights reserved.
import argparse
import pandas as pd
import warnings
import os
import seaborn as sns
import matplotlib.pyplot as plt
import math
import numpy as np
from pathlib import Path
def loadAndPlotResults():
mainDir = Path(__file__).parents[1]
# load the expert data frame
# rename eval_types
replace_dict = {'avg_undiscounted_return': 'diff_avg_undiscounted_return',
'avg_discounted_return': 'diff_avg_discounted_return',
'avg_success_rate': 'diff_avg_success_rate',
'avg_boundaries_left': 'diff_avg_boundaries_left',
'avg_traj_length': 'diff_avg_traj_length',
'avg_norm_displacement': 'avg_norm_displacement',
}
dataframes = []
expert_dataframes = []
expertPlotFramePath = Path.joinpath(mainDir, "datasets/expert_plot.csv")
expert_df = pd.read_csv(str(expertPlotFramePath))
expert_df["algo"] = "True Environment"
# create set with all different expert eval types
expert_eval_types = set(expert_df["eval_type"])
expert_values = {}
for eval_type in expert_eval_types:
expert_values[eval_type] = expert_df[expert_df["eval_type"] == str(eval_type)]["value"].iloc[0]
expert_df = expert_df[expert_df.eval_type != 'avg_discounted_return']
expert_df = expert_df[expert_df.eval_type != 'avg_min_displacement']
expert_df = expert_df[expert_df.eval_type != 'avg_fixed_action_min_displacement']
expert_df = expert_df[expert_df.eval_type != 'avg_fixed_action_displacement']
expert_df = expert_df[expert_df.eval_type != 'avg_displacement']
expert_df = expert_df[expert_df.eval_type != 'avg_boundaries_left']
# add the expert df as the last
for eval_type in expert_eval_types:
if not (eval_type == 'avg_displacement' or eval_type == 'avg_norm_displacement'):
expert_df["value"][expert_df[expert_df["eval_type"] == eval_type].index] = expert_df["value"][
expert_df[expert_df["eval_type"] == eval_type].index].apply(
lambda x: math.fabs(expert_values[eval_type] - x))
expert_df["eval_type"][expert_df[expert_df["eval_type"] == eval_type].index] = \
expert_df["eval_type"][
expert_df[expert_df["eval_type"] == eval_type].index].apply(
lambda x: replace_dict[x] if (x in replace_dict) else x)
dataframes.append(expert_df)
algorithms = ["GAML", "MLE", "MultipleShooting", "SingleShooting"]
for algo in algorithms:
if algo =="GAML":
resultsPath = Path.joinpath(mainDir, "results/GAML-cartpole-swingup/")
itr = 200
elif algo == "MLE":
resultsPath = Path.joinpath(mainDir, "results/MLE-cartpole-swingup/")
itr = 2500
elif algo == "MultipleShooting":
resultsPath = Path.joinpath(mainDir, "results/PE-MultipleShooting-cartpole-swingup/")
itr = 3000
elif algo == "SingleShooting":
resultsPath = Path.joinpath(mainDir, "results/PE-SingleShooting-cartpole-swingup/")
itr = 3000
path = str(resultsPath)
# load the results data frame for algorithm
plot_frame_files = [filename for filename in os.listdir(path) if filename.startswith("results_plot_frame_")]
if len(plot_frame_files) == 0:
continue
if len(plot_frame_files) > 1:
newest_date = os.path.getmtime(path + "/" + plot_frame_files[0])
open_file_path = path + "/" + plot_frame_files[0]
# we have multiple files need to select the right one to plot
for plot_frame_file in plot_frame_files:
date = os.path.getmtime(path + "/" + plot_frame_file)
if newest_date < date:
newest_date = os.path.getmtime(path + "/" + plot_frame_file)
open_file_path = path + "/" + plot_frame_file
print("found file", open_file_path)
df = pd.read_csv(open_file_path)
else:
print("found file", plot_frame_files[0])
df = pd.read_csv(path + "/" + plot_frame_files[0])
df["algo"] = algo
# filter extreme values
df["value"][df[df["eval_type"] == 'kl_div'].index] = df["value"][df[df["eval_type"] == 'kl_div'].index].apply(
lambda x: min(x, 50))
df["value"][df[df["eval_type"] == 'inverse_kl_div'].index] = df["value"][
df[df["eval_type"] == 'inverse_kl_div'].index].apply(lambda x: min(x, 50))
df["value"][df[df["eval_type"] == 'expected_kl_div_to_learned_model'].index] = df["value"][
df[df["eval_type"] == 'expected_kl_div_to_learned_model'].index].apply(lambda x: min(x, 50))
df["value"][df[df["eval_type"] == 'pseudo_KL'].index] = df["value"][
df[df["eval_type"] == 'pseudo_KL'].index].apply(lambda x: max(x, -50))
df = df[df.eval_type != 'avg_discounted_return']
df = df[df.eval_type != 'avg_min_displacement']
df = df[df.eval_type != 'avg_fixed_action_min_displacement']
df = df[df.eval_type != 'avg_fixed_action_displacement']
df = df[df.eval_type != 'avg_displacement']
df = df[df.eval_type != 'avg_success_rate']
df = df[df.eval_type != 'avg_boundaries_left']
# we want absolute difference to expert environment
# rename eval_types
for eval_type in expert_eval_types:
if not (eval_type == 'avg_displacement' or eval_type == 'avg_norm_displacement'):
df["value"][df[df["eval_type"] == eval_type].index] = df["value"][
df[df["eval_type"] == eval_type].index].apply(lambda x: math.fabs(expert_values[eval_type] - x))
df["eval_type"][df[df["eval_type"] == eval_type].index] = df["eval_type"][
df[df["eval_type"] == eval_type].index].apply(lambda x: replace_dict[x] if (x in replace_dict) else x)
dataframes.append(df)
# put all dataframes in one frame and plot it
result = pd.concat(dataframes)
colOrder = ["avg_norm_displacement", "diff_avg_undiscounted_return", "diff_avg_traj_length"]
g = sns.FacetGrid(result, col="eval_type", sharey=True, sharex=False, col_order=colOrder)
g.map(sns.barplot, "value", "algo", palette="deep")
# set custom title
ax = g.axes.flatten()
for a, label in zip(ax, ["Average displacement", "Difference in average undiscounted return", "Difference in average trajectory length"]):
a.set_title(label, {'fontsize': 9})
plt.tight_layout(pad=2.5, w_pad=5.0)
plt.show()
if __name__ == '__main__':
warnings.filterwarnings(action='once')
parser = argparse.ArgumentParser()
parser.add_argument('--f', type=str, default='results/', help='folder name')
parser.add_argument('--env', type=str)
parser.add_argument('--load_csv', action='store_true')
args = parser.parse_args()
experiment_master_folder_name = args.f
num_seeds = 0
num_experiments = 0
# count the seeds
# find all trials of the experiment
for fn in os.listdir(experiment_master_folder_name):
# need to go one deeper
subfolder = os.path.join(experiment_master_folder_name, fn)
if not os.path.exists(subfolder) or not os.path.isdir(subfolder):
continue
for fn2 in os.listdir(subfolder):
path = os.path.join(subfolder, fn2)
if not os.path.exists(path) or not os.path.isdir(path):
continue
num_seeds += 1
num_experiments += 1
print("total number of seeds:", num_seeds)
print("num_experiments:", num_experiments)
# rename eval_types
replace_dict = {'avg_undiscounted_return': 'diff_avg_undiscounted_return',
'avg_discounted_return': 'diff_avg_discounted_return',
'avg_success_rate': 'diff_avg_success_rate',
'avg_boundaries_left': 'diff_avg_boundaries_left',
'avg_traj_length': 'diff_avg_traj_length',
'avg_norm_displacement': 'avg_displacement',
}
dataframes = []
expert_dataframes = []
#types = ["GAML", "MLE"]
# types = ["MLE", "GAML", "SingleShooting", "MultipleShooting20"]
#types = ["MultipleShooting20", "MultipleShooting50", "MultipleShooting100", "MultipleShooting250", "SingleShooting"]
# types = ["GAML", "MLE", "MultipleShooting500", "MultipleShooting250", "MultipleShooting50", "MultipleShooting20", "SingleShooting"]
#types = ["MLE", "SingleShooting", "MultipleShooting250", "MultipleShooting100", "MultipleShooting50", "MultipleShooting20","GAML", "GAML2"]
# types = ["MLE", "SingleShooting", "MultipleShooting50", "MultipleShooting100", "MultipleShooting250",
# "MultipleShooting20", "GAML"]
#types = ["GAML", "MLE", "MultipleShooting20", "MultipleShooting50", "MultipleShooting100", "MultipleShooting250", "SingleShooting"]
if args.env == 'cartpole':
# types = ["MultipleShooting20", "GAML", "MLE", "SingleShooting", "MultipleShooting50", "MultipleShooting250",
# "MultipleShooting100"]
# types = ["GAML", "MLE", "MultipleShooting20", "MultipleShooting50", "MultipleShooting100",
# "MultipleShooting250", "SingleShooting",]
types = ["GAML", "MLE", "MultipleShooting", "SingleShooting",]
elif args.env == 'furuta':
types = ["GAML", "SingleShooting", "MLE", "MultipleShooting500", "MultipleShooting250", "MultipleShooting50",
"MultipleShooting20"]
types = ["GAML", "SingleShooting", "MLE", "MultipleShooting"]
experiment_itr = 0
read_expert = False
if args.load_csv:
        result = pd.read_csv(experiment_master_folder_name + "results_plot.csv", sep=',', encoding='utf-8')
import numpy as np
import pandas as pd
import os
import torch
import torch.nn as nn
import torch.optim as optim
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
import matplotlib.pyplot as plt
class Decomposer(nn.Module):
def __init__(self, n_farms, n_crops, n_dims):
super().__init__()
self.V = nn.Parameter(torch.randn(n_farms, n_dims, requires_grad=True).to(device))
self.U = nn.Parameter(torch.randn(n_crops, n_dims, requires_grad=True).to(device))
def forward(self):
return self.V.mm(self.U.T).to(device)
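# Decomposer learns a low-rank factorisation of the farm-by-crop matrix:
# R_hat = V @ U.T, with V of shape (n_farms, n_dims) and U of shape (n_crops, n_dims),
# so forward() returns the full (n_farms, n_crops) reconstruction.
# Illustrative (hypothetical) training step, assuming `target` is an (n_farms, n_crops) tensor:
#   model = Decomposer(n_farms=20, n_crops=20, n_dims=5)
#   optimizer = optim.Adam(model.parameters(), lr=1e-2)
#   loss = ((model() - target.to(device)) ** 2).mean(); loss.backward(); optimizer.step()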
if __name__ == "__main__":
#######################################
### DEFINE EXECUTION PARAMETERS ###
#######################################
# Define matrix name
filename = 'matrix_20_5_20_0.25.csv'
# Define matrix size
n_grid = 20
max_dims = 10
n_crops = 20
# Define training hyperparms
epochs = 5000
low = 0.1
high = 0.9
n_regimes = 9
# Set manual seeds
torch.manual_seed(42)
np.random.seed(42)
#######################################
#######################################
model_dir = os.path.join('stand_models_results', filename[:-4])
os.mkdir(model_dir)
filepath = os.path.join('data', filename)
    matrix = pd.read_csv(filepath, index_col=0)
"""
query reddit ES index
"""
import datetime
import sys
import os
import re
import logging
import pandas as pd
# import pytz
# from dateutil import parser
sys.path.insert(1, "/home/yongfeng/wissee_ai_projects/")
from common.utils import log_util
from common.utils.elasticsearch_helper import scroll_search
from common.data_streaming.reddit_trending_tickers.utils.process_tickers import ProcessTickers
from common.utils.reddit_helper.RedditQueryCompiler import RedditQueryCompiler
from common.utils.time_helper import convert_time
from datetime import date, timedelta
script_name = "query_es_reddit_postings"
def query_corpus_by_ticker(ticker: str = None,
start_time: str = '2020-01-01',
end_time: str = '2021-01-26'):
"""
query corpus by ticker from es: reddit_postings
:param end_time:
:param start_time:
:param ticker:
renamed 'query_corpus_by_ticker' to query_corpus
"""
logger = logging.getLogger(script_name)
index = 'reddit_postings'
T = ProcessTickers()
# compile ticker keywords
def filter_by_keyword_match(text):
if keyword_regex_case_sensitive and keyword_regex_case_insensitive:
if re.search(keyword_regex_case_sensitive, text) or re.search(keyword_regex_case_insensitive, text):
return True
elif keyword_regex_case_sensitive:
if re.search(keyword_regex_case_sensitive, text):
return True
elif keyword_regex_case_insensitive:
if re.search(keyword_regex_case_insensitive, text):
return True
return False
if ticker:
keywords = T.get_ticker_keywords(ticker=ticker)
else:
keywords = []
# print("no keywords found, return None")
# return
if len(keywords) > 0:
regex_patterns = T.compile_ticker_regex(ticker=ticker)
keyword_regex_case_sensitive = regex_patterns.get("case_sensitive", None)
# keyword_regex_case_sensitive = None
keyword_regex_case_insensitive = regex_patterns.get("case_insensitive", None)
else:
keyword_regex_case_sensitive = None
keyword_regex_case_insensitive = None
## compile search query
msg = "ticker is {}, keywords is {}".format(ticker, keywords)
log_util.info(logger=logger, msg=msg)
reddit_query_compiler = RedditQueryCompiler(keywords=keywords,
start_time=start_time,
end_time=end_time)
full_query = reddit_query_compiler.full_query
#### enable scroll search
records = scroll_search.scroll_search(query=full_query, index=index, page_size=1000, explain=False)
    data = pd.DataFrame(records)
    return data
import os
import json
from time import sleep
import warnings
import numpy as np
import pandas as pd
from scipy.optimize import minimize, basinhopping
from scipy.special import gamma
from tqdm import tqdm
try:
import cupy as _p
from cupy import asnumpy
from cupyx.scipy.ndimage.filters import convolve as cuda_conv
from gzbuilder_analysis.rendering.cuda.sersic import sersic2d
def convolve(render, psf, **kwargs):
return cuda_conv(render, psf, mode='mirror')
except ModuleNotFoundError:
_p = np
asnumpy = np.asarray
from scipy.signal import convolve2d
from gzbuilder_analysis.rendering.sersic import sersic2d
def convolve(render, psf, **kwargs):
return convolve2d(render, psf, mode='same', boundary='symm')
from gzbuilder_analysis.rendering.sersic import _b
warnings.simplefilter('ignore', RuntimeWarning)
def sersic_ltot(I, Re, n, gamma=gamma):
return (
2 * np.pi * I * Re**2 * n
* np.exp(_b(n)) / _b(n)**(2 * n)
* gamma(2 * n)
)
def sersic_I(L, Re, n, gamma=gamma):
return L / (
2 * np.pi * Re**2 * n
* np.exp(_b(n)) / _b(n)**(2 * n)
* gamma(2 * n)
)
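# Analytic total flux of a Sersic profile with amplitude I at the effective radius Re:
#   L_tot = 2*pi*I*Re**2 * n * exp(b_n) * Gamma(2*n) / b_n**(2*n)
# sersic_ltot evaluates this and sersic_I inverts it, recovering the amplitude that
# gives a requested total flux L for a given Re and n (used below to set the bulge
# amplitude from a bulge-to-total fraction).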
def gen_grid(shape, oversample_n):
x = _p.linspace(
0.5 / oversample_n - 0.5,
shape[1] - 0.5 - 0.5 / oversample_n,
shape[1] * oversample_n
)
y = _p.linspace(
0.5 / oversample_n - 0.5,
shape[0] - 0.5 - 0.5 / oversample_n,
shape[0] * oversample_n
)
return _p.meshgrid(x, y)
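# gen_grid returns oversampled coordinate grids whose sample points are the centres of
# the oversample_n x oversample_n sub-pixels of each image pixel (pixel i spans
# [i - 0.5, i + 0.5]), so block-averaging with downsample() recovers per-pixel means.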
def bulge_disk_render(
cx, cy,
mux=0, muy=0, Re=1, q=1, I=1, roll=0,
bulge_dx=0, bulge_dy=0, bulge_scale=0.1, bulge_q=1, bulge_roll=0,
bulge_frac=0.1, bulge_n=1
):
if I == 0 or Re == 0:
disk = _p.zeros(cx.shape)
bulge = _p.zeros(cx.shape)
else:
# sersic2d(x, y, mux, muy, roll, Re, q, c, I, n)
disk = sersic2d(cx, cy, mux, muy, roll, Re, q, 2, I, 1)
if bulge_scale == 0 or bulge_frac == 0:
bulge = _p.zeros(cx.shape)
else:
disk_l = sersic_ltot(I, Re, 1)
comp_l = disk_l * bulge_frac / (1 - bulge_frac)
bulge_I = sersic_I(comp_l, bulge_scale * Re, bulge_n)
bulge = sersic2d(
cx, cy,
mux + bulge_dx, muy + bulge_dy,
bulge_roll, bulge_scale * Re,
bulge_q, 2, bulge_I, bulge_n
)
return (disk + bulge)
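# Bulge flux convention used above: bulge_frac = L_bulge / (L_bulge + L_disk), so the
# bulge luminosity is comp_l = disk_l * bulge_frac / (1 - bulge_frac) and sersic_I
# converts it into the bulge amplitude at effective radius bulge_scale * Re.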
def downsample(render, oversample_n, size):
return render.reshape(
size[0], oversample_n, size[1], oversample_n
).mean(3).mean(1)
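# downsample block-averages the oversampled render back onto the target pixel grid:
# an (H*oversample_n, W*oversample_n) array is reshaped to (H, n, W, n) and averaged
# over both sub-pixel axes, giving an (H, W) image of pixel-mean fluxes.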
fm = pd.read_pickle('lib/fitting_metadata.pkl')
lims_df = pd.DataFrame(dict(
mux=[-np.inf, np.inf],
muy=[-np.inf, np.inf],
Re=[0, np.inf],
q=[0.2, 1],
I=[0, np.inf],
roll=[-np.inf, np.inf],
bulge_dx=[-np.inf, np.inf],
bulge_dy=[-np.inf, np.inf],
bulge_scale=[0, 1],
bulge_q=[0.4, 1],
bulge_roll=[-np.inf, np.inf],
bulge_frac=[0, 0.95],
bulge_n=[0.6, 8],
), index=('lower', 'upper')).T
class BasinhoppingBounds(object):
def __init__(self, lims):
self.lims = lims
def __call__(self, **kwargs):
x = kwargs["x_new"]
tmax = np.all(x <= self.lims['upper'].values)
tmin = np.all(x >= self.lims['lower'].values)
return tmax and tmin
lims = BasinhoppingBounds(lims_df)
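# BasinhoppingBounds follows the scipy.optimize.basinhopping accept_test protocol:
# it is called with the keyword argument x_new and returns True only when the proposed
# minimum lies inside the parameter bounds, e.g. (illustrative call, the exact
# invocation may differ): basinhopping(_f, p_gd, accept_test=lims, callback=save_minima)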
with tqdm(fm.index, desc='Fitting subjects') as bar:
for subject_id in bar:
bar.set_description(f'Fitting subjects (minimize) ')
# subject_id = 21686598
if not os.path.isfile(f'2comp_fits_nb4/minima/{subject_id}.csv'):
target = fm.loc[subject_id]['galaxy_data']
cp_mask = _p.asarray(target.mask)
cp_target = _p.asarray(target.data)
cp_psf = _p.asarray(fm['psf'][subject_id])
cp_sigma = _p.asarray(fm['sigma_image'][subject_id].data)
p0 = pd.Series(dict(
mux=target.shape[1] / 2,
muy=target.shape[1] / 2,
Re=20,
q=1,
I=0.8,
roll=0,
# bulge_dx=0,
# bulge_dy=0,
bulge_scale=0.2,
bulge_q=1,
bulge_roll=0,
bulge_frac=0.2,
# bulge_n=2,
))
oversample_n = 5
cx, cy = gen_grid(target.shape, oversample_n)
ndof = len(target.compressed())
def _f(p):
kw = {k: v for k, v in zip(p0.index, p)}
kw.setdefault('bulge_n', 4)
render = bulge_disk_render(cx, cy, **kw)
downsampled_render = downsample(render, oversample_n, size=target.shape)
psf_conv_render = convolve(downsampled_render, cp_psf)
diff = psf_conv_render[~cp_mask] - cp_target[~cp_mask]
chisq = asnumpy(_p.sum((diff / cp_sigma[~cp_mask])**2) / ndof)
return chisq
gradient_descent_res = minimize(
_f,
p0,
bounds=lims_df.reindex(p0.index).values,
)
p_gd = pd.Series(gradient_descent_res['x'], index=p0.index)
bar.set_description(f'Fitting subjects (basinhopping)')
minima = pd.DataFrame(columns=(*p0.index, 'chisq', 'accepted'))
def save_minima(x, f, accepted):
                minimum = pd.Series(x, index=p0.index)
#!/usr/bin/env python
# Imports
import gzip
import os
import numpy as np
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import pickle
import time
import math
from collections import Counter, defaultdict
# Keras imports
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution1D, Flatten, MaxPooling1D
from keras.optimizers import Adam
from keras.models import load_model
from keras import backend as K
from mcfly import modelgen, find_architecture
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve
from sklearn.utils import shuffle
from sklearn.metrics import f1_score
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import label_binarize
# TensorFlow import
import tensorflow as tf
# Pandas import
import pandas as pd
import argparse
#import altair as alt
# Bokeh import
#from bokeh.io import show, output_file
#from bokeh.plotting import figure
import logging
def get_classes(labels):
return sorted(list(set(labels)))
def get_channel_labels():
# Fill labels for legend
labels = []
with open("Channel_Labels.txt","r") as inlab:
for line in inlab:
line = line.strip()
labels += [line]
return labels
def data(datapath):
dataset_type = '_balanced'
data_input_file = datapath
npzfiles = np.load(data_input_file)
X = npzfiles['X']
y = npzfiles['y']
y_binary = npzfiles['y_binary']
win_ids = npzfiles['ids']
# logging.info(X.shape)
# logging.info(y.shape)
# logging.info(y.shape)
# logging.info(win_ids.shape)
# idx = np.arange(0,9)
# idx = np.append(idx, np.arange(33,35))
# idx = np.append(idx, np.arange(41, 44))
# idx = np.append(idx,[12,16,20,24,28,32])
return X, y, y_binary, win_ids
def create_model(X, y_binary, lr, drp1, drp2):
models = modelgen.generate_models(X.shape,
y_binary.shape[1],
number_of_models = 1,
model_type = 'CNN',
cnn_min_layers=2,
cnn_max_layers=2,
cnn_min_filters = 4,
cnn_max_filters = 4,
cnn_min_fc_nodes=6,
cnn_max_fc_nodes=6,
low_lr=lr, high_lr=lr,
low_reg=1, high_reg=1,
kernel_size=7, max_drp_out1=drp1,
max_drp_out2=drp2,
min_drp_out1=drp1,
min_drp_out2=drp2)
# models = modelgen.generate_models(X.shape,
# y_binary.shape[1],
# number_of_models = 1,
# model_type = 'CNN',
# cnn_min_layers=4,
# cnn_max_layers=4,
# cnn_min_filters = 6,
# cnn_max_filters = 6,
# cnn_min_fc_nodes=12,
# cnn_max_fc_nodes=12,
# low_lr=2, high_lr=2,
# low_reg=1, high_reg=1,
# kernel_size = 7)
# i = 0
# for model, params, model_types in models:
# logging.info('model ' + str(i))
# i = i + 1
# logging.info(params)
# model.summary()
return models
def cross_validation(X, y, y_binary, channels, X_test, y_test, y_binary_test, output_dir_test, win_ids_test, split, epochs, lr, drp1, drp2):
results = pd.DataFrame()
X, y_binary = shuffle(X, y_binary, random_state=0)
xtrain, xval, ytrain_binary, yval = train_test_split(X, y_binary,
test_size=split, random_state=2)
#print(xtrain.shape)
#print(xval.shape)
#print(ytrain_binary.shape)
#print(yval.shape)
#print(ytrain_binary)
#print(yval)
for i in range(0, 10):
logging.info("Training model " + str(i + 1) + "/10...")
output_iter_dir = output_dir_test+'/Training_Iteration_' + str(i + 1)
if not os.path.isdir(output_iter_dir):
os.mkdir(output_iter_dir)
# Clear model, and create it
model = None
model = create_model(X, y_binary, lr, drp1, drp2)
# Debug message I guess
logging.info ("Training new iteration on " + str(xtrain.shape[0]) + " training samples, " +
str(xval.shape[0]) + " validation samples, this may take a while...")
history, model = train_model(model, xtrain, ytrain_binary, xval, yval, epochs, i)
model.save(output_iter_dir+"/Best_Model_Iteration_"+str(i+1)+".h5")
with open(output_iter_dir+'/Best_Model_History_Iteration_'+str(i+1), 'wb') as file_pi:
pickle.dump(history.history, file_pi)
accuracy_history = history.history['acc']
val_accuracy_history = history.history['val_acc']
logging.info("Last training accuracy: " + str(accuracy_history[-1]) + ", last validation accuracy: " + str(
val_accuracy_history[-1]))
test_start = time.time()
score_test = model.evaluate(X_test, y_binary_test, verbose=False)
test_end = time.time()
test_time = test_end - test_start
logging.info("TESTTIME2: Test time Iteration " + str(i + 1) + ": " + str(test_time))
logging.info('Test loss and accuracy of best model: ' + str(score_test))
results, probs = evaluate_model(model, X_test, y_test, y_binary_test, results, i, channels, output_iter_dir, epochs, hist=history.history,
train_set_size=xtrain.shape[0],
validation_set_size=xval.shape[0])
with open(output_iter_dir+"/Called_Test_SVs.txt", "w") as out_sv:
out_sv.write("Chromosome\tStart\tEnd\tProbs[DEL]\tProbs[No_DEL]\n")
for k in range(0, len(win_ids_test)):
out_sv.write("\t".join(["\t".join(win_ids_test[k].split("_")), str(probs[k][0]), str(probs[k][1])])+"\n")
return results
def train_model(model, xtrain, ytrain, xval, yval, epochs, i):
train_set_size = xtrain.shape[0]
#print(xtrain.shape)
#print(ytrain.shape)
#print(xval.shape)
#print(yval.shape)
histories, val_accuracies, val_losses = find_architecture.train_models_on_samples(xtrain, ytrain,
xval, yval,
model, nr_epochs=epochs,
subset_size=train_set_size,
verbose=False)
best_model_index = np.argmax(val_accuracies)
best_model, best_params, best_model_types = model[best_model_index]
#logging.info(best_model_index, best_model_types, best_params)
nr_epochs = epochs
train_start = time.time()
history = best_model.fit(xtrain, ytrain,
epochs=nr_epochs, validation_data=(xval, yval),
verbose=False)
train_end = time.time()
train_time = train_end - train_start
logging.info("TRAINTIME: Training time Iteration " + str(i + 1) + ": " + str(train_time))
return history, best_model
def evaluate_model(model, X_test, y_test, ytest_binary, results, cv_iter, channels, output_dir, epochs, hist,
train_set_size, validation_set_size):
#Generate classes
classes = sorted(list(set(y_test)))
mapclasses = dict()
for i, c in enumerate(classes):
mapclasses[c] = i
dict_sorted = sorted(mapclasses.items(), key=lambda x: x[1])
#print(dict_sorted)
# logging.info(dict_sorted)
class_labels = [i[0] for i in dict_sorted]
#print(class_labels)
n_classes = ytest_binary.shape[1]
# logging.info(ytest_binary)
# logging.info(n_classes)
test_start = time.time()
probs = model.predict_proba(X_test, batch_size=1, verbose=False)
test_end = time.time()
test_time = test_end - test_start
logging.info("TESTTIME: Test time Iteration " + str(cv_iter + 1) + ": " + str(test_time))
# generate confusion matrix
labels = sorted(list(set(y_test)))
predicted = probs.argmax(axis=1)
y_index = ytest_binary.argmax(axis=1)
confusion_matrix = pd.crosstab(pd.Series(y_index), pd.Series(predicted))
confusion_matrix.index = [labels[i] for i in confusion_matrix.index]
confusion_matrix.columns = [labels[i] for i in confusion_matrix.columns]
    confusion_matrix = confusion_matrix.reindex(columns=[l for l in labels], fill_value=0)
logging.info(confusion_matrix)
confusion_matrix.to_csv(path_or_buf=output_dir+'/NA12878_confusion_matrix_cv_iter_' + str(cv_iter + 1) + '.csv')
# print(np.diag(confusion_matrix))
# print(confusion_matrix.sum(axis=1))
print(confusion_matrix)
# logging.info('Precision: %d' % int(np.diag(confusion_matrix) / confusion_matrix.sum(axis=1) * 100))
# logging.info('Recall: %d' % int(np.diag(confusion_matrix)/confusion_matrix.sum(axis=0)*100))
# For each class
precision = dict()
recall = dict()
f1 = dict()
average_precision = dict()
#average_f1 = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(ytest_binary[:, i],
probs[:, i])
        # F1 is the harmonic mean of precision and recall: 2*P*R/(P+R)
        f1[i] = 2 * (precision[i] * recall[i]) / (precision[i] + recall[i])
average_precision[i] = average_precision_score(ytest_binary[:, i], probs[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = precision_recall_curve(ytest_binary.ravel(),
probs.ravel())
average_precision["micro"] = average_precision_score(ytest_binary, probs,
average="micro")
#average_f1["micro"] = f1_score(ytest_binary, probs, average="micro")
logging.info('Average precision score, micro-averaged over all classes: {0:0.2f}'.format(average_precision["micro"]))
for key in f1:
logging.info("Iteration_"+str(cv_iter+1)+" - F1Score_"+str(key)+": "+str(f1[key]))
results = results.append({
"channels": channels,
"iter": cv_iter+1,
"training_set_size": train_set_size,
"validation_set_size": validation_set_size,
"test_set_size": X_test.shape[0],
"average_precision_score": average_precision["micro"]}, ignore_index=True)
plt.figure()
plt.step(recall['micro'], precision['micro'], color='b', alpha=0.2,
where='post')
plt.fill_between(recall["micro"], precision["micro"], alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title(
'Average precision score, micro-averaged over all classes: AP={0:0.2f}'
.format(average_precision["micro"]))
plt.savefig(output_dir+'/Precision_Recall_avg_prec_score_Iter_'+str(cv_iter)+'_'+channels+'.png', bbox_inches='tight')
plt.close()
from itertools import cycle
# setup plot details
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
plt.figure(figsize=(7, 8))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
for f_score in f_scores:
x = np.linspace(0.01, 1)
y = f_score * x / (2 * x - f_score)
l, = plt.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
plt.annotate('f1={0:0.1f}'.format(f_score), xy=(0.9, y[45] + 0.02))
lines.append(l)
labels.append('iso-f1 curves')
l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
lines.append(l)
labels.append('micro-average Precision-recall (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = plt.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (area = {1:0.2f})'
''.format(class_labels[i], average_precision[i]))
fig = plt.gcf()
fig.subplots_adjust(bottom=0.25)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(lines, labels, loc=(0, -.38), prop=dict(size=14))
plt.savefig(output_dir+'/Precision_Recall_avg_prec_score_per_class_Iter_' +
str(cv_iter) +'_'+channels+'.png', bbox_inches='tight')
plt.close()
history = hist
acc = history["acc"]
loss = history["loss"]
val_acc = history["val_acc"]
val_loss = history["val_loss"]
x = range(1, len(acc)+1)
#print(x)
#print(acc)
fig, ax1 = plt.subplots()
ax1.set_xlabel('Epoch')
ax1.set_ylabel('Accuracy', color='r')
plt.plot(x, acc, color='red', label="Acc")
plt.plot(x, val_acc, color='lightcoral', label="Val_Acc")
ax1.tick_params(axis='y', color='r', labelcolor='r')
ax2 = ax1.twinx()
ax2.set_ylabel('Loss', color='blue')
plt.plot(x, loss, color='blue', label="Loss")
plt.plot(x, val_loss, color='lightblue', label="Val_Loss")
ax2.tick_params(axis='y', color='blue', labelcolor='blue')
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
plt.legend(h1 + h2, l1 + l2, bbox_to_anchor=(0.5, -0.1))
fig.tight_layout()
plt.savefig(output_dir+"/BestModelHistory_Plot.png", dpi=300, format='png')
plt.close()
# for iter_class in mapclasses.values():
#
# predicted = probs.argmax(axis=1)
# #logging.info(predicted)
# y_pred_class = np.array([1 if i == iter_class else 0 for i in predicted])
# #logging.info(y_pred_class)
#
# # keep probabilities for the positive outcome only
# probs_class = probs[:, iter_class]
# #logging.info(probs_class)
#
# #logging.info(y_test)
#
# y_test_class = np.array([1 if i[iter_class] == 1 else 0 for i in ytest_binary])
#
# # calculate precision-recall curve
# precision, recall, thresholds = precision_recall_curve(y_test_class, probs_class)
# # calculate F1 score
# f1 = f1_score(y_test_class, y_pred_class)
# # calculate precision-recall AUC
# auc_value = auc(recall, precision)
# # calculate average precision score
# ap = average_precision_score(y_test_class, probs_class)
# logging.info('f1=%.3f auc=%.3f average_precision_score=%.3f' % (f1, auc_value , ap))
# # plot no skill
# plt.plot([0, 1], [0.5, 0.5], linestyle='--')
# # plot the roc curve for the model
# plt.plot(recall, precision, marker='.')
# # show the plot
# plt.savefig('Plots/Precision_Recall_multiclass_Iter_'+str(cv_iter)+'_'+channels+'.png', bbox_inches='tight')
return results, probs
def run_cv(output_dir_test, datapath_training, datapath_test, split, epochs, lr, drp1, drp2):
labels = get_channel_labels()
results = pd.DataFrame()
channels = 'all'
logging.info('Running cv with '+channels+' channels:')
for i, l in enumerate(labels):
logging.info(str(i) + ':' + l)
# Load the data
X, y, y_binary, win_ids = data(datapath_training)
X_test, y_test, y_binary_test, win_ids_test = data(datapath_test)
results = results.append(cross_validation(X, y, y_binary, channels, X_test, y_test, y_binary_test, output_dir_test, win_ids_test, split, epochs, lr, drp1, drp2))
logging.info(results)
results.to_csv(output_dir_test+"/CV_results.csv", sep='\t')
def plot_results(output_dir_test):
    source = pd.read_csv(filepath_or_buffer=output_dir_test+'/CV_results.csv', delimiter='\t')
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
        # so it needs to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
        result = frame.cov(min_periods=len(frame) - 8)
        expected = frame.cov()
        expected.loc['A', 'B'] = np.nan
        expected.loc['B', 'A'] = np.nan
        tm.assert_frame_equal(result, expected)
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
    def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
        result = getattr(df, method)()
        expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
        tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [pd.Timestamp('1960-02-15'),
pd.Timestamp('1960-02-16'),
pd.NaT,
pd.NaT]
df = DataFrame({
"A": float_data,
"B": datetime_data
})
result = df.any(1)
expected = Series([True, True, True, False])
tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
# GH 25101
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None]})
result = df.all(bool_only=True)
expected = Series(dtype=np.bool)
tm.assert_series_equal(result, expected)
df = DataFrame({"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True]})
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
(np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
(np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
(np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
(np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
# # Mix
# GH 21484
# (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
# 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
# GH 19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
names=['out', 'in'])
)
xpr = "Must specify 'axis' when aggregating by level."
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level='out')
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH 4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# GH 16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH 4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH 16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH 15473
df1_ts = DataFrame({'date':
pd.to_datetime(['2014-01-01', '2014-01-02'])})
df1_td = DataFrame({'date':
[pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
df2 = DataFrame({'date': []})
df3 = DataFrame()
expected = DataFrame({'date': [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Rounding
def test_round(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
'col2': [1.23, 2.23, 3.23]})
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with pytest.raises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
tm.assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
tm.assert_frame_equal(df.round(partial_round_dict),
expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
tm.assert_frame_equal(df.round(wrong_round_dict),
expected_partially_rounded)
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Negative numbers
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1': [110., 210, 310], 'col2': [100., 200, 300]})
tm.assert_frame_equal(big_df.round(negative_round_dict),
expected_neg_rounded)
# nan in Series round
nan_round_Series = Series({'col1': np.nan, 'col2': 1})
# TODO(wesm): unused?
expected_nan_round = DataFrame({ # noqa
'col1': [1.123, 2.123, 3.123],
'col2': [1.2, 2.2, 3.2]})
with pytest.raises(TypeError):
df.round(nan_round_Series)
# Make sure this doesn't break existing Series.round
tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
# named columns
# GH 11986
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
df.columns.name = "cols"
expected_rounded.columns.name = "cols"
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# interaction of named columns & series
tm.assert_series_equal(df['col1'].round(decimals),
expected_rounded['col1'])
tm.assert_series_equal(df.round(decimals)['col1'],
expected_rounded['col1'])
def test_numpy_round(self):
# GH 12600
df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
out = np.round(df, decimals=0)
expected = DataFrame([[2., 1.], [0., 7.]])
        tm.assert_frame_equal(out, expected)
from datetime import date as dt
import pandas as pd
import requests
import bandl.common
#default periods
DEFAULT_DAYS = 250
def is_ind_index(symbol):
is_it = symbol in bandl.common.IND_INDICES
return is_it
def get_formated_date(date=None,format=None,dayfirst=False):
"""string date to format date
"""
try:
if not date:
date = dt.today()
        date_time = pd.to_datetime(date, dayfirst=dayfirst)
from __future__ import print_function
import argparse
import joblib
import os
import pandas as pd
import logging
import numpy as np
from sklearn.linear_model import Ridge, RidgeCV, LassoCV, Lasso
from sklearn.model_selection import cross_val_score
def train(args, train_X, train_y, model):
'''
    Train the model selected by ``model`` ('ridge' or 'lasso') and return it, e.g.:
    ridge_model = train(args, train_X, train_y, model="ridge")
'''
print("alpha value in train: ", args.alpha)
if model =='ridge':
model = Ridge(alpha= args.alpha)
print("Train Ridge model")
elif model == 'lasso':
model = Lasso(alpha=1.0)
print("Train Lasso model")
model.fit(train_X, train_y)
return model
def save_model(model, model_folder, model_name):
'''
    Save the trained model to the given folder under the given file name.
'''
save_path = os.path.join(model_folder, model_name)
joblib.dump(model, save_path)
print(f'{save_path} is saved')
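# Hedged illustration (not part of the original training script): a
# train -> save -> reload round trip. The alpha value, array shapes and output
# folder below are placeholders, not SageMaker defaults.
def _example_train_save_reload(model_folder='/tmp'):
    demo_args = argparse.Namespace(alpha=0.5)
    train_X = np.random.randn(20, 3)
    train_y = np.random.randn(20)
    ridge_model = train(demo_args, train_X, train_y, model='ridge')
    save_model(ridge_model, model_folder, 'demo_model.joblib')
    return joblib.load(os.path.join(model_folder, 'demo_model.joblib'))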
def handle_input_data(args):
'''
    Convert the training data to X, y and return them.
'''
# Take the set of files and read them all into a single pandas dataframe
input_files = [ os.path.join(args.train, file) for file in os.listdir(args.train) ]
if len(input_files) == 0:
raise ValueError(('There are no files in {}.\n' +
'This usually indicates that the channel ({}) was incorrectly specified,\n' +
'the data specification in S3 was incorrectly specified or the role specified\n' +
'does not have permission to access the data.').format(args.train, "train"))
    raw_data = [ pd.read_csv(file, header=None, engine="python") for file in input_files ]
from glob import glob
import os
import numpy as np
import scipy.io as sio
import lmfit
from lmfit import Model, models
from sklearn import linear_model as lm
from fragmenter import RegionExtractor as re
from niio import loaded, write
import plotnine
from plotnine import ggplot, geom_point, geom_density_2d, geom_line, aes, ggtitle, geom_boxplot, xlab
import matplotlib.pyplot as plt
import pandas as pd
"""
Set of methods for plotting the results of source-to-target
correlation vs. dispersion data.
csv2matrix:
Convert aggregated coefficient csv files to directed adjacency matrices.
plot_regional:
Visualize results of model fits using single region as source OR target
aggregate_model_fits:
Combines all source-target pair coefficients into single file.
plot_dispersion:
Visualize results of correlation as a function of spatial dispersion.
Fits a Power Law model to data and saves model coefficients.
pair:
Visualize results of correlation as function of spatial dispersion.
Plots scatterplot and 2d-density.
fit:
Sub-method. Fits Power Law model.
plot_model_fit:
Sub-method. Plots fitted model.
"""
def connectopyBy(subject_id, hemisphere, labeldir, conndir, nsize=5):
"""
Generate DataFrame objects for each source region. Contains the connectopy
data for the first two eigenvectors of the source region, along with the
pairwise correlation / dispersion data.
Parameters:
- - - - -
subject_id: string
Subject name
hemisphere: string
Hemisphere to process ['L', 'R']
labeldir: string
Directory where label files exist
conndir: string
Directory where connectopy maps exist
nsize: int
Mapping neighborhood size
"""
dir_out = '%sPlots/%s/' % (conndir, subject_id)
dir_evec = '%sDesikan/%s/' % (conndir, subject_id)
dir_func = '%sNeighborFunctional/%s/' % (conndir, subject_id)
dir_disp = '%sNeighborDistances/%s/' % (conndir, subject_id)
label_file = '%s%s.%s.aparc.32k_fs_LR.label.gii' % (labeldir, subject_id, hemisphere)
R = re.Extractor(label_file)
region_map = R.map_regions()
regions = list(region_map.keys())
regions.sort()
regions = [x for x in regions if x not in ['corpuscallosum']]
col_names = ['e1', 'e2'] + regions
for i, source_region in enumerate(regions):
func_df = pd.DataFrame(columns=col_names, index=None)
disp_df = pd.DataFrame(columns=col_names, index=None)
sinds = region_map[source_region]
evec_file = '%s%s.%s.%s.2.brain.Evecs.func.gii' % (
dir_evec, subject_id, hemisphere, source_region)
evecs = loaded.load(evec_file)
func_df['e1'] = evecs[sinds, 0]
func_df['e2'] = evecs[sinds, 1]
disp_df['e1'] = evecs[sinds, 0]
disp_df['e2'] = evecs[sinds, 1]
for j, target_region in enumerate(regions):
if source_region != target_region:
func_file = '%s%s.%s.%s.2.%s.mean_knn.func.gii' % (
dir_func, subject_id, hemisphere, source_region, target_region)
func = loaded.load(func_file)
func = func[sinds, nsize-1]
func_df[target_region] = func
disp_file = '%s%s.%s.Distance.2.%s.func.gii' % (
dir_disp, subject_id, hemisphere, target_region)
disp = loaded.load(disp_file)
disp = disp[sinds, nsize-1]
disp_df[target_region] = disp
else:
disp_df[target_region] = np.nan
func_df[target_region] = np.nan
out_func = '%s%s.%s.%s.Functional.csv' % (dir_out, subject_id, hemisphere, source_region)
out_dist = '%s%s.%s.%s.Dispersion.csv' % (dir_out, subject_id, hemisphere, source_region)
func_df.to_csv(out_func, index=False)
disp_df.to_csv(out_dist, index=False)
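# Hedged usage sketch for connectopyBy; the subject ID, directory paths and the
# 'precentral' source region in the output filename are hypothetical, following
# the naming pattern used inside the function body.
def _example_connectopy_by():
    connectopyBy(subject_id='sub-001', hemisphere='L',
                 labeldir='/data/labels/', conndir='/data/connectopy/', nsize=5)
    out_csv = '/data/connectopy/Plots/sub-001/sub-001.L.precentral.Functional.csv'
    return pd.read_csv(out_csv)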
def csv2matrix(subject_id, hemisphere, modeldir, mtype):
"""
Convert aggregated model coefficients to matrix representation.
Rows of matrix represent source. Columns of matric represent target.
Parameters:
- - - - -
subject_id: string
Subject name
hemisphere: string
L/R
modeldir: string
Path where model coefficients are saved
mtype: string
type of model (Power, Exponent)
"""
subj_dir = '%s%s/' % (modeldir, subject_id)
coef_file = '%s%s.%s.%s.Fit.Coefficients.csv' % (
subj_dir, subject_id, hemisphere, mtype)
coefs = pd.read_csv(coef_file, index_col=False)
regions = list(coefs['target_region'].unique())
regions.sort()
nr = len(regions)
reg_map = dict(zip(regions, np.arange(nr)))
params = list(set(list(coefs.columns)).difference(
['Unnamed: 0', 'source_region', 'target_region',
'aic', 'bic', 'name']))
n, p = coefs.shape
for param in params:
temp_array = np.zeros((nr, nr))
for j in np.arange(n):
temp_data = coefs.iloc[j]
temp_source = temp_data['source_region']
temp_target = temp_data['target_region']
temp_array[reg_map[temp_source], reg_map[temp_target]] = temp_data[param]
temp = {param: temp_array}
out_matrix = '%s%s.%s.%s.%s' % (subj_dir, subject_id, hemisphere, mtype, param)
sio.savemat(file_name=out_matrix, mdict=temp)
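# Hedged sketch of reading back one of the matrices written by csv2matrix above;
# scipy.io appends the '.mat' extension on save, and the parameter name
# 'amplitude' is a placeholder for whichever coefficient was exported.
def _example_load_coef_matrix(subj_dir, subject_id, hemisphere, mtype,
                              param='amplitude'):
    mat_file = '%s%s.%s.%s.%s.mat' % (subj_dir, subject_id, hemisphere, mtype, param)
    return sio.loadmat(mat_file)[param]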
def aggregate_model_fits(subject_id, hemisphere, modeldir, mtype):
"""
Aggregate the coefficients across all region pairs for a single subject.
Parameters:
- - - - -
subject_id: string
Subject name
hemisphere: string
L/R
modeldir: string
Oath where model coefficients are saved
mtype: string
Model type, in ['Power', 'Exponential', 'Linear']
"""
subj_dir = '%s%s/' % (modeldir, subject_id)
model_extension = '%s.%s.%s.Fit.Coefficients.*.2.*.csv' % (
subject_id, hemisphere, mtype)
model_files = glob('%s%s' % (subj_dir, model_extension))
m = [None]*len(model_files)
for i, model in enumerate(model_files):
temp = pd.read_csv(model, index_col=False)
m[i] = temp
models = pd.concat(m, sort=False)
models.index = np.arange(models.shape[0])
out_coefs = '%s%s.%s.%s.Fit.Coefficients.csv' % (
subj_dir, subject_id, hemisphere, mtype)
models.to_csv(out_coefs)
return models
def plot_dispersion(subject_id, region_1, region_2, dir_label, dir_func, dir_dist, hemisphere, outdir,
nsize=5):
"""
Method to generate functional connectivity dispersion plots.
Two plots are generated:
1. Density plots of dispersion (x) vs. correlation (y)
2. Fitted exponential regression of dispersion (x) vs. correlation (y)
Parameters:
- - - - -
subject_id: string
Subject name
region_1, region_2: strings
Names of regions for which to analyze functional dispersion
dir_label: string
Directory where label files are located
dir_func: string
Directory where neighborhood functional maps are located
dir_dist: string
Directory where neighborhood distance maps are located
outdir: string
Output directory
hemisphere: string
L/R
nsize: int
Size of neighborhood mapping to consider
Default = 5
"""
subj_outdir = '%s%s/' % (outdir, subject_id)
if not os.path.exists(subj_outdir):
os.mkdir(subj_outdir)
[_, X, y] = prep_data(subject_id, region_1, region_2, dir_label,
dir_func, dir_dist, hemisphere, nsize)
# g = density(X, y, region_1, region_2)
M = fit(X, y)
for model, mtype in zip(M, ['Exponential', 'Power', 'Linear']):
save_model(outdir, mtype, subject_id,
hemisphere, model, region_1, region_2)
[_, X, y] = prep_data(subject_id, region_2, region_1, dir_label,
dir_func, dir_dist, hemisphere, nsize)
# g = density(X, y, region_2, region_1)
M = fit(X, y)
for model, mtype in zip(M, ['Exponential', 'Power', 'Linear']):
save_model(outdir, mtype, subject_id,
hemisphere, model, region_2, region_1)
def density(X, y, sreg, treg):
"""
Plot the 2d-density of the size vs correlation data.
Parameters:
- - - - -
X: float, array
independent variable
y: float, array
dependent variable
Returns:
- - - -
g: figure
density plot
"""
df = pd.DataFrame({'Size': X,
'Correlation': y})
g = (ggplot(df, aes('Size', 'Correlation'))
+ geom_point(alpha=0.5, size=0.25)
+ geom_density_2d(size=1, color='r')
+ plotnine.ggtitle('Dispersion Correlations\n{:} --> {:}'.format(sreg, treg)))
return g
def prep_data(subject_id, sreg, treg, region_map, dir_func, dir_dist, hemisphere, nsize):
"""
Source and target distance and correlation data for modeling.
Parameters:
- - - - -
subject_id: string
Subject name
sreg, treg: string
source, target region pair
    region_map: dictionary
mapping from region name to cortical indices
dir_func: string
Directory where Nearest Neighbor correlation maps exist
dir_dist: string:
Directory where Nearest Neighbor distance maps exist
hemisphere: string
Hemisphere to process, in ['L', 'R']
nsize: int
neighborhood size
Returns:
- - - -
inds: int, array
indices of source voxels
x: float, array
dispersion vector
y: float, array
correlation vector
"""
base_knn = '%s.%s.knn_mean.2.%s.func.gii' % (subject_id, hemisphere, treg)
in_knn = '%s%s/%s' % (dir_func, subject_id, base_knn)
knn = loaded.load(in_knn)
base_dist = '%s.%s.Distance.2.%s.func.gii' % (subject_id, hemisphere, treg)
in_dist = '%s%s/%s' % (dir_dist, subject_id, base_dist)
dist = loaded.load(in_dist)
source_indices = region_map[sreg]
nsize = nsize-1
x = dist[source_indices, nsize]
y = knn[source_indices, nsize]
inds = np.arange(len(source_indices))[~np.isnan(y)]
inds = inds[y[inds] != 0]
x = x[inds]
y = y[inds]
return [inds, x, y]
def save_model(dir_out, mtype, subject_id, hemisphere, model, sreg, treg):
"""
Method to save a model and plot the fit and residuals.
Parameters:
- - - - -
dir_out: string
Directory where model coefficients and plots are saved
mtype: string
Model type, in ['Power', 'Exponential', 'Linear']
subject_id: string
Subject name
hemisphere: string
Hemisphere to process, in ['L', 'R']
model: lmfit model object
fitted model
sreg, treg: string
source, target region pair
"""
    [F, gs] = model.plot()
    ax0 = F.axes[0]
    ax1 = F.axes[1]
    curr_title = ax0.get_title()
    ax0_title = curr_title + ' Residuals'
    ax1_title = curr_title + ' Fit'
    ext = '%s to %s\n' % (sreg, treg)
    ax0.set_title(ext + ax0_title)
    ax1.set_title(ext + ax1_title)
F.tight_layout()
data_dict = {'source_region': [sreg],
'target_region': [treg]}
for k, v in model.params.valuesdict().items():
data_dict[k] = v
data_dict['aic'] = [model.aic]
data_dict['bic'] = [model.bic]
data_dict['name'] = [model.model.name]
df = | pd.DataFrame(data_dict) | pandas.DataFrame |
# coding: utf-8
# Author: <NAME>
import os
import sys
import traceback
from datetime import datetime
import pandas as pd
import numpy as np
import woe_tools as woe
usage = '''
################################### Summarize #######################################
This toolkit is used for data preprocessing and contains the following components:
1.Cap
2.Floor
3.MissingImpute
4.Woe
5.Normalize
6.Scale
7.Tactic
-------------------------------------------------------------------------------------
Usage:
import pandas as pd
import numpy as np
import preprocess as pp
df_train = pd.read_csv('train_data.csv')
df_test = pd.read_csv('test_data.csv')
df_config = pd.read_csv('edd_config.csv')
# Call a single component:
operation = pp.MissingImpute(df_config)
df_reference = operation.fit(df_train)
df_train = operation.apply(df_train)
df_test = operation.apply(df_test)
# Design the whole preprocessing pipeline:
process = pp.Tactic(df_config, process_list=[pp.Cap, pp.Floor, pp.MissingImpute, pp.Woe], target='target')
process.summary()
df_reference = process.fit(df_train)
df_train = process.apply(df_train)
df_test = process.apply(df_test)
process.save_reference('./edd_reference.csv')
# A previously generated reference table can also be loaded and applied to the data directly:
df_reference = pd.read_csv('edd_reference.csv')
process = pp.Tactic(df_reference, process_list=[pp.Cap, pp.Floor, pp.MissingImpute, pp.Woe], target='target')
df_train = process.apply(df_train)
df_test = process.apply(df_test)
---------------------------------------------------------------------------------------
Notes:
1. Do not apply the WOE transform while the data still contains missing values;
2. When the pipeline includes WOE, the target must be specified, otherwise an error is raised;
3. For a new dataset, it is best to run the preprocessing step by step the first time, so the output of each step can be checked.
#######################################################################################
'''
def __map_feature_type(t, time_as_num=False):
"""
convert the dataFrame type to feature type (Numerical or Categorical)
"""
if t in (int, np.int64, np.int32, np.int16, bool, float, np.float32, np.float64, np.float128):
return 'numerical'
elif t in (str,):
return 'categorical'
    elif t in (pd.Timestamp, ):
return 'numerical' if time_as_num else 'timestamp'
def __extract_feature_type(df, known_columns={}):
"""
extract columns type of a dataframe and map it
"""
col_list = []
for var in df.columns:
if var in known_columns:
col_list.append((var, known_columns[var]))
continue
var_type = __map_feature_type(df[var].dtype.type)
if var_type is not None:
col_list.append((var, var_type))
continue
type_set = set(df[var][~df[var].isnull()].apply(lambda x: type(x)))
if len(type_set) == 1:
var_type = __map_feature_type(type_set.pop())
if var_type is not None:
col_list.append((var, var_type))
continue
raise ValueError('Unknown type of column "{0}" as {1}'.format(var, type_set))
return col_list
def create_edd_config(df_master, known_columns={}, save_path=None):
"""
    Generate the config file used for data preprocessing
    Parameters
    ----------
    df_master:
        DataFrame
    known_columns: dict, default {}
        Known column types, e.g. {'age': 'numerical', 'sex': 'categorical'}
    save_path: str, default None
    Returns
    -------
    df_config: DataFrame
        the preprocessing configuration file
"""
column_type = __extract_feature_type(df_master, known_columns=known_columns)
df_config = pd.DataFrame(column_type, columns=['Var_Name', 'Var_Type'])
    df_config['Ind_Model'] = 1  # whether the variable enters the model
    df_config['Ind_Cap'] = 0  # whether to apply capping
    df_config['Cap_Value'] = None
    df_config['Ind_Floor'] = 0  # whether to apply flooring
    df_config['Floor_Value'] = None
    df_config['Missing_Impute'] = -1  # missing-value fill; defaults to -1 for numerical and 'missing' for categorical variables
    df_config.loc[df_config['Var_Type'] == 'categorical', 'Missing_Impute'] = 'missing'
    df_config['Ind_WOE'] = 0  # whether to apply the WOE transform; by default categorical variables are transformed, numerical ones are not
    df_config.loc[df_config['Var_Type'] == 'categorical', 'Ind_WOE'] = 1
    df_config['WOE_Bin'] = None
    df_config['Ind_Norm'] = 0  # whether to normalize
    df_config['Ind_Scale'] = 0  # whether to apply min-max scaling
for var in df_config['Var_Name'][df_config['Var_Type'] == 'numerical'].tolist():
if df_master[var].max() > (5 * df_master[var].quantile(0.99)):
df_config.loc[df_config['Var_Name'] == var, 'Ind_Cap'] = 1
df_config.to_csv(save_path, index=False, encoding='utf-8')
return df_config
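# Illustrative usage sketch (assumption, not from the original module): generating a config
# for a toy dataframe; the column names are hypothetical.
#
#   df_master = pd.DataFrame({'age': [25, 31, 47], 'sex': ['M', 'F', 'F'], 'income': [3.2e4, 5.1e4, 8.9e5]})
#   df_config = create_edd_config(df_master, known_columns={'sex': 'categorical'}, save_path='./edd_config.csv')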
class Cap(object):
"""
Descriptions
------------
    Apply capping to variables. Main points:
        1. Only numerical variables are processed
        2. By default the cap value is the 99th percentile of the variable (an explicitly specified value takes priority)
        3. Missing values are left untouched
    Attributes
    ----------
    config: DataFrame
        config table
    reference: DataFrame
        reference table
    apply_list: list
        list of variables to process
    Method
    ------
    fit: compute the cap value of each variable
    apply: cap the variables according to the reference table
"""
def __init__(self, df_config, apply_list=None):
"""
Parameters
----------
df_config: DataFrame
            the data-preprocessing config file (required)
        apply_list: list, default None
            list of variables to process; if not given, defaults to the variables with Ind_Cap=1 in the config file
"""
self.config = df_config
self.reference = df_config.copy()
if apply_list is None:
self.apply_list = list(df_config['Var_Name'][(df_config['Ind_Model'] == 1) & (df_config['Ind_Cap'] == 1)])
else:
self.apply_list = apply_list
def fit(self, df_master):
"""
        Compute the cap value of each variable
Parameters
----------
df_master: DataFrame
Returns
-------
reference: DataFrame
reference table
"""
for var in self.apply_list:
df_config_var = self.config[self.config['Var_Name'] == var]
if df_config_var['Cap_Value'].isnull().iloc[0] == True:
                cap_value = df_master[var][~df_master[var].isnull()].quantile(0.99)  # ignore missing values
else:
cap_value = float(df_config_var['Cap_Value'].iloc[0])
self.reference.loc[self.reference['Var_Name'] == var, 'Cap_Value'] = cap_value
return self.reference
def apply(self, df_master):
"""
        Cap the variables according to the reference table
Parameters
----------
df_master: DataFrame
"""
for var in self.apply_list:
cap_value = float(self.reference['Cap_Value'][self.reference['Var_Name'] == var].iloc[0])
if pd.isnull(cap_value):
raise ValueError('Not found cap value of "{0}"'.format(var))
df_master[var] = np.where(df_master[var] > cap_value, cap_value, df_master[var])
return df_master
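# Illustrative usage sketch (assumption): a Cap fit/apply round trip. df_config is expected to
# contain the columns referenced above (Var_Name, Var_Type, Ind_Model, Ind_Cap, Cap_Value, ...).
#
#   cap = Cap(df_config)
#   df_reference = cap.fit(df_train)   # fills Cap_Value with the 99th percentile where not specified
#   df_train = cap.apply(df_train)
#   df_test = cap.apply(df_test)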
class Floor(object):
"""
Descriptions
------------
    Apply flooring to variables. Main points:
        1. Only numerical variables are processed
        2. Only values below 0 are processed; the default floor is 5 times the 1st percentile (an explicitly specified value takes priority)
        3. Missing values are left untouched
    Attributes
    ----------
    config: DataFrame
        config table
    reference: DataFrame
        reference table
    apply_list: list
        list of variables to process
    Method
    ------
    fit: compute the floor value of each variable
    apply: floor the variables according to the reference table
"""
def __init__(self, df_config, apply_list=None):
"""
Parameters
----------
df_config: DataFrame
            the data-preprocessing config file
        apply_list: list, default None
            list of variables to process; if not given, defaults to the variables with Ind_Floor=1 in the config file
"""
self.config = df_config
self.reference = df_config.copy()
if apply_list is None:
self.apply_list = list(df_config['Var_Name'][(df_config['Ind_Model'] == 1) & (df_config['Ind_Floor'] == 1)])
else:
self.apply_list = apply_list
def fit(self, df_master):
"""
        Compute the floor value of each variable
Parameters
----------
df_master: DataFrame
Returns
-------
reference: DataFrame
reference table
"""
for var in self.apply_list:
df_config_var = self.config[self.config['Var_Name'] == var]
if df_config_var['Floor_Value'].isnull().iloc[0] == True:
floor_value = min(5 * df_master[var][~df_master[var].isnull()].quantile(0.01), 0)
else:
floor_value = float(df_config_var['Floor_Value'].iloc[0])
self.reference.loc[self.reference['Var_Name'] == var, 'Floor_Value'] = floor_value
return self.reference
def apply(self, df_master):
"""
        Floor the variables according to the reference table
Parameters
----------
df_master: DataFrame
"""
for var in self.apply_list:
floor_value = float(self.reference['Floor_Value'][self.reference['Var_Name'] == var].iloc[0])
if pd.isnull(floor_value):
raise ValueError('Not found floor value of "{0}"'.format(var))
df_master[var] = np.where(df_master[var] < floor_value, floor_value, df_master[var])
return df_master
class MissingImpute(object):
"""
Descriptions
------------
    Impute missing values of variables. Main points:
        1. Numerical variables can be filled with the mean, the median, or a specified value
        2. Categorical variables can be filled with the mode or a specified value
        3. A warning is printed when a variable has missing values but no impute value is specified
    Attributes
    ----------
    config: DataFrame
        config table
    reference: DataFrame
        reference table
    apply_list: list
        list of variables to process
    Method
    ------
    fit: compute the impute value of each variable
    apply: impute missing values according to the reference table
"""
def __init__(self, df_config, apply_list=None):
"""
Parameters
----------
df_config: DataFrame
            the data-preprocessing config file
        apply_list: list, default None
            list of variables to process; if not given, defaults to the variables whose Missing_Impute is not null in the config file
"""
self.config = df_config
self.reference = df_config.copy()
if apply_list is None:
self.apply_list = list(df_config['Var_Name'][(df_config['Ind_Model'] == 1) & (df_config['Missing_Impute'].isnull() == False)])
else:
self.apply_list = apply_list
def fit(self, df_master):
"""
        Compute the impute value of each variable
Parameters
----------
df_master: DataFrame
Returns
-------
reference: DataFrame
reference table
"""
        missing_cnt = df_master.isnull().sum()  # count missing values per variable
        missing_vars = list(missing_cnt[missing_cnt > 0].index)  # variables that contain missing values
for var in list(self.config['Var_Name'][self.config['Ind_Model'] == 1]):
df_config_var = self.config[self.config['Var_Name'] == var]
            # determine the impute value for numerical variables
if df_config_var['Var_Type'].iloc[0] == 'numerical':
if df_config_var['Missing_Impute'].iloc[0] == 'mean':
impute_value = df_master[var].mean()
elif df_config_var['Missing_Impute'].iloc[0] == 'median':
impute_value = df_master[var].median()
elif df_config_var['Missing_Impute'].isnull().iloc[0] == False:
impute_value = float(df_config_var['Missing_Impute'].iloc[0])
else:
impute_value = None
            # determine the impute value for categorical variables
elif df_config_var['Var_Type'].iloc[0] == 'categorical':
if df_config_var['Missing_Impute'].iloc[0] == 'mode':
impute_value = df_master[var].mode().iloc[0]
elif df_config_var['Missing_Impute'].isnull().iloc[0] == False:
impute_value = df_config_var['Missing_Impute'].iloc[0]
else:
impute_value = None
            # raise an error for unknown variable types
else:
raise TypeError('Wrong type for:{0}'.format(var))
            # update the config
self.reference.loc[self.reference['Var_Name'] == var, 'Missing_Impute'] = impute_value
            # check for variables that contain missing values but have no impute value specified
if var in list(self.config['Var_Name'][self.config['Ind_Model'] == 1]) and var in missing_vars:
if impute_value is None:
print('"{0}" exist missing value but no impute!'.format(var))
return self.reference
def apply(self, df_master):
"""
        Impute missing values according to the reference table
Parameters
----------
df_master: DataFrame
"""
missing_cnt = df_master.isnull().sum()
missing_vars = list(missing_cnt[missing_cnt > 0].index)
for var in self.apply_list:
if var not in missing_vars:
continue
if self.reference['Var_Type'][self.reference['Var_Name'] == var].iloc[0] == 'numerical':
impute_value = float(self.reference['Missing_Impute'][self.reference['Var_Name'] == var].iloc[0])
else:
impute_value = self.reference['Missing_Impute'][self.reference['Var_Name'] == var].iloc[0]
if | pd.isnull(impute_value) | pandas.isnull |
import os, time, torch, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import numpy as np
import pandas as pd
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from pathlib import Path
from copy import deepcopy
# Utils
from features.Model_config import *
from features.Loss import *
from features.Batch import *
from features.Comparison import *
from models.ESTransformer import ESTransformer
class DeployedESTransformer(object):
def __init__(self, max_epochs=15, batch_size=1, batch_size_test=64, freq_of_test=-1,
learning_rate=1e-3, lr_scheduler_step_size=9, lr_decay=0.9,
per_series_lr_multip=1.0, gradient_eps=1e-8,
transformer_weight_decay=0, noise_std=0.001,
level_variability_penalty=80,
testing_percentile=50, training_percentile=50, ensemble=False,
seasonality=[4],
input_size=4,
output_size=8,
frequency=None, max_periods=20, random_seed=1,
device = 'cpu', root_dir= './',
# Transformer parameters
d_input = 4,
d_model = 48,
d_output = 6,
q = 8,
v = 8,
h = 4,
N = 4,
attention_size = None,
dropout = 0.3,
chunk_mode = 'chunk',
pe = None,
pe_period = 24, dataset_name = None):
super().__init__()
self.mc = ModelConfig(max_epochs=max_epochs, batch_size=batch_size, batch_size_test=batch_size_test,
freq_of_test=freq_of_test, learning_rate=learning_rate,
lr_scheduler_step_size=lr_scheduler_step_size, lr_decay=lr_decay,
per_series_lr_multip=per_series_lr_multip,
gradient_eps=gradient_eps,
transformer_weight_decay=transformer_weight_decay, noise_std=noise_std,
level_variability_penalty=level_variability_penalty,
testing_percentile=testing_percentile, training_percentile=training_percentile,
ensemble=ensemble,
seasonality=seasonality,
input_size=input_size,
output_size=output_size,
frequency=frequency, max_periods=max_periods, random_seed=random_seed,
device=device, root_dir=root_dir,
d_input = d_input,
d_model = d_model,
d_output = d_output,
q = q,
v = v,
h = h,
N = N,
attention_size = attention_size,
dropout = dropout,
chunk_mode = chunk_mode,
pe = pe,
pe_period = pe_period)
self.device = device
self.dataset_name = dataset_name
self._fitted = False
def instantiate_estransformer(self, exogenous_size, n_series):
self.mc.exogenous_size = exogenous_size
self.mc.n_series = n_series
self.estransformer = ESTransformer(self.mc).to(self.mc.device)
def fit(self, X_df, y_df, X_test_df=None, y_test_df=None, y_hat_benchmark='y_hat_naive2',
warm_start=False, shuffle=True, verbose=True):
# Transform long dfs to wide numpy
assert type(X_df) == pd.core.frame.DataFrame
assert type(y_df) == pd.core.frame.DataFrame
assert all([(col in X_df) for col in ['unique_id', 'ds', 'x']])
assert all([(col in y_df) for col in ['unique_id', 'ds', 'y']])
if y_test_df is not None:
assert y_hat_benchmark in y_test_df.columns, 'benchmark is not present in y_test_df, use y_hat_benchmark to define it'
# Storing dfs for OWA evaluation, initializing min_owa
self.y_train_df = y_df
self.X_test_df = X_test_df
self.y_test_df = y_test_df
self.min_owa = 4.0
self.min_epoch = 0
        self.int_ds = isinstance(self.y_train_df['ds'][0], (int, np.int64))
self.y_hat_benchmark = y_hat_benchmark
X, y = self.long_to_wide(X_df, y_df)
assert len(X)==len(y)
assert X.shape[1]>=3
# Exogenous variables
unique_categories = np.unique(X[:, 1])
self.mc.category_to_idx = dict((word, index) for index, word in enumerate(unique_categories))
exogenous_size = len(unique_categories)
# Create batches (device in mc)
self.train_dataloader = Iterator(mc=self.mc, X=X, y=y)
# Random Seeds (model initialization)
torch.manual_seed(self.mc.random_seed)
np.random.seed(self.mc.random_seed)
# Initialize model
n_series = self.train_dataloader.n_series
self.instantiate_estransformer(exogenous_size, n_series)
# Validating frequencies
X_train_frequency = pd.infer_freq(X_df.head()['ds'])
y_train_frequency = pd.infer_freq(y_df.head()['ds'])
self.frequencies = [X_train_frequency, y_train_frequency]
if (X_test_df is not None) and (y_test_df is not None):
X_test_frequency = pd.infer_freq(X_test_df.head()['ds'])
y_test_frequency = pd.infer_freq(y_test_df.head()['ds'])
self.frequencies += [X_test_frequency, y_test_frequency]
assert len(set(self.frequencies)) <= 1, \
"Match the frequencies of the dataframes {}".format(self.frequencies)
self.mc.frequency = self.frequencies[0]
print("Infered frequency: {}".format(self.mc.frequency))
# Train model
self._fitted = True
self.train(dataloader=self.train_dataloader, max_epochs=self.mc.max_epochs,
warm_start=warm_start, shuffle=shuffle, verbose=verbose)
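    # Illustrative sketch (assumption, mirroring the asserts in fit above): X_df and y_df are
    # long-format dataframes with columns ['unique_id', 'ds', 'x'] and ['unique_id', 'ds', 'y'].
    #
    #   X_df = pd.DataFrame({'unique_id': ['s1'] * 8,
    #                        'ds': pd.date_range('2020-01-01', periods=8, freq='D'),
    #                        'x': ['cat1'] * 8})
    #   y_df = X_df.drop(columns='x').assign(y=np.arange(8, dtype=float))
    #   model = DeployedESTransformer(max_epochs=1, input_size=4, output_size=2)
    #   model.fit(X_df, y_df)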
def train(self, dataloader, max_epochs, warm_start=False, shuffle=True, verbose=True):
if self.mc.ensemble: self.estransformer_ensemble = [deepcopy(self.estransformer).to(self.mc.device)] * 5
if verbose: print(15*'='+' Training ESTransformer ' + 15*'=' + '\n')
# Model parameters
es_parameters = filter(lambda p: p.requires_grad, self.estransformer.es.parameters())
params = sum([np.prod(p.size()) for p in es_parameters])
print('Number of parameters of ES: ', params)
trans_parameters = filter(lambda p: p.requires_grad, self.estransformer.transformer.parameters())
params = sum([np.prod(p.size()) for p in trans_parameters])
print('Number of parameters of Transformer: ', params)
# Optimizers
if not warm_start:
self.es_optimizer = optim.Adam(params=self.estransformer.es.parameters(),
lr=self.mc.learning_rate*self.mc.per_series_lr_multip,
betas=(0.9, 0.999), eps=self.mc.gradient_eps)
self.es_scheduler = StepLR(optimizer=self.es_optimizer, step_size=self.mc.lr_scheduler_step_size, gamma=0.9)
self.transformer_optimizer = optim.Adam(params=self.estransformer.transformer.parameters(),
lr=self.mc.learning_rate, betas=(0.9, 0.999), eps=self.mc.gradient_eps,
weight_decay=self.mc.transformer_weight_decay)
self.transformer_scheduler = StepLR(optimizer=self.transformer_optimizer,
step_size=self.mc.lr_scheduler_step_size, gamma=self.mc.lr_decay)
all_epoch = []
all_train_loss = []
all_test_loss = []
# Loss Functions
train_tau = self.mc.training_percentile / 100
train_loss = SmylLoss(tau=train_tau, level_variability_penalty=self.mc.level_variability_penalty)
eval_tau = self.mc.testing_percentile / 100
eval_loss = PinballLoss(tau=eval_tau)
for epoch in range(max_epochs):
self.estransformer.train()
start = time.time()
if shuffle: dataloader.shuffle_dataset(random_seed=epoch)
losses = []
for j in range(dataloader.n_batches):
self.es_optimizer.zero_grad()
self.transformer_optimizer.zero_grad()
batch = dataloader.get_batch()
windows_y, windows_y_hat, levels = self.estransformer(batch)
# Pinball loss on normalized values
loss = train_loss(windows_y, windows_y_hat, levels)
losses.append(loss.data.cpu().numpy())
loss.backward()
self.transformer_optimizer.step()
self.es_optimizer.step()
# Decay learning rate
self.es_scheduler.step()
self.transformer_scheduler.step()
if self.mc.ensemble:
copy_estransformer = deepcopy(self.estransformer)
copy_estransformer.eval()
self.estransformer_ensemble.pop(0)
self.estransformer_ensemble.append(copy_estransformer)
# Evaluation
self.train_loss = np.mean(losses)
if verbose:
print("========= Epoch {} finished =========".format(epoch))
print("Training time: {}".format(round(time.time()-start, 5)))
print("Training loss ({} prc): {:.5f}".format(self.mc.training_percentile, self.train_loss))
self.test_loss = self.model_evaluation(dataloader, eval_loss)
print("Testing loss ({} prc): {:.5f}".format(self.mc.testing_percentile, self.test_loss))
self.evaluate_model_prediction(self.y_train_df, self.X_test_df, self.y_test_df, self.y_hat_benchmark, epoch=epoch)
self.estransformer.train()
all_epoch.append(epoch)
all_train_loss.append(self.train_loss)
all_test_loss.append(self.test_loss)
converge = pd.DataFrame({'Epoch': all_epoch, 'Train loss': all_train_loss, 'Test loss': all_test_loss})
# converge.to_csv("D:\\Sang\\hybcast\\hybcast3\\" + self.dataset_name + 'log_' + self.dataset_name +'.csv', index=False)
if (epoch % 100 == 0) or (epoch % 499 == 0):
# self.save(model_dir="D:\\Sang\\hybcast\\hybcast3\\" + self.dataset_name +'\\model\\', epoch=epoch)
None
if verbose: print('Train finished! \n')
def predict(self, X_df, decomposition=False):
assert type(X_df) == pd.core.frame.DataFrame
assert 'unique_id' in X_df
assert self._fitted, "Model not fitted yet"
self.estransformer.eval()
# Create fast dataloader
if self.mc.n_series < self.mc.batch_size_test: new_batch_size = self.mc.n_series
else: new_batch_size = self.mc.batch_size_test
self.train_dataloader.update_batch_size(new_batch_size)
dataloader = self.train_dataloader
# Create Y_hat_panel placeholders
output_size = self.mc.output_size
n_unique_id = len(dataloader.sort_key['unique_id'])
panel_unique_id = pd.Series(dataloader.sort_key['unique_id']).repeat(output_size)
#access column with last train date
panel_last_ds = pd.Series(dataloader.X[:, 2])
panel_ds = []
for i in range(len(panel_last_ds)):
ranges = | pd.date_range(start=panel_last_ds[i], periods=output_size+1, freq=self.mc.frequency) | pandas.date_range |
from os import abort
from requests import get
from bs4 import BeautifulSoup
from pandas import read_html, concat, DataFrame, read_csv
from .utils import url_daerah, total_page, _baseurl_
def get_daerah() -> list:
page = get(_baseurl_)
data = []
soup = BeautifulSoup(page.text, 'lxml')
table = soup.find_all('td')
for i in table:
name = i.find('a').text.strip()
link = i.find('a')['href'].strip().split('/')[-2]
data.append({
"Nama" : name,
"Link": link
})
return data
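# Illustrative usage sketch (assumption): get_daerah() returns a list of dicts such as
# [{'Nama': 'Aceh', 'Link': 'aceh'}, ...]; the values shown are hypothetical.
#
#   daerah = get_daerah()
#   df_daerah = DataFrame(daerah)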
def setup_provinsi():
list_url = url_daerah()
for i in list_url:
tpage = total_page('{}/{}'.format(_baseurl_, i))
data = []
for j in range(tpage+1):
url = 'https://carikodepos.com/daerah/{}/page/{}/'.format(i,j)
r = get(url)
res = read_html(r.text)
data.append(res)
tail1 = data[len(data)-1][0]
try:
tail2 = data[len(data)-1][1]
except:
tail2 = DataFrame()
for k in range(len(data[:-1])):
data1 = data[k][0].convert_dtypes()
data2 = data[k][1].convert_dtypes()
if k == 0:
df1 = concat([tail1, data1])
df2 = concat([tail2, data2])
else:
df1 = | concat([df1, data1]) | pandas.concat |
#functions to be used in the data preparation process
import pandas as pd
import numpy as np
import sklearn.metrics as metric
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
def market_columns(df):
"""Function that maps multiple entries in a column into individual columns
and assings a score of 1 or 0 if the entry is question is within a given row.
"""
categories = []
for category in list(df['Market Category'].unique()):
categories += category.split(',')
unique = set(categories)
for col in unique:
df[col] = df['Market Category'].apply(lambda x: 1 if col in x.split(',') else 0)
df.drop('Market Category', axis=1, inplace=True)
return df
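# Illustrative sketch (assumption): a toy 'Market Category' column before and after expansion.
#
#   toy = pd.DataFrame({'Market Category': ['Luxury,Performance', 'Luxury']})
#   toy = market_columns(toy)
#   # 'Market Category' is replaced by binary columns 'Luxury' and 'Performance'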
def onehotencode(X):
"""
One hot encode the categorical variables in the dataframe to convert them to numerical variables.
"""
X_obj = X[[col for col,dtype in list(zip(X.columns, X.dtypes))
if dtype == np.dtype('O')]]
X_nonobj = X[[col for col,dtype in list(zip(X.columns, X.dtypes))
if dtype != np.dtype('O')]]
ohe = OneHotEncoder(handle_unknown='ignore')
X_obj_ohe = ohe.fit_transform(X_obj)
X_nonobj_df = pd.DataFrame(X_nonobj).reset_index(drop=True)
X_obj_ohe_df = pd.DataFrame(X_obj_ohe.todense(), columns=ohe.get_feature_names()).reset_index(drop=True)
X_all = | pd.concat([X_nonobj_df, X_obj_ohe_df], axis=1) | pandas.concat |
"""
Validates the exported json files with some data from SQL database.
"""
import json
import os
import pandas as pd
from multiprocessing import Pool, RLock
from tqdm import tqdm
from projects.data_cleaning import *
def validate_data(output_folder, patientunitstayid):
query_schema, conn = connect_to_database()
for table_name in TABLE_LIST:
query = query_schema + """
select *
from {}
where patientunitstayid = {}
""".format(table_name, patientunitstayid)
df = pd.read_sql_query(query, conn)
df.sort_values(df.columns[0], ascending=True,
inplace=True, ignore_index=True)
json_path = f"{output_folder}/{patientunitstayid}.json"
with open(json_path, 'r') as json_file:
json_dict = json.load(json_file)
df_json = | pd.DataFrame(json_dict[table_name]) | pandas.DataFrame |
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import cv2
import os
from src.models.storage.batch import Batch
from src.parser.parser import Parser
from src.optimizer.statement_to_opr_convertor import StatementToPlanConvertor
from src.optimizer.plan_generator import PlanGenerator
from src.executor.plan_executor import PlanExecutor
from src.models.catalog.frame_info import FrameInfo
from src.models.catalog.properties import ColorSpace
from src.udfs.abstract_udfs import AbstractClassifierUDF
NUM_FRAMES = 10
def create_dataframe(num_frames=1) -> pd.DataFrame:
frames = []
for i in range(1, num_frames + 1):
frames.append({"id": i, "data": (i * np.ones((1, 1)))})
return pd.DataFrame(frames)
def create_dataframe_same(times=1):
base_df = create_dataframe()
for i in range(1, times):
        base_df = pd.concat([base_df, create_dataframe()])
return base_df
def custom_list_of_dicts_equal(one, two):
for v1, v2 in zip(one, two):
if v1.keys() != v2.keys():
return False
for key in v1.keys():
if isinstance(v1[key], np.ndarray):
if not np.array_equal(v1[key], v2[key]):
return False
else:
if v1[key] != v2[key]:
return False
return True
def create_sample_video(num_frames=NUM_FRAMES):
try:
os.remove('dummy.avi')
except FileNotFoundError:
pass
out = cv2.VideoWriter('dummy.avi',
cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10,
(2, 2))
    for i in range(num_frames):
        frame = np.array(np.ones((2, 2, 3)) * 0.1 * float(i + 1) * 255,
                         dtype=np.uint8)
        out.write(frame)
    out.release()
def create_dummy_batches(num_frames=NUM_FRAMES,
filters=[], batch_size=10, start_id=0):
if not filters:
filters = range(num_frames)
data = []
for i in filters:
data.append({'id': i + start_id,
'data': np.array(
np.ones((2, 2, 3)) * 0.1 * float(i + 1) * 255,
dtype=np.uint8)})
if len(data) % batch_size == 0:
yield Batch(pd.DataFrame(data))
data = []
if data:
yield Batch(pd.DataFrame(data))
def perform_query(query):
stmt = Parser().parse(query)[0]
l_plan = StatementToPlanConvertor().visit(stmt)
p_plan = PlanGenerator().build(l_plan)
return PlanExecutor(p_plan).execute_plan()
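# Illustrative usage sketch (assumption): perform_query() parses a statement, builds a physical
# plan and executes it; the query string below is hypothetical.
#
#   batches = perform_query("SELECT id FROM MyVideo WHERE id < 5;")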
class DummyObjectDetector(AbstractClassifierUDF):
@property
def name(self) -> str:
return "dummyObjectDetector"
def __init__(self):
super().__init__()
@property
def input_format(self):
return FrameInfo(-1, -1, 3, ColorSpace.RGB)
@property
def labels(self):
return ['__background__', 'person', 'bicycle']
def classify(self, frames: pd.DataFrame):
# odd are labeled bicycle and even person
labels = [self.labels[i % 2 + 1] for i in range(len(frames))]
prediction_df_list = | pd.DataFrame({'label': labels}) | pandas.DataFrame |
import unittest
from unittest import mock
import networkx as nx
import numpy as np
import pandas as pd
import cassiopeia as cas
from cassiopeia.plotting import local
class TestLocalPlotting(unittest.TestCase):
def setUp(self):
self.allele_table = pd.DataFrame.from_dict(
{
1: ["2", "A", 10, "i", "j", "k"],
3: ["3", "A", 10, "i", "m", "n"],
4: ["5", "A", 10, "i", "j", "k"],
6: ["6", "A", 10, "i", "j", "m"],
},
orient="index",
columns=["cellBC", "intBC", "UMI", "r1", "r2", "r3"],
)
graph = nx.DiGraph()
graph.add_edges_from(
[
("root", "1"),
("1", "2"),
("1", "3"),
("root", "4"),
("4", "5"),
("4", "6"),
]
)
# cell meta
cell_meta = pd.DataFrame(index=["2", "3", "5", "6"])
cell_meta["nUMI"] = [1, 3, 5, 7]
cell_meta["cluster"] = ["a", "a", "b", "b"]
self.tree = cas.data.CassiopeiaTree(tree=graph, cell_meta=cell_meta)
def test_compute_colorstrip_size(self):
expected = (0.05, 1.0)
size = local.compute_colorstrip_size(
{"0": (0, 0), "1": (0, 1), "2": (1, 1)},
{"1": (0, 1), "2": (1, 1)},
loc="up",
)
np.testing.assert_allclose(size, expected)
size = local.compute_colorstrip_size(
{"0": (0, 0), "1": (0, 1), "2": (1, 1)},
{"1": (0, 1), "2": (1, 1)},
loc="up",
)
np.testing.assert_allclose(size, expected)
size = local.compute_colorstrip_size(
{"0": (0, 0), "1": (1, 0), "2": (1, 1)},
{"1": (1, 0), "2": (1, 1)},
loc="right",
)
np.testing.assert_allclose(size, expected)
size = local.compute_colorstrip_size(
{"0": (0, 0), "1": (-1, 0), "2": (-1, 1)},
{"1": (-1, 0), "2": (-1, 1)},
loc="left",
)
np.testing.assert_allclose(size, expected)
size = local.compute_colorstrip_size(
{"0": (0, 0), "1": (0, 1), "2": (1, 1)},
{"1": (0, 1), "2": (1, 1)},
loc="polar",
)
np.testing.assert_allclose(size, expected)
def test_create_categorical_colorstrip(self):
expected_colorstrip = {
"1": ([1, -1, -1, 1, 1], [2, 2, 1, 1, 2], mock.ANY, mock.ANY),
"2": ([2, 0, 0, 2, 2], [2, 2, 1, 1, 2], mock.ANY, mock.ANY),
}
expected_next_anchor_coords = {"1": (0, 2), "2": (1, 2)}
colorstrip, next_anchor_coords = local.create_categorical_colorstrip(
{"1": "a", "2": "b"}, {"1": (0, 0), "2": (1, 0)}, 1, 2, 1, "up"
)
self.assertEqual(colorstrip, expected_colorstrip)
self.assertEqual(next_anchor_coords, expected_next_anchor_coords)
def test_create_continous_colorstrip(self):
expected_colorstrip = {
"1": ([1, -1, -1, 1, 1], [2, 2, 1, 1, 2], mock.ANY, mock.ANY),
"2": ([2, 0, 0, 2, 2], [2, 2, 1, 1, 2], mock.ANY, mock.ANY),
}
expected_next_anchor_coords = {"1": (0, 2), "2": (1, 2)}
colorstrip, next_anchor_coords = local.create_continuous_colorstrip(
{"1": -10, "2": 10}, {"1": (0, 0), "2": (1, 0)}, 1, 2, 1, "up"
)
self.assertEqual(colorstrip, expected_colorstrip)
self.assertEqual(next_anchor_coords, expected_next_anchor_coords)
def test_create_indel_heatmap(self):
indel_colors = {
"i": [0.75, 0, 0.5],
"j": [0.05, 0.88, 0.94],
"k": [0.69, 1.0, 1.0],
"m": [0.798, 0.37, 0.68],
"n": [0.56, 0.37, 0.68],
}
indel_color_df = | pd.DataFrame(columns=["color"]) | pandas.DataFrame |
#!/usr/bin/env python
import pandas as pd
import numpy as np
def locate_na(data: pd.DataFrame) -> dict:
"""
Locate and return the indices to all missing values within an inputted dataframe.
Each element of the returned dictionary will be a column in a dataframe, which will
contain the row indices of the missing values.
Parameters
----------
data : dataframe
This is the dataframe that the function will use to locate NAs.
Returns
-------
dictionary of lists
key = column indices that contain missing values
value = list of row indices that have missing values
    >>> locate_na(pd.DataFrame(np.array([["Yes", "No"], [None, "Yes"]])))
    {"0": [1]}
    >>> locate_na(pd.DataFrame(np.array([[1, 2, None], [None, 2, 3]])))
    {"0": [1], "2": [0]}
"""
# testing code
try:
import pandas as pd
except ImportError:
# Give a nice error message
raise ImportError("the pandas library is not installed\n"
"you can install via conda\n"
"conda install pandas\n"
"or: python -m pip install pandas\n")
try:
import numpy as np
except ImportError:
# Give a nice error message
raise ImportError("the numpy library is not installed\n"
"you can install via conda\n"
"conda install numpy\n")
try:
if not isinstance(data, pd.DataFrame):
raise(TypeError)
col_na: dict = {}
for i in data:
row_na: list = []
for j in range(len(data[i])):
if ( | pd.isna(data[i][j]) | pandas.isna |
# coding=utf-8
# Author: <NAME> & <NAME>
# Date: Jan 06, 2021
#
# Description: Parse Epilepsy Foundation Forums and extract dictionary matches
#
import os
import sys
#
#include_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'include'))
include_path = '/nfs/nfs7/home/rionbr/myaura/include'
sys.path.insert(0, include_path)
#
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
#
import db_init as db
import utils
from load_dictionary import load_dictionary, build_term_parser
from termdictparser import Sentences
if __name__ == '__main__':
#
# Init
#
dicttimestamp = '20180706'
# Load Dictionary
dfD = load_dictionary(dicttimestamp=dicttimestamp, server='etrash-mysql-ddi-dictionaries')
# Build Parser Vocabulary
tdp = build_term_parser(dfD)
#
dict_token = dfD['token'].to_dict()
dict_id_parent = dfD['id_parent'].to_dict()
dict_parent = dfD['parent'].to_dict()
# dict_dictionary = dfD['dictionary'].to_dict()
dict_type = dfD['type'].to_dict()
# dict_source = dfD['source'].to_dict()
#
# Connect to MySQL
#
engine = db.connectToMySQL(server='etrash-mysql-epilepsy')
#
# Get Users
#
sql = """
SELECT
pid,
uid,
topicid,
created,
title,
text_clean
FROM dw_forums
"""
df = | pd.read_sql(sql, con=engine) | pandas.read_sql |
# Poslanci a Osoby (Members of Parliament and Persons)
# The agenda records persons, their membership in bodies and their functions within those bodies, as well as the bodies themselves.
# For details see https://www.psp.cz/sqw/hp.sqw?k=1301.
from os import path
import pandas as pd
import numpy as np
from parlamentikon.utility import *
from parlamentikon.Snemovna import *
from parlamentikon.TabulkyPoslanciOsoby import *
from parlamentikon.setup_logger import log
class PoslanciOsobyBase(SnemovnaZipDataMixin, SnemovnaDataFrame):
"""Obecná třída pro dceřiné třídy (Osoby, Organy, Poslanci, etc.)"""
def __init__(self, stahni=True, *args, **kwargs):
log.debug("--> PoslanciOsobyBase")
super().__init__(*args, **kwargs)
if stahni == True:
self.stahni_zip_data("poslanci")
log.debug("<-- PoslanciOsobyBase")
class TypOrgan(TabulkaTypOrganMixin, PoslanciOsobyBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.nacti_typ_organ()
self.nastav_dataframe(
self.tbl['typ_organ'],
odstran=['priorita'],
vyber=['id_typ_organ', 'nazev_typ_organ_cz', 'nazev_typ_organ_en'])
class Organy(TabulkaOrganyMixin, TypOrgan):
def __init__(self, *args, **kwargs):
log.debug("--> Organy")
super().__init__(*args, **kwargs)
self.nacti_organy()
# Připoj Typu orgánu
suffix = "__typ_organ"
self.tbl['organy'] = pd.merge(left=self.tbl['organy'], right=self.tbl['typ_organ'], on="id_typ_organ", suffixes=("",suffix), how='left')
        # Drop the unimportant 'priorita' columns, because they conflict with each other and are most likely useless.
        # This avoids a warning from 'drop_by_inconsistency'.
self.tbl['organy'].drop(columns=["priorita", "priorita__typ_organ"], inplace=True)
self.tbl['organy'] = self.drop_by_inconsistency(self.tbl['organy'], suffix, 0.1, 'organy', 'typ_organ')
        # Set the election term if it is missing
if self.volebni_obdobi == None:
self.volebni_obdobi = self._posledni_snemovna().od_organ.year
log.debug(f"Nastavuji začátek volebního období na: {self.volebni_obdobi}.")
if self.volebni_obdobi != -1:
x = self.tbl['organy'][
(self.tbl['organy'].nazev_organ_cz == 'Poslanecká sněmovna')
& (self.tbl['organy'].od_organ.dt.year == self.volebni_obdobi)
]
if len(x) == 1:
self.snemovna = x.iloc[0]
else:
log.error('Bylo nalezeno více sněmoven pro dané volební období!')
raise ValueError
self.tbl['organy'] = self.vyber_platne_organy()
self.nastav_dataframe(self.tbl['organy'])
log.debug("<-- Organy")
def vyber_platne_organy(self, df=None):
if df == None:
df = self.tbl['organy']
if self.volebni_obdobi == -1:
return df
ids_snemovnich_organu = expand_hierarchy(df, 'id_organ', 'organ_id_organ', [self.snemovna.id_organ])
        # TODO: When should od_f be used instead of od_o, and do_f instead of do_o?
interval_start = df.od_organ\
.mask(df.od_organ.isna(), self.snemovna.od_organ)\
.mask(~df.od_organ.isna(), np.maximum(df.od_organ, self.snemovna.od_organ))
        # Observation: volebni_obdobi_od is never NaT => interval_start is never NaT
        if pd.isna(self.snemovna.do_organ):  # marker of the most recent election term
            podminka_interval = (
                (interval_start.dt.date <= df.do_organ.dt.date)  # necessary for True: (interval_start != NaT, always holds) and (do_organ != NaT)
                | df.do_organ.isna()  # necessary for True: (interval_start != NaT, always holds) and (do_organ == NaT)
            )
        else:  # Observation: a previous election term => interval_end is never NaT
interval_end = df.do_organ\
.mask(df.do_organ.isna(), self.snemovna.do_organ)\
.mask(~df.do_organ.isna(), np.minimum(df.do_organ, self.snemovna.do_organ))
podminka_interval = (interval_start.dt.date <= interval_end.dt.date)
ids_jinych_snemoven = []
x = self._predchozi_snemovna()
if x is not None:
ids_jinych_snemoven.append(x.id_organ)
x = self._nasledujici_snemovna()
if x is not None:
ids_jinych_snemoven.append(x.id_organ)
#ids_jinych_snemovnich_organu = find_children_ids(ids_jinych_snemoven, 'id_organ', df, 'organ_id_organ', ids_jinych_snemoven, 0)
ids_jinych_snemovnich_organu = expand_hierarchy(df, 'id_organ', 'organ_id_organ', ids_jinych_snemoven)
podminka_nepatri_do_jine_snemovny = ~df.id_organ.isin(ids_jinych_snemovnich_organu)
df = df[
(df.id_organ.isin(ids_snemovnich_organu) == True)
| (podminka_interval & podminka_nepatri_do_jine_snemovny)
]
return df
def _posledni_snemovna(self):
"""Pomocná funkce, vrací data poslední sněmovny"""
p = self.tbl['organy'][(self.tbl['organy'].nazev_organ_cz == 'Poslanecká sněmovna') & (self.tbl['organy'].do_organ.isna())].sort_values(by=["od_organ"])
if len(p) == 1:
return p.iloc[0]
else:
return None
def _predchozi_snemovna(self, id_organ=None):
"""Pomocná funkce, vrací data předchozí sněmovny"""
# Pokud nebylo zadáno id_orgánu, implicitně vezmi id_organ dané sněmovny.
if id_organ == None:
id_organ = self.snemovna.id_organ
snemovny = self.tbl['organy'][self.tbl['organy'].nazev_organ_cz == 'Poslanecká sněmovna'].sort_values(by="do_organ").copy()
snemovny['id_predchozi_snemovny'] = snemovny.id_organ.shift(1)
idx = snemovny[snemovny.id_organ == id_organ].iloc[0].id_predchozi_snemovny
p = snemovny[snemovny.id_organ == idx]
assert len(p) <= 1
if len(p) == 1:
return p.iloc[0]
else:
return None
def _nasledujici_snemovna(self, id_organ=None):
"""Pomocná funkce, vrací data následující sněmovny"""
# Pokud nebylo zadáno id_orgánu, implicitně vezmi id_organ dané sněmovny.
if id_organ == None:
id_organ = self.snemovna.id_organ
snemovny = self.tbl['organy'][self.tbl['organy'].nazev_organ_cz == 'Poslanecká sněmovna'].sort_values(by="do_organ").copy()
snemovny['id_nasledujici_snemovny'] = snemovny.id_organ.shift(-1)
idx = snemovny[snemovny.id_organ == id_organ].iloc[0].id_nasledujici_snemovny
p = snemovny[snemovny.id_organ == idx]
assert len(p) <= 1
if len(p) == 1:
return p.iloc[0]
else:
return None
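    # Illustrative usage sketch (assumption, not from the original module): the class is typically
    # constructed for a single election term; the keyword below is assumed to be handled by the
    # parent constructor.
    #
    #   organy = Organy(volebni_obdobi=2017)
    #   organy.snemovna.nazev_organ_cz   # 'Poslanecká sněmovna'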
# The table defines the function types within a body - function types are defined for each body type. The function-type names are used in listings instead of the texts in Funkce:nazev_funkce_LL.
# The TypFunkce class does not take the election term into account, because the term can only be obtained through the derived classes (ZarazeniOsoby).
class TypFunkce(TabulkaTypFunkceMixin, TypOrgan):
def __init__(self, *args, **kwargs):
log.debug("--> TypFunkce")
super().__init__(*args, **kwargs)
self.nacti_typ_funkce()
        # Attach the body type (typ_organ)
suffix="__typ_organ"
self.tbl['typ_funkce'] = pd.merge(
left=self.tbl['typ_funkce'],
right=self.tbl['typ_organ'],
on="id_typ_organ",
suffixes=("", suffix),
how='left'
)
        # Drop the unimportant 'priorita' columns, because they conflict with each other and are most likely useless.
        # This avoids the warning in 'drop_by_inconsistency'.
self.tbl['typ_funkce'].drop(columns=["priorita", "priorita__typ_organ"], inplace=True)
self.tbl['typ_funkce'] = self.drop_by_inconsistency(self.tbl['typ_funkce'], suffix, 0.1, t1_name='typ_funkce', t2_name='typ_organ', t1_on='id_typ_organ', t2_on='id_typ_organ')
self.nastav_dataframe(
self.tbl['typ_funkce'],
vyber=['id_typ_funkce', 'typ_funkce_cz', 'typ_funkce_en', 'typ_funkce_obecny'],
odstran=['typ_funkce_obecny__ORIG']
)
log.debug("<-- TypFunkce")
class Funkce(TabulkaFunkceMixin, Organy, TypFunkce):
def __init__(self, *args, **kwargs):
log.debug("--> Funkce")
super().__init__(*args, **kwargs)
self.nacti_funkce()
        # Narrow down the data
self.vyber_platne_funkce()
        # Attach the bodies (organy)
suffix = "__organy"
self.tbl['funkce'] = pd.merge(
left=self.tbl['funkce'],
right=self.tbl['organy'],
on='id_organ',
suffixes=("", suffix),
how='left'
)
self.tbl['funkce'] = self.drop_by_inconsistency(self.tbl['funkce'], suffix, 0.1, 'funkce', 'organy')
        # Attach the function type (typ_funkce)
suffix = "__typ_funkce"
self.tbl['funkce'] = pd.merge(left=self.tbl['funkce'], right=self.tbl['typ_funkce'], on="id_typ_funkce", suffixes=("", suffix), how='left')
        # Fix the known inconsistency in the data
x = self.tbl['funkce']
idx = x[(x.id_typ_organ == 42) & (x.id_typ_organ__typ_funkce == 15)].index
log.debug(f"Řešení známé nekonzistence v datech: Upřednostňuji sloupce z tabulky 'funkce' před 'typ_funkce' pro {len(idx)} hodnot.")
to_update = ['id_typ_organ', 'typ_id_typ_organ', 'nazev_typ_organ_cz', 'nazev_typ_organ_en', 'typ_organ_obecny']
for i in to_update:
x.at[idx, i + '__typ_funkce'] = x.loc[idx][i]
self.tbl['funkce'] = self.drop_by_inconsistency(self.tbl['funkce'], suffix, 0.1, 'funkce', 'typ_funkce', t1_on='id_typ_funkce', t2_on='id_typ_funkce')
if self.volebni_obdobi != -1:
assert len(self.tbl['funkce'][self.tbl['funkce'].id_organ.isna()]) == 0
self.nastav_dataframe(self.tbl['funkce'])
log.debug("<-- Funkce")
def vyber_platne_funkce(self):
if self.volebni_obdobi != -1:
self.tbl['funkce'] = self.tbl['funkce'][self.tbl['funkce'].id_organ.isin(self.tbl['organy'].id_organ)]
class Osoby(TabulkaOsobaExtraMixin, TabulkaOsobyMixin, PoslanciOsobyBase):
def __init__(self, *args, **kwargs):
log.debug("--> Osoby")
super(Osoby, self).__init__(*args, **kwargs)
self.nacti_osoby()
self.nacti_osoba_extra()
#suffix='__osoba_extra'
#self.tbl['osoby'] = pd.merge(left=self.tbl['osoby'], right=self.tbl['osoba_extra'], on="id_osoba", how="left", suffixes=('', suffix))
#self.drop_by_inconsistency(self.tbl['osoby'], suffix, 0.1, 'hlasovani', 'osoba_extra', inplace=True)
self.nastav_dataframe(self.tbl['osoby'])
log.debug("<-- Osoby")
class ZarazeniOsoby(TabulkaZarazeniOsobyMixin, Funkce, Organy, Osoby):
def __init__(self, *args, **kwargs):
log.debug("--> ZarazeniOsoby")
super().__init__(*args, **kwargs)
self.nacti_zarazeni_osoby()
        # Attach the persons (osoby)
suffix = "__osoby"
self.tbl['zarazeni_osoby'] = pd.merge(left=self.tbl['zarazeni_osoby'], right=self.tbl['osoby'], on='id_osoba', suffixes = ("", suffix), how='left')
self.tbl['zarazeni_osoby'] = self.drop_by_inconsistency(self.tbl['zarazeni_osoby'], suffix, 0.1, 'zarazeni_osoby', 'osoby')
        # Attach the bodies (organy)
suffix = "__organy"
sub1 = self.tbl['zarazeni_osoby'][self.tbl['zarazeni_osoby'].cl_funkce == 'členství'].reset_index()
if self.volebni_obdobi == -1:
m1 = pd.merge(left=sub1, right=self.tbl['organy'], left_on='id_of', right_on='id_organ', suffixes=("", suffix), how='left')
else:
            # Note: how='left' is not enough here; the 'inner' join takes part in narrowing down to the given chamber
m1 = pd.merge(left=sub1, right=self.tbl['organy'], left_on='id_of', right_on='id_organ', suffixes=("", suffix), how='inner')
m1 = self.drop_by_inconsistency(m1, suffix, 0.1, 'zarazeni_osoby', 'organy')
        # Attach the functions (funkce)
sub2 = self.tbl['zarazeni_osoby'][self.tbl['zarazeni_osoby'].cl_funkce == 'funkce'].reset_index()
if self.volebni_obdobi == -1:
m2 = pd.merge(left=sub2, right=self.tbl['funkce'], left_on='id_of', right_on='id_funkce', suffixes=("", suffix), how='left')
else:
# Pozor, how='left' nestačí, 'inner' se podílí na zúžení na danou sněmovnu
m2 = pd.merge(left=sub2, right=self.tbl['funkce'], left_on='id_of', right_on='id_funkce', suffixes=("", suffix), how='inner')
m2 = self.drop_by_inconsistency(m2, suffix, 0.1, 'zarazeni_osoby', 'funkce')
self.tbl['zarazeni_osoby'] = pd.concat([m1, m2], axis=0, ignore_index=True).set_index('index').sort_index()
        # Narrow down to the given election term
self.vyber_platne_zarazeni_osoby()
self.nastav_dataframe(self.tbl['zarazeni_osoby'])
log.debug("<-- ZarazeniOsoby")
def vyber_platne_zarazeni_osoby(self):
if self.volebni_obdobi != -1:
interval_start = self.tbl['zarazeni_osoby'].od_o\
.mask(self.tbl['zarazeni_osoby'].od_o.isna(), self.snemovna.od_organ)\
.mask(~self.tbl['zarazeni_osoby'].od_o.isna(), np.maximum(self.tbl['zarazeni_osoby'].od_o, self.snemovna.od_organ))
            # Observation: volebni_obdobi_od is never NaT => interval_start is never NaT
            if pd.isna(self.snemovna.do_organ):  # marker of the most recent election term
                podminka_interval = (
                    (interval_start.dt.date <= self.tbl['zarazeni_osoby'].do_o.dt.date)  # necessary for True: (interval_start != NaT, always holds) and (do_o != NaT)
                    | (self.tbl['zarazeni_osoby'].do_o.isna())  # necessary for True: (interval_start != NaT, always holds) and (do_o == NaT)
                )
            else:  # Observation: a previous election term => interval_end is never NaT
interval_end = self.tbl['zarazeni_osoby'].do_o\
.mask(self.tbl['zarazeni_osoby'].do_o.isna(), self.snemovna.do_organ)\
.mask(~self.tbl['zarazeni_osoby'].do_o.isna(), np.minimum(self.tbl['zarazeni_osoby'].do_o, self.snemovna.do_organ))
podminka_interval = (interval_start.dt.date <= interval_end.dt.date)
self.tbl['zarazeni_osoby'] = self.tbl['zarazeni_osoby'][podminka_interval]
class Poslanci(TabulkaPoslanciPkgpsMixin, TabulkaPoslanciMixin, ZarazeniOsoby, Organy):
def __init__(self, *args, **kwargs):
log.debug("--> Poslanci")
super().__init__(*args, **kwargs)
self.nacti_poslanci_pkgps()
self.nacti_poslance()
        # Narrow down to the given election term
if self.volebni_obdobi != -1:
self.tbl['poslanci'] = self.tbl['poslanci'][self.tbl['poslanci'].id_organ == self.snemovna.id_organ]
        # Attach person information, e.g. first name and surname
suffix = "__osoby"
self.tbl['poslanci'] = pd.merge(left=self.tbl['poslanci'], right=self.tbl['osoby'], on='id_osoba', suffixes = ("", suffix), how='left')
self.tbl['poslanci'] = self.drop_by_inconsistency(self.tbl['poslanci'], suffix, 0.1, 'poslanci', 'osoby')
        # Attach office (pkgps) information
suffix = "__poslanci_pkgps"
self.tbl['poslanci'] = pd.merge(left=self.tbl['poslanci'], right=self.tbl['poslanci_pkgps'], on='id_poslanec', suffixes = ("", suffix), how='left')
self.drop_by_inconsistency(self.tbl['poslanci'], suffix, 0.1, 'poslanci', 'poslanci_pkgps', inplace=True)
        # Attach information about the candidate list (kandidatka)
suffix = "__organy"
self.tbl['poslanci'] = pd.merge(left=self.tbl['poslanci'], right=self.tbl['organy'][["id_organ", "nazev_organ_cz", "zkratka"]], left_on='id_kandidatka', right_on='id_organ', suffixes = ("", suffix), how='left')
self.tbl['poslanci'].drop(columns=['id_organ__organy'], inplace=True)
self.tbl['poslanci'].rename(columns={'nazev_organ_cz': 'nazev_kandidatka_cz', 'zkratka': 'zkratka_kandidatka'}, inplace=True)
self.drop_by_inconsistency(self.tbl['poslanci'], suffix, 0.1, 'poslanci', 'organy', t1_on='id_organ', t2_on='id_kandidatka', inplace=True)
self.meta.nastav_hodnotu('nazev_kandidatka_cz', {"popis": 'Název strany, za kterou poslanec kandidoval, viz Organy:nazev_organ_cz', 'tabulka': 'df', 'vlastni': True})
self.meta.nastav_hodnotu('zkratka_kandidatka', {"popis": 'Zkratka strany, za kterou poslanec kandidoval, viz Organy:nazev_organ_cz', 'tabulka': 'df', 'vlastni': True})
        # Attach information about the region (kraj)
suffix = "__organy"
self.tbl['poslanci'] = pd.merge(left=self.tbl['poslanci'], right=self.tbl['organy'][["id_organ", "nazev_organ_cz", "zkratka"]], left_on='id_kraj', right_on='id_organ', suffixes = ("", suffix), how='left')
self.tbl['poslanci'].drop(columns=['id_organ__organy'], inplace=True)
self.tbl['poslanci'].rename(columns={'nazev_organ_cz': 'nazev_kraj_cz', 'zkratka': 'zkratka_kraj'}, inplace=True)
self.drop_by_inconsistency(self.tbl['poslanci'], suffix, 0.1, 'poslanci', 'organy', t1_on='id_kraj', t2_on='id_organ', inplace=True)
self.meta.nastav_hodnotu('nazev_kraj_cz', {"popis": 'Název kraje, za který poslanec kandidoval, viz Organy:nazev_organ_cz', 'tabulka': 'df', 'vlastni': True})
self.meta.nastav_hodnotu('zkratka_kraj', {"popis": 'Zkratka kraje, za který poslanec kandidoval, viz Organy:nazev_organ_cz', 'tabulka': 'df', 'vlastni': True})
        # Attach the dates of entering and, where applicable, leaving parliament
parlament = self.tbl['zarazeni_osoby'][(self.tbl['zarazeni_osoby'].id_osoba.isin(self.tbl['poslanci'].id_osoba)) & (self.tbl['zarazeni_osoby'].nazev_typ_organ_cz == "Parlament") & (self.tbl['zarazeni_osoby'].cl_funkce=='členství')].copy()
#parlament = parlament.sort_values(['id_osoba', 'od_o']).groupby('id_osoba').tail(1).reset_index()
parlament = parlament.sort_values(['id_osoba', 'od_o']).groupby('id_osoba').tail(1).reset_index()
parlament.rename(columns={'id_organ': 'id_parlament', 'od_o': 'od_parlament', 'do_o': 'do_parlament'}, inplace=True)
self.tbl['poslanci'] = pd.merge(self.tbl['poslanci'], parlament[['id_osoba', 'id_parlament', 'od_parlament', 'do_parlament']], on='id_osoba', how="left")
self.tbl['poslanci'] = self.drop_by_inconsistency(self.tbl['poslanci'], suffix, 0.1, 'poslanci', 'zarazeni_osoby')
self.meta.nastav_hodnotu('id_parlament', {"popis": 'Identifikátor parlamentu, jehož byli poslanci členy, viz Organy:id_organ', 'tabulka': 'df', 'vlastni': True})
self.meta.nastav_hodnotu('od_parlament', {"popis": 'Datum začátku zařazení poslanců do parlamentu, viz Organy:od_o', 'tabulka': 'df', 'vlastni': True})
self.meta.nastav_hodnotu('do_parlament', {"popis": 'Datum konce zařazení poslanců do parlamentu, viz Organy:do_o', 'tabulka': 'df', 'vlastni': True})
        # Attach information about the most recent parliamentary club from 'zarazeni_osoby'.
kluby = self.tbl['zarazeni_osoby'][(self.tbl['zarazeni_osoby'].id_osoba.isin(self.tbl['poslanci'].id_osoba)) & (self.tbl['zarazeni_osoby'].nazev_typ_organ_cz == "Klub") & (self.tbl['zarazeni_osoby'].cl_funkce=='členství')].copy()
kluby = kluby.sort_values(['id_osoba', 'od_o']).groupby('id_osoba').tail(1).reset_index()
kluby.rename(columns={'id_organ': 'id_klub', 'nazev_organ_cz': 'nazev_klub_cz', 'zkratka': 'zkratka_klub', 'od_o': 'od_klub', 'do_o': 'do_klub'}, inplace=True)
self.tbl['poslanci'] = | pd.merge(self.tbl['poslanci'], kluby[['id_osoba', 'id_klub', 'nazev_klub_cz', 'zkratka_klub', 'od_klub', 'do_klub']], on='id_osoba', how="left") | pandas.merge |
import pandas as pd
import pickle
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
import numpy as np
import datetime as dt
from LDA import remove_stopwords, lemmatization, make_bigrams, sent_to_words
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# LOAD CLUSTERING MODEL
with open("data/cluster_model.pkl", "rb") as f:
cluster_model = pickle.load(f)
# LOAD LDA MODEL
lda_model = gensim.models.LdaModel.load('data/LDA/lda.model')
id2word = corpora.Dictionary.load('data/LDA/lda.model.id2word')
def get_interests():
"""
Load the raw interest csv file.
:return: The full interest.csv file in pandas dataframe
"""
interest = pd.read_csv('data/interest.csv')
return(interest)
def get_posts():
"""
Load the raw posts csv file.
:return: The full posts.csv file in pandas dataframe
"""
posts = pd.read_csv('data/posts.csv')
return(posts)
def get_users():
"""
Load the raw users csv file.
:return: The full users.csv file in pandas dataframe
"""
users = pd.read_csv('data/users.csv')
return(users)
def filter_posts(uid,date):
"""
Returns posts that have been filtered to be before a given date and aren't owned by the user
:param uid (str): user-id to filter by
:param date (str): date value to filter by
:return: pandas dataframe filtered of any posts greater than date and not owned by user
"""
posts = get_posts()
posts = posts[posts['uid'] != uid]
posts = posts[posts['post_time'] < date]
return posts
def get_user_data(uid):
"""
Returns the selected user account information
:param uid (str): user-id
:return: single-row pandas dataframe of user account information
"""
users = get_users()
user = users[users['uid'] == uid].reset_index(drop=True)
return user
def get_user_interest(uid):
"""
Returns the selected user interest information
:param uid (str): user-id
:return: single-row pandas dataframe of user interest information
"""
interests = get_interests()
interest = interests[interests['uid'] == uid].reset_index(drop=True)
return interest
def cluster_user(uid):
"""
Returns categorised ID of the selected user from the clustering model
:param uid (str): user-id
:return: single integer value of ID category
"""
# Load needed data for user
users = get_user_data(uid)
interests = get_user_interest(uid)
# Create Age Buckets for clustering
users['date'] = | pd.to_datetime(users['dob'], format='%d/%m/%Y', errors='coerce') | pandas.to_datetime |
# --------------
# import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings('ignore')
# Code starts here
# Load dataset
df = pd.read_json(path, lines=True)
df.columns = [x.replace(" ","_") for x in df.columns]
missing_data = df.count()
df.drop(['waist', 'bust', 'user_name','review_text','review_summary','shoe_size','shoe_width'], axis = 1, inplace = True)
X = df.drop(["fit"], axis = 1)
y = df["fit"].copy()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 6)
# Code ends here
# --------------
def plot_barh(df,col, cmap = None, stacked=False, norm = None):
df.plot(kind='barh', colormap=cmap, stacked=stacked)
fig = plt.gcf()
fig.set_size_inches(24,12)
plt.title("Category vs {}-feedback - cloth {}".format(col, '(Normalized)' if norm else ''), fontsize= 20)
plt.ylabel('Category', fontsize = 18)
plot = plt.xlabel('Frequency', fontsize=18)
# Code starts here
g_by_category = df.groupby(['category'])
cat_fit = g_by_category['fit'].value_counts()
cat_fit = cat_fit.unstack()
cat_fit.plot.bar()
# Code ends here
# --------------
# Code starts here
cat_len = g_by_category['length'].value_counts()
cat_len = cat_len.unstack()
plot_barh(cat_len, 'length')
# Code ends here
# --------------
# Code starts here
def get_cms(x):
if(isinstance(x, float)):
return 0
else:
if(len(x) < 4):
return (int(x[0])*30.48)
else:
return (int(x[0])*30.48) + (int(x[4:-2])*2.54)
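# Worked example (assuming the raw heights follow the "5ft 7in" format):
#   get_cms('5ft')     -> 5*30.48          = 152.4  cm
#   get_cms('5ft 7in') -> 5*30.48 + 7*2.54 = 170.18 cm
#   get_cms(np.nan)    -> 0  (missing heights come through as floats)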
#print(X_train['height'][9])
X_train['height'] = X_train['height'].apply(get_cms)
X_test.height = X_test['height'].apply(get_cms)
#print(X_train.height[:2])
# Code ends here
# --------------
# Code starts here
missing_values_train = X_train.isna()
missing_values_test = X_test.isna()
X_train.dropna(subset=['height', 'length', 'quality'], inplace=True)
X_test.dropna(subset=['height', 'length', 'quality'], inplace=True)
#print(missing_values_train['height'] | missing_values_train['length'] | missing_values_train['quality'])
y_train = y_train[~(missing_values_train['height'] | missing_values_train['length'] | missing_values_train['quality'])]
y_test = y_test[~(missing_values_test['height'] | missing_values_test['length'] | missing_values_test['quality'])]
#y_test = y_test[~missing_values_test[['height', 'length', 'quality']]]
bra_mean_train = X_train['bra_size'].mean()
bra_mean_test = X_test['bra_size'].mean()
hips_mean_train = X_train['hips'].mean()
hips_mean_test = X_test['hips'].mean()
X_train['bra_size'].fillna(bra_mean_train, inplace = True)
X_test['bra_size'].fillna(bra_mean_test, inplace = True)
X_train['hips'].fillna(hips_mean_train, inplace = True)
X_test['hips'].fillna(hips_mean_test, inplace = True)
mode_1 = X_train['cup_size'].mode()[0]
mode_2 = X_test['cup_size'].mode()[0]
X_train['cup_size'].fillna(mode_1, inplace = True)
X_test['cup_size'].fillna(mode_2, inplace = True)
# Code ends here
# --------------
# Code starts here
X_train = pd.get_dummies(data=X_train, columns = ['category','cup_size','length'])
X_test = | pd.get_dummies(data=X_test, columns = ['category','cup_size','length']) | pandas.get_dummies |
import json
import pandas as pd
import time
"""
The following files are required:
    1. prediction json: bbox_level{}_test_results.json
    2. test-set json: test.json
    3. sample_submission.csv
"""
LABLE_LEVEL = 4
SCORE_THRESHOLD = 0.001
def json_to_dict(json_file_dir):
with open(json_file_dir, "r") as json_file:
json_dict = json.load(json_file)
json_file.close()
return json_dict
def get_threshold_result_list(label_level=LABLE_LEVEL, score_threshold=SCORE_THRESHOLD):
    detect_result_list = json_to_dict('bbox_level{}_test_results.json'.format(label_level))  # detect_result_list is a list of dicts like {'image_id': 2020005391, 'category_id': 43, 'bbox': [150.59866333007812, 332.810791015625, 370.6794128417969, 480.145263671875], 'score': 0.007447981275618076}
result_Threshold_list = []
for result in detect_result_list:
if result['score'] > score_threshold:
result_Threshold_list.append(result)
print("There are {} bboxes".format(len(result_Threshold_list)))
return result_Threshold_list
def get_images_categories_info(label_level=LABLE_LEVEL):
image_name_id_dict = {}
image_id_name_dict = {}
image_id_WH_dict = {}
original_id_dict = {}
id_original_dict = {}
images_and_categories_dict = json_to_dict('test.json')
images = images_and_categories_dict['images']
categories = images_and_categories_dict['categories']
for i in images:
image_name_id_dict[i['file_name']] = i['id']
image_id_name_dict[i['id']] = i['file_name']
        image_id_WH_dict[i['id']] = [i["width"], i["height"]]  # a [width, height] list
for i in categories:
original_id_dict[i["original_id"]] = i['id']
id_original_dict[i['id']] = i['original_id']
return image_name_id_dict, image_id_name_dict, image_id_WH_dict, original_id_dict, id_original_dict
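# Note: each detector bbox appears to be [x, y, width, height] in pixels (the
# usual COCO-style output); the loop below converts it to normalised
# [xmin, ymin, xmax, ymax] before building the prediction string.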
def write_jsonresult_to_csv():
ImageId = []
PredictionString = []
result_Threshold_list = get_threshold_result_list(LABLE_LEVEL, SCORE_THRESHOLD)
_, image_id_name_dict, image_id_WH_dict, _, id_original_dict = get_images_categories_info(label_level=LABLE_LEVEL)
for bbox in result_Threshold_list:
image_id = bbox['image_id']
ImageId.append(image_id_name_dict[image_id][:-4])
image_W = image_id_WH_dict[image_id][0]
image_H = image_id_WH_dict[image_id][1]
bbox_xmin = bbox['bbox'][0]/image_W
bbox_ymin = bbox['bbox'][1]/image_H
bbox_xmax = (bbox['bbox'][0] + bbox['bbox'][2])/image_W
bbox_ymax = (bbox['bbox'][1] + bbox['bbox'][3])/image_H
original_label = id_original_dict[bbox['category_id']]
Confidence = bbox['score']
predictionstring = original_label + ' ' + str(Confidence) + ' ' + str(bbox_xmin)+ ' ' + str(bbox_ymin)+ ' ' + str(bbox_xmax)+ ' ' + str(bbox_ymax) + ' '
PredictionString.append(predictionstring)
print(len(ImageId))
print(len(PredictionString))
sample_csv = pd.read_csv('sample_submission.csv')
sample_csv["PredictionString"] = ""
# sample_ImageId = sample_csv["ImageId"].values.tolist()
series_imageid = pd.Series(ImageId)
series_predictionstring = pd.Series(PredictionString)
    # write the results and merge them by image name
data = {'ImageId':series_imageid, "PredictionString":series_predictionstring}
df = pd.DataFrame(data)
    df = pd.concat([sample_csv, df], ignore_index=True)
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
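# Note on the fixture naming above: in the fully expanded fixtures the doubly
# suffixed columns appear to follow data_<category>_<time> (inferred from the
# expected values, e.g. data_2_3 holds the category-2 record at time stamp 3).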
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
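# Reading the fixtures above: 'ignore' leaves each category's records as they
# are, 'pad' aligns every category onto the union of time stamps and fills the
# gaps with NaN, and 'remove' keeps only the time stamps shared by all
# categories (inferred from the expected values, not from documentation).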
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
        pd.testing.assert_frame_equal(test,df,check_dtype=False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
        pd.testing.assert_frame_equal(df,test,check_dtype=False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
        pd.testing.assert_frame_equal(test,full,check_dtype=False)
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
            timeSeries = to_numpy(tsc,True,False,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_table_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
tsc = Time_Series_Data_Collection(tsd,'time','category')
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_table_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_table_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_table_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_table_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_table(tsd,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_table(tsc,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_arrow_table(tsd,False,False,'ignore',False).to_pandas()
        pd.testing.assert_frame_equal(test,df,check_dtype=False)
def test_to_arrow_table_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_arrow_table(tsc,False,False,'ignore').to_pandas()
        pd.testing.assert_frame_equal(df,test,check_dtype=False)
test = to_arrow_table(tsc,True,True,'ignore').to_pandas()
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
        pd.testing.assert_frame_equal(test,full,check_dtype=False)
###
def record_batch_to_pandas(self,batchList):
df = None
for i in batchList:
if df is None:
df = i.to_pandas()
continue
            df = pd.concat([df, i.to_pandas()], ignore_index=True)
return df
def test_to_arrow_batch_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_batch_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_batch_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_batch_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_batch_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_batch_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_record_batch(tsd,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
    def test_to_arrow_batch_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_record_batch(tsc,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
class Test_Parquet_IO:
def test_from_parquet_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
pq.write_table(table,'test.parquet')
testData = from_parquet('test.parquet','time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
os.remove('test.parquet')
def test_from_parquet_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
pq.write_table(table,'test_collection.parquet')
testData = from_parquet('test_collection.parquet','time','category')
assert tsc == testData
os.remove('test_collection.parquet')
###########
def test_to_parquet_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
to_parquet(
'test.parquet',
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
to_parquet(
'test.parquet',
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
os.remove('test.parquet')
def test_to_parquet_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,False,True,'ignore')
os.remove('test.parquet')
def test_to_parquet_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_parquet('test.parquet',tsc,True,False,'ignore')
os.remove('test.parquet')
def test_to_parquet_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_parquet(
'test.parquet',
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
testData = pq.read_table('test.parquet').to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas('test.parquet',tsc,True,True,'ignore')
def test_to_parquet_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
        expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
# -*- coding: utf-8 -*-
"""
Created on 26 Aug 2021
@author: <NAME>
"""
import numpy as np
import pandas as pd
import warnings
from multisim import matprops as mp
from multisim import ut as ut
class Meters:
def __init__(self, simenv, start_time):
self._simenv = simenv # save for attribute lookup
self.start_time = pd.to_datetime(start_time)
# get resampling specs:
if self._simenv._disk_store['resample']:
self._resample = True
self._resample_freq = self._simenv._disk_store['resample_freq']
else:
self._resample = False
self._resample_freq = None
# get base simenv timevec:
self._time_index = pd.to_datetime(
self._simenv.time_vec, origin=self.start_time, unit='s'
)
# get meter timevec, depending on resampling or not
if not self._resample:
self._meter_time_index = self._time_index
else:
            # somewhat roundabout, but simple:
self._meter_time_index = (
pd.Series(index=self._time_index, data=np.nan)
.resample(self._resample_freq)
.mean()
.interpolate()
.index
)
self.meters = {}
self.meters['heat_meter'] = {}
self.meters['temperature'] = {}
self.meters['massflow'] = {}
self.meters['volumeflow'] = {}
# make multiindex name specifier:
self.__midx_names = ['type', 'name', 'sensor']
mid = pd.MultiIndex( # make empty multiindex
levels=[[]] * 3, codes=[[]] * 3, names=['type', 'name', 'sensor']
)
# make empty dataframe
self.meters_df = pd.DataFrame(
columns=mid, index=self._meter_time_index
)
# also save meters and dataframe to SimEnv:
self._simenv.meters = self.meters
self._simenv.meters_df = self.meters_df
# and check if disksaving is activated and if yes, save meters to disk
self._disk_saving = self._simenv._SimEnv__save_to_disk
if self._disk_saving:
self._dstore = self._simenv._disk_store['store']
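    # A minimal usage sketch (the part/cell names are hypothetical and must
    # exist in the wrapped SimEnv instance):
    #   meters = Meters(my_simenv, start_time='2020-01-01')
    #   meters.heat_meter(name='hm_storage', warm_part='pipe_hot', warm_cell=0,
    #                     cold_part='pipe_cold', cold_cell=-1)
    #   meters.temperature(name='t_top', part='storage', cell=0)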
def heat_meter(
self,
*,
name,
warm_part,
warm_cell,
cold_part,
cold_cell,
massflow_from_hot_part=False,
full_output=False,
evenly_spaced=False,
freq='1s',
how='interpolate'
):
"""
Calculate heat meter values from cold to warm part.
        This method calculates the heat flow between two parts and their
selected cells. The first part defines the forward flow and should be
the warm part/flow by definition, while the second part defines the
cold return flow **and the massflow**.
The following values are calculated:
- heatflow in [kW]
- flown heat Megajoule, cumulative in [MJ]
- flown heat kWh, cumulative in [kWh]
- massflow in [kg/s]
- volume flow in [m^3/s]
- flown mass in [kg]
- flown volume in [m^3]
- forward flow temperature in [°C]
- return flow temperature in [°C]
"""
err_str = (
'`name=\'{0}\'` has already been assigned to '
'another heat meter.'.format(name)
)
assert name not in self.meters['heat_meter'], err_str
# check if parts and cell indices exist
self._simenv._check_isinrange(
part=warm_part, index=warm_cell, target_array='temperature'
)
self._simenv._check_isinrange(
part=cold_part, index=cold_cell, target_array='temperature'
)
# get massflow depending on selected source:
if massflow_from_hot_part:
fidx_dm = self._simenv._massflow_idx_from_temp_idx(
part=warm_part, index=warm_cell
)
mflow = self._simenv.parts[warm_part].res_dm[:, fidx_dm]
else: # massflow from cold part (default)
sidx_dm = self._simenv._massflow_idx_from_temp_idx(
part=cold_part, index=cold_cell
)
mflow = self._simenv.parts[cold_part].res_dm[:, sidx_dm]
df = pd.DataFrame(index=self._time_index) # make dataframe
# forward and return flow temperature and difference
df['T_ff'] = self._simenv.parts[warm_part].res[:, warm_cell]
df['T_rf'] = self._simenv.parts[cold_part].res[:, cold_cell]
df['T_diff'] = df['T_ff'] - df['T_rf']
df['massflow_kgps'] = mflow.copy() # copy to fix this value
df['volume_flow_m3ps'] = df['massflow_kgps'] / mp.rho_water(
df['T_rf'].values
)
# get massflow backup array for only positive values:
massflow_bkp = mflow.copy() # double copy not really required, but...
df['flown_mass_kg'] = np.cumsum(
massflow_bkp * self._simenv.time_step_vec
) # cumsum with neg. vals
df['flown_volume_m3'] = (
df['volume_flow_m3ps'] * self._simenv.time_step_vec
).cumsum()
if full_output: # these are not reqlly _always_ required...
massflow_bkp[massflow_bkp < 0.0] = 0.0 # set negative to zeros
df['flown_mass_pos_kg'] = np.cumsum(
massflow_bkp * self._simenv.time_step_vec
) # only pos. cumsum
df['flown_volume_pos_m3'] = np.cumsum( # only pos. cumsum
massflow_bkp
* self._simenv.time_step_vec
/ mp.rho_water(df['T_rf'].values)
)
# get heatflow in [kW]
df['heatflow_kW'] = (
(df['T_ff'] - df['T_rf'])
* df['massflow_kgps']
/ 1e3
* (mp.cp_water(df['T_rf'].values) + mp.cp_water(df['T_ff'].values))
/ 2
)
hf_bkp = df['heatflow_kW'].copy() # bkp heatflow for only-pos.-cumsum
df['flown_heat_MJ'] = (
np.cumsum(hf_bkp * self._simenv.time_step_vec) / 1e3
)
df['flown_heat_kWh'] = df['flown_heat_MJ'] / 3.6
if full_output: # these are not reqlly _always_ required...
hf_bkp[hf_bkp < 0.0] = 0.0 # set negative to zeros
df['flown_heat_pos_MJ'] = (
np.cumsum(hf_bkp * self._simenv.time_step_vec) / 1e3
)
df['flown_heat_pos_kWh'] = df['flown_heat_pos_MJ'] / 3.6
# if even meter is requested, make it an even meter:
if evenly_spaced:
raise DeprecationWarning(
'This is deprecated, since this may cause double resampling '
'if simulation data is resampled (default), causing '
'matplotlib to freeze. If additional resampling is required, '
'please simply resample by hand. This method has done nothing '
'else than simple resampling before...'
)
df = ut.process_unevenly_spaced_timeseries(
data=df, freq=freq, how=how
)
# resample if specified in simenv:
df = self._resample_meter(df)
# make multiindex for merging with dataframe for all sensors:
tuples = []
for col in df.columns:
tuples.append(('heat_meter', name, col))
midx = pd.MultiIndex.from_tuples( # make multiindex
tuples=tuples, names=self.__midx_names
)
self.meters_df[midx] = df # add to dataframe
# save to dict:
self.meters['heat_meter'][name] = df
if self._disk_saving: # save to disk:
self._save_to_disk(name, df)
def temperature(self, *, name, part, cell):
"""
Add a temperature sensor to meters.
"""
# check if part and cell indices exist
self._simenv._check_isinrange(
part=part, index=cell, target_array='temperature'
)
err_str = (
'`name={0}` has already been assigned to another '
'temperature sensor.'.format(name)
)
assert name not in self.meters['temperature'], err_str
df = pd.DataFrame(index=self._time_index) # make dataframe
# get temperature
if self._simenv.parts[part].res.ndim == 2:
df['T_' + name] = self._simenv.parts[part].res[:, cell]
elif self._simenv.parts[part].res.ndim > 2:
# if ndim of temp array > 1, res.ndim > 3. reshape to a flat
# ndim=1 array PER timestep to index with flat index.
df['T_' + name] = self._simenv.parts[part].res.reshape(
-1, np.prod(self._simenv.parts[part].res.shape[1:])
)[:, cell]
# resample if specified in simenv:
df = self._resample_meter(df)
# make multiindex for merging with dataframe for all sensors:
tuples = []
for col in df.columns:
tuples.append(('temperature', name, col))
midx = pd.MultiIndex.from_tuples( # make multiindex
tuples=tuples, names=self.__midx_names
)
self.meters_df[midx] = df # add to dataframe
self.meters['temperature'][name] = df # save to dict
if self._disk_saving: # save to disk:
self._save_to_disk(name, df)
def massflow(self, *, name, part, cell):
"""
Add a massflow sensor in [kg/s] to meters.
"""
# check if part and cell indices exist
self._simenv._check_isinrange(
part=part, index=cell, target_array='temperature'
)
# get index to massflow array
idx_dm = self._simenv._massflow_idx_from_temp_idx(
part=part, index=cell
)
err_str = (
'`name=' + str(name) + '` has already been assigned to another '
'massflow sensor.'
)
assert name not in self.meters['massflow'], err_str
        df = pd.DataFrame(index=self._time_index)
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
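# Worked by hand for the test above: on 2015-01-04 the CL1 weight is split
# 0.5/0.5 between CLF5 and CLG5, so the generic return is
# 0.5 * 0.05 + 0.5 * 0.10 = 0.075, which is the middle row of wrets_exp.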
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1'])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
def test_calc_rets_two_generics_two_asts():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets1 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')])
rets2 = pd.Series([0.1, 0.15, 0.05, 0.1, 0.4], index=idx)
rets = {"CL": rets1, "CO": rets2}
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights1 = pd.DataFrame(vals, index=widx, columns=["CL0", "CL1"])
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'COF5'),
(TS('2015-01-03'), 'COG5'),
(TS('2015-01-04'), 'COF5'),
(TS('2015-01-04'), 'COG5'),
(TS('2015-01-04'), 'COH5')
])
weights2 = pd.DataFrame(vals, index=widx, columns=["CO0", "CO1"])
weights = {"CL": weights1, "CO": weights2}
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15, 0.1, 0.15],
[0.075, 0.45, 0.075, 0.25],
                              [-0.5, 0.2, np.NaN, np.NaN]],
index=weights["CL"].index.levels[0],
columns=['CL0', 'CL1', 'CO0', 'CO1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_instr_rets_key_error():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5')])
irets = pd.Series([0.02, 0.01, 0.012], index=idx)
vals = [1, 1/2, 1/2, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(KeyError):
util.calc_rets(irets, weights)
def test_calc_rets_nan_instr_rets():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
    rets = pd.Series([np.NaN, np.NaN, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
    wrets_exp = pd.DataFrame([np.NaN, np.NaN, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_missing_weight():
# see https://github.com/matthewgilbert/mapping/issues/8
# missing weight for return
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
rets = pd.Series([0.02, -0.03, 0.06], index=idx)
vals = [1, 1]
widx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')
])
weights = pd.DataFrame(vals, index=widx, columns=["CL1"])
with pytest.raises(ValueError):
util.calc_rets(rets, weights)
# extra instrument
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights1 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-02'), 'CLH5'),
(TS('2015-01-03'), 'CLH5'), # extra day for no weight instrument
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLH5')
])
rets = pd.Series([0.02, -0.03, 0.06, 0.05, 0.01], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights1)
# leading / trailing returns
idx = pd.MultiIndex.from_tuples([(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5')])
weights2 = pd.DataFrame(1, index=idx, columns=["CL1"])
idx = pd.MultiIndex.from_tuples([(TS('2015-01-01'), 'CLF5'),
(TS('2015-01-02'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-05'), 'CLF5')])
rets = pd.Series([0.02, -0.03, 0.06, 0.05], index=idx)
with pytest.raises(ValueError):
util.calc_rets(rets, weights2)
def test_to_notional_empty():
instrs = pd.Series()
prices = pd.Series()
multipliers = pd.Series()
res_exp = pd.Series()
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_same_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_extra_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2, 13.1], index=['CLZ6', 'COZ6',
'GCZ6', 'extra'])
res_exp = pd.Series([-30.20, 2 * 30.5, 10.2],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_missing_prices():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
    res_exp = pd.Series([-30.20, 2 * 30.5, np.NaN],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers)
assert_series_equal(res, res_exp)
def test_to_notional_different_fx():
instrs = pd.Series([-1, 2, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
multipliers = pd.Series([1, 1, 1], index=['CLZ6', 'COZ6', 'GCZ6'])
prices = pd.Series([30.20, 30.5, 10.2], index=['CLZ6', 'COZ6', 'GCZ6'])
instr_fx = pd.Series(['USD', 'CAD', 'AUD'],
index=['CLZ6', 'COZ6', 'GCZ6'])
fx_rates = pd.Series([1.32, 0.8], index=['USDCAD', 'AUDUSD'])
res_exp = pd.Series([-30.20, 2 * 30.5 / 1.32, 10.2 * 0.8],
index=['CLZ6', 'COZ6', 'GCZ6'])
res = util.to_notional(instrs, prices, multipliers, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
assert_series_equal(res, res_exp)
def test_to_notional_duplicates():
instrs = pd.Series([1, 1], index=['A', 'A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37, 200.37], index=['A', 'A'])
mults = pd.Series([100], index=['A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100, 100], index=['A', 'A'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD', 'USD'], index=['A', 'A'])
fx_rate = pd.Series([1.32], index=['USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
desired_ccy = "CAD"
instr_fx = pd.Series(['USD'], index=['A'])
fx_rate = pd.Series([1.32, 1.32], index=['USDCAD', 'USDCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy,
instr_fx, fx_rate)
def test_to_notional_bad_fx():
instrs = pd.Series([1], index=['A'])
prices = pd.Series([200.37], index=['A'])
mults = pd.Series([100], index=['A'])
instr_fx = pd.Series(['JPY'], index=['A'])
fx_rates = pd.Series([1.32], index=['GBPCAD'])
with pytest.raises(ValueError):
util.to_notional(instrs, prices, mults, desired_ccy='USD',
instr_fx=instr_fx, fx_rates=fx_rates)
def test_to_contracts_rounder():
prices = pd.Series([30.20, 30.5], index=['CLZ6', 'COZ6'])
multipliers = pd.Series([1, 1], index=['CLZ6', 'COZ6'])
# 30.19 / 30.20 is slightly less than 1 so will round to 0
notional = | pd.Series([30.19, 2 * 30.5], index=['CLZ6', 'COZ6']) | pandas.Series |
import os
import pickle
import random
import numpy as np
import pandas as pd
from sklearn.neighbors import KDTree
import gputransform
import config as cfg
#####For training and test data split#####
cfg.SAMPLE_INTERVAL_TEST = 1.5
x_width = 150
y_width = 150
# For Oxford
p1 = [5735712.768124,620084.402381]
p2 = [5735611.299219,620540.270327]
p3 = [5735237.358209,620543.094379]
p4 = [5734749.303802,619932.693364]
# For University Sector
p5 = [363621.292362,142864.19756]
p6 = [364788.795462,143125.746609]
p7 = [363597.507711,144011.414174]
# For Residential Area
p8 = [360895.486453,144999.915143]
p9 = [362357.024536,144894.825301]
p10 = [361368.907155,145209.663042]
p_dict = {"oxford":[p1,p2,p3,p4], "university":[
p5,p6,p7], "residential": [p8,p9,p10], "business":[]}
p = [-50.0, 150.0, -250.0, 150.0]
# check if the location is in the test set
def check_in_test_set(northing, easting, points):
in_test_set = False
if(points[0] < northing and northing < points[1] and points[2] < easting and easting < points[3]):
in_test_set = True
return in_test_set
# check if it's a new place
def check_submap(northing, easting, prev_northing, prev_easting):
is_submap = False
euclidean = np.abs(np.sqrt((prev_northing-northing)**2 + (prev_easting-easting)**2))
if(euclidean < cfg.SAMPLE_INTERVAL_TEST and euclidean >= (cfg.SAMPLE_INTERVAL_TEST - 0.5)):
is_submap = True
return is_submap
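# Illustrative note (not part of the original script): with
# cfg.SAMPLE_INTERVAL_TEST = 1.5 as set above, check_submap() accepts a new
# submap only when the distance to the previous one lies in [1.0, 1.5), e.g.
# check_submap(10.0, 0.0, 8.8, 0.0) -> True, check_submap(10.0, 0.0, 9.9, 0.0) -> False.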
# find closest place timestamp with index returned
def find_closest_timestamp(A, target):
#A must be sorted
idx = A.searchsorted(target)
idx = np.clip(idx, 1, len(A)-1)
left = A[idx-1]
right = A[idx]
idx -= target - left < right - target
return idx
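# A minimal, self-contained check of the nearest-timestamp logic above
# (illustrative only: the array values are made up and this helper is not
# called anywhere in the original pipeline).
def _example_find_closest_timestamp():
    A = np.array([0.0, 1.0, 2.5, 4.0])
    targets = np.array([0.9, 2.6, 3.9])
    # expected nearest indices: 1 (-> 1.0), 2 (-> 2.5), 3 (-> 4.0)
    assert np.array_equal(find_closest_timestamp(A, targets), np.array([1, 2, 3]))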
# dump the tuples to pickle files for training
def output_to_file(output, filename):
with open(filename, 'wb') as handle:
pickle.dump(output, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Done ", filename)
# construct evaluation tuples
def construct_query_and_database_sets(base_path, runs_folder, folders, pointcloud_fols, filename, p, output_name):
database_trees = []
test_trees = []
##### construct kdtree
for folder in folders:
print(folder)
velo_file = []
df_database = pd.DataFrame(columns=['file','northing','easting','yaw'])
df_test = pd.DataFrame(columns=['file','northing','easting','yaw'])
gt_filename = "gt_occ_3m.csv"
df_locations = pd.read_csv(os.path.join(
base_path,runs_folder,folder,gt_filename), header=0, names = ['file','northing','easting','yaw'], low_memory=False)
gt_test_filename = "gt_occ_test_3m.csv"
df_test = pd.read_csv(os.path.join(
base_path,runs_folder,folder,gt_test_filename), header=0, names = ['file','northing','easting','yaw'],low_memory=False)
for index, row in df_locations.iterrows():
df_database = df_database.append(row, ignore_index=True)
for index, row in df_test.iterrows():
df_test = df_test.append(row, ignore_index=True)
df_database = df_database.append(row, ignore_index=True)
database_tree = KDTree(df_database[['northing','easting']])
test_tree = KDTree(df_test[['northing','easting']])
database_trees.append(database_tree)
test_trees.append(test_tree)
test_sets = []
database_sets = []
##### construct corresponding database
for folder in folders:
database = {}
test = {}
velo_file = []
df_velo = | pd.DataFrame(columns=['file','northing','easting','yaw']) | pandas.DataFrame |
#========================================================================
# Python library imports
#========================================================================
import math
import pandas as pd
import operator as op
from functools import reduce
#========================================================================
# Function to compute the binomial coefficient nCr for given n & r
#========================================================================
def ncr(n, r):
r = min(r, n-r)
numer = reduce(op.mul, range(n, n-r, -1), 1)
denom = reduce(op.mul, range(1, r+1), 1)
return numer // denom
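# Quick sanity checks (illustrative only): ncr(5, 2) == 10 and ncr(10, 0) == 1.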
#========================================================================
# Generic function to evaluate the binomial probability mass function
#========================================================================
def binomial(n, p, x, k=2):
probability_for_p = ncr((k-1)*n,x) * math.pow(p,x) * math.pow((1-p),((k-1)*n)-x)
return probability_for_p
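# Worked example (illustrative): with the default k=2 this reduces to the
# standard binomial pmf, e.g. binomial(n=10, p=0.1, x=0) = 0.9**10 ≈ 0.3487.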
def single_sampling_plan(AlphaLow, AlphaHigh, BetaLow, BetaHigh, p1, p2):
'''
    Input: User-specified values - AlphaLow, AlphaHigh, BetaLow, BetaHigh, p1, p2
    Function: Computes the acceptance probability at both p1 & p2 for each pair of (n, c)
              using the single sampling plan formula, derives the alpha & beta values
              from the computed probabilities, tests whether the derived values fall
              within the user-specified ranges (low and high), and returns every
              (n, c) pair that satisfies these criteria as an admissible plan
Output: Admissible plan(s) as a dataframe
'''
single_plans_df = pd.DataFrame()
for c in range(0, 15):
for n in range(5, 501):
probability_for_p1 = 0
probability_for_p2 = 0
for x in range(c+1):
probability_for_p1 = probability_for_p1 + binomial(n=n, p=p1, x=x)
probability_for_p2 = probability_for_p2 + binomial(n=n, p=p2, x=x)
tempalpha = 1 - probability_for_p1
tempbeta = probability_for_p2
if ((tempbeta <= BetaHigh) and (BetaLow <= tempbeta) and
(tempalpha <= AlphaHigh) and (AlphaLow <= tempalpha)):
tempalphaplusbeta = tempalpha + tempbeta
single_plans_df = single_plans_df.append({"n":n, "c":c, "Alpha":tempalpha,
"Beta":tempbeta,
"Alpha+Beta":tempalphaplusbeta},
ignore_index=True)
break
return single_plans_df
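# Minimal usage sketch (illustrative values only; this helper is not called
# anywhere in the module). It assumes producer's-risk bounds of 1%-5% and
# consumer's-risk bounds of 5%-10% for p1 = 1% and p2 = 6%.
def _example_single_sampling_plan():
    plans = single_sampling_plan(AlphaLow=0.01, AlphaHigh=0.05,
                                 BetaLow=0.05, BetaHigh=0.10,
                                 p1=0.01, p2=0.06)
    # each row holds an admissible (n, c) pair with its realised Alpha/Beta
    print(plans[['n', 'c', 'Alpha', 'Beta']])
    return plans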
#========================================================================
# Function definition for ChSP-1
#========================================================================
def chsp_1(AlphaLow, AlphaHigh, BetaLow, BetaHigh, p1, p2):
'''
    Input: User-specified values - AlphaLow, AlphaHigh, BetaLow, BetaHigh, p1, p2
    Function: Computes the acceptance probability at both p1 & p2 for each pair of (n, i)
              using the ChSP-1 formula, derives the alpha & beta values
              from the computed probabilities, tests whether the derived values fall
              within the user-specified ranges (low and high), and returns every
              (n, i) pair that satisfies these criteria as an admissible plan
Output: Admissible plan(s) as a dataframe
'''
chsp_1_df = | pd.DataFrame() | pandas.DataFrame |
from typing import List
import numpy as np
import pandas as pd
import stockstats
import talib
import copy
class BasicProcessor:
def __init__(self, data_source: str, start_date, end_date, time_interval, **kwargs):
assert data_source in {
"alpaca",
"baostock",
"ccxt",
"binance",
"iexcloud",
"joinquant",
"quandl",
"quantconnect",
"ricequant",
"wrds",
"yahoofinance",
"tusharepro",
}, "Data source input is NOT supported yet."
self.data_source: str = data_source
self.start_date: str = start_date
self.end_date: str = end_date
self.time_interval: str = time_interval # standard time_interval
# transferred_time_interval will be supported in the future.
# self.nonstandard_time_interval: str = self.calc_nonstandard_time_interval() # transferred time_interval of this processor
self.time_zone: str = ""
self.dataframe: pd.DataFrame = pd.DataFrame()
self.dictnumpy: dict = {} # e.g., self.dictnumpy["open"] = np.array([1, 2, 3]), self.dictnumpy["close"] = np.array([1, 2, 3])
def download_data(self, ticker_list: List[str]):
pass
def clean_data(self):
if "date" in self.dataframe.columns.values.tolist():
self.dataframe.rename(columns={'date': 'time'}, inplace=True)
if "datetime" in self.dataframe.columns.values.tolist():
self.dataframe.rename(columns={'datetime': 'time'}, inplace=True)
if self.data_source == "ccxt":
self.dataframe.rename(columns={'index': 'time'}, inplace=True)
if self.data_source == 'ricequant':
''' RiceQuant data is already cleaned, we only need to transform data format here.
No need for filling NaN data'''
self.dataframe.rename(columns={'order_book_id': 'tic'}, inplace=True)
# raw df uses multi-index (tic,time), reset it to single index (time)
self.dataframe.reset_index(level=[0, 1], inplace=True)
# check if there is NaN values
assert not self.dataframe.isnull().values.any()
elif self.data_source == 'baostock':
self.dataframe.rename(columns={'code': 'tic'}, inplace=True)
self.dataframe.dropna(inplace=True)
# adj_close: adjusted close price
if 'adj_close' not in self.dataframe.columns.values.tolist():
self.dataframe['adj_close'] = self.dataframe['close']
self.dataframe.sort_values(by=['time', 'tic'], inplace=True)
self.dataframe = self.dataframe[['tic', 'time', 'open', 'high', 'low', 'close', 'adj_close', 'volume']]
def get_trading_days(self, start: str, end: str) -> List[str]:
if self.data_source in ["binance", "ccxt", "quantconnect", "ricequant", "tusharepro"]:
print(f"Calculate get_trading_days not supported for {self.data_source} yet.")
return None
# use_stockstats_or_talib: 0 (stockstats, default), or 1 (use talib). Users can choose the method.
def add_technical_indicator(self, tech_indicator_list: List[str], use_stockstats_or_talib: int = 0):
"""
        calculate technical indicators
        use the stockstats/talib package to add technical indicators
        :param tech_indicator_list: (list) indicator names to compute on self.dataframe
        :param use_stockstats_or_talib: (int) 0 for stockstats (default), 1 for talib
"""
if "date" in self.dataframe.columns.values.tolist():
self.dataframe.rename(columns={'date': 'time'}, inplace=True)
if self.data_source == "ccxt":
self.dataframe.rename(columns={'index': 'time'}, inplace=True)
self.dataframe.reset_index(drop=False, inplace=True)
if "level_1" in self.dataframe.columns:
self.dataframe.drop(columns=["level_1"], inplace=True)
if "level_0" in self.dataframe.columns and "tic" not in self.dataframe.columns:
self.dataframe.rename(columns={"level_0": "tic"}, inplace=True)
assert use_stockstats_or_talib in {0, 1}
print("tech_indicator_list: ", tech_indicator_list)
if use_stockstats_or_talib == 0: # use stockstats
stock = stockstats.StockDataFrame.retype(self.dataframe)
unique_ticker = stock.tic.unique()
for indicator in tech_indicator_list:
print("indicator: ", indicator)
indicator_df = | pd.DataFrame() | pandas.DataFrame |
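# Aside (illustrative, not part of the processor above): a minimal sketch of how
# stockstats derives an indicator column from a plain OHLCV DataFrame. The
# DataFrame argument and its column names are assumptions for the example only.
def _example_stockstats_indicator(ohlcv_df, indicator="macd"):
    sdf = stockstats.StockDataFrame.retype(ohlcv_df.copy())
    # accessing the column lazily triggers the indicator computation
    return sdf[indicator]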
import requests
import pandas as pd
import re
from bs4 import BeautifulSoup
url=requests.get("http://www.worldometers.info/world-population/india-population/")
t=url.text
so=BeautifulSoup(t,'html.parser')
all_t=so.findAll('table', class_="table table-striped table-bordered table-hover table-condensed table-list")#Use to find stats tabl
d1=pd.DataFrame([])
i=0
j=0
b=[]
d1=pd.DataFrame()
for j in all_t[0].findAll('td'):
b.append(j.text)
while(i<=(208-13)):
d1=d1.append(pd.DataFrame([b[i:i+13]]) )
i=i+13
d1 = d1.apply(pd.to_numeric, errors='ignore')  # assign the result; DataFrame.apply is not in-place
listq=pd.Series.tolist(d1[0:16][0])
list1=pd.Series.tolist(d1[0:16][1])
list2=pd.Series.tolist(d1[0:16][2])
list3=pd.Series.tolist(d1[0:16][3])
list4=pd.Series.tolist(d1[0:16][4])
list5=pd.Series.tolist(d1[0:16][5])
list6=pd.Series.tolist(d1[0:16][6])
list7=pd.Series.tolist(d1[0:16][7])
list8=pd.Series.tolist(d1[0:16][8])
list9=pd.Series.tolist(d1[0:16][9])
list10=pd.Series.tolist(d1[0:16][10])
#forecast table
c=[]
for j in all_t[1].findAll('td'):
c.append(j.text)
bv=pd.DataFrame()
i=0
while(i<=(91-13)):
bv=bv.append(pd.DataFrame([c[i:i+13]]) )
i=i+13
listq1=pd.Series.tolist(bv[0:7][0])
list11=pd.Series.tolist(bv[0:7][1])
list21=pd.Series.tolist(bv[0:7][2])
list31=pd.Series.tolist(bv[0:7][3])
list41=pd.Series.tolist(bv[0:7][4])
list51=pd.Series.tolist(bv[0:7][5])
list61=pd.Series.tolist(bv[0:7][6])
list71=pd.Series.tolist(bv[0:7][7])
list81= | pd.Series.tolist(bv[0:7][8]) | pandas.Series.tolist |
import numpy as np
from scipy import sparse
import pandas as pd
import networkx as nx
from cidre import utils
def detect(
A, threshold, is_excessive, min_group_edge_num=0,
):
"""
CIDRE algorithm
Parameters
-----------
A : scipy sparse matrix
Adjacency matrix
threshold : float
The algorithm seeks the groups of nodes that have a
donor score or a recipient score larger than or equal to the threshold value.
is_excessive : filtering function
is_excessive(srg, trg, w) returns True if the edge from src to trg with weight w
is excessive. Otherwise is_excessive(srg, trg, w) returns False.
min_group_edge_num: int (Optional; Default 0)
The minimum number of edges that the detected group has.
If the algoirthm finds a group of nodes that contain less than or equal to min_edge_num,
the algorithm exlcudes the group from the list of detected groups.
Returns
-------
df : pandas.DataFrame
Table of nodes detected by CIDRE. df consists of the following columns:
- node_labels : label of nodes
- group id : ID of the group to which the node belongs
- donor_score : donor score for the node
- recipient_score : recipient score for the node
- is_donor : True if the node is a donor. Otherwise False.
- is_recipient : True if the node is a recipient. Otherwise False.
"""
# Filter edges before grouping
src, dst, w = utils.find_non_self_loop_edges(A)
excessive_edges = is_excessive(src, dst, w)
A_pruned = utils.construct_adjacency_matrix(
src[excessive_edges], dst[excessive_edges], w[excessive_edges], A.shape[0]
)
# Find the group of nodes U with
# a donor score or a recipient score
# larger than or equal to the threshold
num_nodes = A.shape[0]
U = np.ones(num_nodes)
indeg_zero_truncated = np.maximum(np.array(A.sum(axis=0)).ravel(), 1.0)
outdeg_zero_truncated = np.maximum(np.array(A.sum(axis=1)).ravel(), 1.0)
while True:
# Compute the donor score, recipient score and cartel score
donor_score = np.multiply(U, (A_pruned @ U) / outdeg_zero_truncated)
recipient_score = np.multiply(U, (U @ A_pruned) / indeg_zero_truncated)
# Drop the nodes with a cartel score < threshold
drop_from_U = (U > 0) * (np.maximum(donor_score, recipient_score) < threshold)
# Break the loop if no node is dropped from the cartel
        if not np.any(drop_from_U):
break
# Otherwise, drop the nodes from the cartel
U[drop_from_U] = 0
# Find the nodes in U
nodes_in_U = np.where(U)[0]
# Partition U into disjoint groups, U_l
A_U = A_pruned[:, nodes_in_U][nodes_in_U, :].copy()
net_U = nx.from_scipy_sparse_matrix(A_U, create_using=nx.DiGraph)
net_U.remove_nodes_from(list(nx.isolates(net_U)))
df_Ul_list = []
for _, _nd in enumerate(nx.weakly_connected_components(net_U)):
nodes_in_Ul = nodes_in_U[np.array(list(_nd))]
# Remove the group U_l if
# U_l does not contain edges less than or equal to
# min_group_edge_num
A_Ul = A[nodes_in_Ul, :][:, nodes_in_Ul]
num_edges_in_Ul = A_Ul.sum() - A_Ul.diagonal().sum()
if num_edges_in_Ul <= min_group_edge_num:
continue
# Pack the results into a pandas
df_Ul = pd.DataFrame(
{
"node_id": nodes_in_Ul,
"group_id": np.ones_like(nodes_in_Ul) * len(df_Ul_list),
"recipient_score": recipient_score[nodes_in_Ul],
"donor_score": donor_score[nodes_in_Ul],
"is_recipient": (recipient_score[nodes_in_Ul] >= threshold).astype(int),
"is_donor": (donor_score[nodes_in_Ul] >= threshold).astype(int),
}
)
df_Ul_list += [df_Ul]
if df_Ul_list == []:
print("No groups found")
df_U = df_Ul_list
else:
df_U = | pd.concat(df_Ul_list, ignore_index=True) | pandas.concat |
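# Usage sketch (illustrative only; the graph, weights and threshold below are
# made-up values, and this helper is not called anywhere in the module).
def _example_detect():
    src = np.array([0, 0, 1, 2])
    dst = np.array([1, 2, 2, 0])
    w = np.array([5.0, 3.0, 4.0, 1.0])
    A = sparse.csr_matrix((w, (src, dst)), shape=(3, 3))
    # for the demo, treat every edge with weight above 2 as "excessive"
    is_excessive = lambda s, t, weight: weight > 2
    return detect(A, threshold=0.15, is_excessive=is_excessive, min_group_edge_num=0)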
import pandas as pd
import numpy as np
from skmob import TrajDataFrame
import datasheets
from human_id import generate_id
import functools
class WeClockExport:
def __init__(self, identifier, filename_or_file):
self.identifier = identifier
self.filename_or_file = filename_or_file
self.type = "android"
self.df = self.parse_export_file(self.filename_or_file)
def parse_export_file(self, filename_or_file) -> pd.DataFrame:
# if it's a .zip, it's an android export and needs to be treated differently.
try:
if '.zip' in filename_or_file.filename:
df = pd.read_csv(filename_or_file, compression='zip',
names = np.array(["idx", "type", "datetime", "value1", "value2", "value3", "value4", "value5", "fused"]))
self.type = "android"
return df
else:
f = filename_or_file
df = (pd.read_csv(f,
names = np.array(["idx", "id", "type", "date", "time", "value1", "value2"]),
parse_dates=[['date', 'time']]
)
.drop(['id'], axis=1)
)
self.type = "ios"
return df
except Exception as e:
print("error!", e)
return pd.DataFrame()
# cache because we call this often
@functools.cache
def geo_df(self) -> pd.DataFrame:
geodf = pd.DataFrame()
if (self.type == 'ios'):
geodf = (self.df
.query("type == 'geologging'")
.assign(value1 = lambda x: pd.to_numeric(x.value1, errors='coerce'))
.drop(['idx'], axis=1)
.rename(columns={'value1': "lat", "value2": "lng", "date_time": "datetime"})
.assign(datetime = lambda x: pd.to_datetime(x.datetime))
).dropna()
elif (self.type == 'android'):
geo_df = self.df.query("type == 'geo_logging'")
geo_df = (pd.concat([
geo_df[['type', 'datetime']],
geo_df['value1'].str.split(',', expand=True)
], axis=1)
.rename(columns={0: 'lat', 1: 'lng'}))
print("geo df:")
print(geo_df)
geodf = (geo_df
.rename(columns={'0': 'lat', '1': 'lng'})
.assign(lat = lambda x: pd.to_numeric(x.lat, errors='coerce'))
.assign(lng = lambda x: pd.to_numeric(x.lng, errors='coerce'))
.assign(datetime = lambda x: pd.to_datetime(x.datetime, errors='coerce'))
).dropna()
return geodf
# use .2 because it seems to work well heuristically. Can change in client if needed
def get_clusters(self, cluster_radius=.1, min_stops=1) -> TrajDataFrame or None:
from . import geo
# uses geo.cluster_stops to get a Data Frame with a new column for clusters
if (len(self.geo_df()) < 5):
return TrajDataFrame(pd.DataFrame())
trajectories, sdf, stop_df = geo.get_trips(self.geo_df())
return geo.cluster_stops(stop_df, cluster_radius, min_stops)
def caps(self, s):
return " ".join([s[0].upper() + s[1:] for s in str(s).split(" ")])
def to_google_sheet(self, split_tabs=True):
client = datasheets.Client(service=True)
wb_id = generate_id()
self.workbook = client.create_workbook(wb_id)
all_data_tab = self.workbook.create_tab('All Data')
all_data_tab.insert_data(self.df, index=False)
if split_tabs:
# new tabs for each data type
dtypes = self.df.type.unique()
for t in dtypes:
tabname = self.caps(t)
tab_df = self.df.query("type == @t")
if t == 'geologging':
tab_df = (tab_df.assign(value1 = lambda x: | pd.to_numeric(x.value1, errors='coerce') | pandas.to_numeric |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 17:35:50 2018
@author: amal
"""
from __future__ import division
import os.path
import pandas as pd
import numpy as np
from datetime import datetime
from geopy.distance import vincenty
import matplotlib.pyplot as plt
import seaborn as sns
import json
import urllib.request as urllib2
from sklearn import preprocessing
from plotly.offline import plot
import plotly.graph_objs as go
def fetch_one_week_data(URL):
one_week_data = pd.read_csv(URL,
sep=",",
header=None,
names=[
"curr_status",
"curr_stop_sequence",
"direction_id",
"latitude",
"longitude",
"route_id",
"schedule_realtionship",
"stop_id",
"server_time",
"trip_id",
"system_time",
"vehicle_id"])
one_week_data = one_week_data[["server_time",
"route_id",
"curr_stop_sequence",
"latitude",
"longitude",
"direction_id",
"curr_status",
"schedule_realtionship",
"stop_id",
"trip_id",
"vehicle_id",
"system_time",
]]
one_week_data['curr_status'] = pd.to_numeric(one_week_data['curr_status'])
one_week_data['curr_stop_sequence'] = pd.to_numeric(one_week_data['curr_stop_sequence'])
one_week_data['direction_id'] = pd.to_numeric(one_week_data['direction_id'])
one_week_data['latitude'] = pd.to_numeric(one_week_data['latitude'])
one_week_data['longitude'] = pd.to_numeric(one_week_data['longitude'])
one_week_data['schedule_realtionship'] = | pd.to_numeric(one_week_data['schedule_realtionship']) | pandas.to_numeric |
# Common imports
import numpy as np
import pandas as pd
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
def create_X(x, y, n ):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
l = int((n+1)*(n+2)/2) # Number of elements in beta
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = (x**(i-k))*(y**k)
return X
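# Illustrative note: for n = 2 the columns of X are [1, x, y, x^2, x*y, y^2],
# i.e. l = (2+1)*(2+2)/2 = 6 terms, generated degree by degree by the loops above.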
# Making meshgrid of datapoints and compute Franke's function
n = 4
N = 1000
x = np.sort(np.random.uniform(0, 1, N))
y = np.sort(np.random.uniform(0, 1, N))
z = FrankeFunction(x, y)
X = create_X(x, y, n=n)
Xpd = | pd.DataFrame(X) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 5 21:51:52 2018
@author: dayvsonsales
"""
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
import requests
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
from sklearn.impute import SimpleImputer
print('\n - Reading the file with the diabetes dataset')
data = pd.read_csv('/Users/dayvsonsales/mlclass/01_Preprocessing/diabetes_dataset.csv')
zero = ['Glucose','BloodPressure','SkinThickness','BMI','Insulin']
# Creating X and y for the machine learning algorithm.
print(' - Creating X and y for the learning algorithm from the diabetes_dataset file')
# To change which columns are considered, just edit the array below.
feature_cols = ['Glucose', 'Insulin', 'BMI']
X = data[feature_cols]
y = data.Outcome
# with test_size = 0.0001 accuracy reaches 100%, but the evaluation becomes biased
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.223, random_state = 11)
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
X_train = imp.fit_transform(X_train)
X_test = imp.fit_transform(X_test)
# several scalers are available; MinMaxScaler rescales each feature to the [0, 1] range
sc = MinMaxScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Creating the predictive model for the preprocessed data
print(' - Creating predictive model')
neigh = KNeighborsClassifier(n_neighbors = 3)
neigh.fit(X_train, y_train)
# making predictions
print(' - Applying model and sending to the server')
#data_app = pd.read_csv('/Users/dayvsonsales/mlclass/01_Preprocessing/diabetes_app.csv')
#y_pred = neigh.predict(data_app)
y_pred = neigh.predict(X_test)
print(accuracy_score(y_test,y_pred))
data_app = pd.read_csv('/Users/dayvsonsales/mlclass/01_Preprocessing/diabetes_app.csv')
## if the feature columns change, this must change as well
data_app = data_app[feature_cols]
data_app = sc.transform(data_app)
y_pred = neigh.predict(data_app)
# Sending the predictions made with the model to the server
URL = "https://aydanomachado.com/mlclass/01_Preprocessing.php"
# TODO: replace with your own key here
DEV_KEY = "COLOCAR_SUA_KEY_AQUI"
# json payload to be sent to the server
data = {'dev_key':'if\'s',
'predictions': | pd.Series(y_pred) | pandas.Series |
import pandas as pd
import logging
import click
from pathlib import Path
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logger = logging.getLogger('building_info')
def generate_csv(csv_folder, csv_res):
dfs = [pd.read_csv(csv_file) for csv_file in csv_folder.iterdir()]
result = pd.concat(dfs)
result.to_csv(csv_res, index=None)
return result
def count_images(df):
cities = ["Paris", "Shanghai", "Khartoum", "Vegas"]
total_samples = df['ImageId'].unique().size
for city in cities:
city_samples = df[df['ImageId'].str.contains(city)]['ImageId'].unique().size
logger.info(f"{city}: {city_samples}/{total_samples}")
def count_files_per_city(image_folder):
cities = ["Paris", "Shanghai", "Khartoum", "Vegas"]
count_dict = {city: 0 for city in cities}
for filename in image_folder.iterdir():
for city in cities:
if city in filename.stem:
count_dict[city] += 1
for city, count in count_dict.items():
logger.info(f"{city}: {count} images")
@click.command()
@click.argument("image_folder", type=click.Path(exists=True))
@click.argument("csv_folder", type=click.Path(exists=True))
@click.argument("csv_res", type=click.Path())
def main(image_folder, csv_folder, csv_res):
csv_res = Path(csv_res)
csv_folder = Path(csv_folder)
image_folder = Path(image_folder)
if not csv_res.exists():
df = generate_csv(csv_folder, csv_res)
else:
df = | pd.read_csv(csv_res) | pandas.read_csv |
import argparse
import os
import pandas as pd
from sklearn.utils import shuffle
def split_species(specie_set, test_count, valid_count, seed=42):
# Create empty dataframes
train_set = pd.DataFrame(columns=specie_set.columns)
test_set = pd.DataFrame(columns=specie_set.columns)
valid_set = pd.DataFrame(columns=specie_set.columns)
# Get the counts for each capture event
capture_ids = specie_set.groupby(['capture_id']).image_rank_in_capture.count().reset_index(
name='count')
# shuffle them randomly
capture_ids = shuffle(capture_ids, random_state=seed)
for i, (index, row) in enumerate(capture_ids.iterrows()):
# get images for each capture id
capture_subset = specie_set.loc[specie_set['capture_id'] == row['capture_id']]
capture_count = len(capture_subset)
test_count_left = test_count - len(test_set)
valid_count_left = valid_count - len(valid_set)
        # add the capture event to the test (or validation) set if it fits within the remaining budget
if capture_count > test_count_left and capture_count > valid_count_left:
pass
elif capture_count <= test_count_left:
test_set = test_set.append(capture_subset)
elif capture_count <= valid_count_left:
valid_set = valid_set.append(capture_subset)
# Break condition: Either all test and valid images have been extracted
# Or we have reached the end of our loop
if (len(test_set) >= test_count and len(valid_set) >= valid_count) \
or (i == (len(capture_ids) - 1)):
# if we are at the last iteration and the test_set is empty
# then we add the last capture event to the test set and
# validation is kept empty
if len(test_set) == 0:
smallest_capture_event = capture_ids.sort_values('count').iloc[0]
capture_subset = specie_set.loc[specie_set['capture_id'] == smallest_capture_event['capture_id']]
test_set = test_set.append(capture_subset)
# the remaining images (not present in test and valid set)
# are extracted to train set
train_set = specie_set.loc[~(specie_set.index.isin(test_set.index) |
specie_set.index.isin(valid_set.index))]
break
print(f"Train count and Valid count is {len(train_set)} and {len(valid_set)} respectively.\n"
f"The rest {len(test_set)} will be used for testing.")
# Check if the counts are okay
assert (len(train_set) + len(test_set) + len(valid_set) == len(specie_set))
return train_set, valid_set, test_set
def split_dataset(data=None, test_ratio=0.1, valid_ratio=0.1, seed=42):
if data is None:
print(f"No input data provided")
return None, None
# Create empty dataframes
train = pd.DataFrame(columns=data.columns)
valid = pd.DataFrame(columns=data.columns)
test = pd.DataFrame(columns=data.columns)
# Get distinct species
distinct_species = data.question__species.unique()
print(f"Number of distinct species: {len(distinct_species)}")
# For each specie divide into train and test sets
for specie in distinct_species:
# For testing
# if not specie == 'reptiles':
# continue
print(f"Splitting dataset for {specie}")
specie_subset = data.loc[data['question__species'] == specie]
specie_count = len(specie_subset)
test_count = int(specie_count * test_ratio)
valid_count = int(specie_count * valid_ratio)
train_species, valid_species, test_species = split_species(specie_set=specie_subset, test_count=test_count,
valid_count=valid_count, seed=seed)
train = train.append(train_species)
valid = valid.append(valid_species)
test = test.append(test_species)
# Shuffle them a bit so that we dont get all species in the same batches
train = shuffle(train)
test = shuffle(test)
valid = shuffle(valid)
return train, valid, test
def main(path_csv, output_dir):
base_image_dir = r'C:\Users\mfarj\Documents\ss_data\snapshotserengeti-unzipped\snapshotserengeti-unzipped'
# Read input csv
species_df = | pd.read_csv(path_csv) | pandas.read_csv |
# Generates order items and the successful-clickstream records for orders tied to a campaign
import json
import common_functions
import random
import numpy as np
import multiprocessing as mp
import pandas as pd
# reading config
with open('../config.json') as data:
config = json.load(data)
# general config
machine_cores = int(config["n_cores"])
out_path = config["output_path_files"]
language = config["language"]
# data processing config
amounts_cpu = config["data_processing"]["amount_in_cpu"]
auto_batch = True if config["data_processing"]["auto_batch_based_in_cpu"] == "True" else False
# orders items config
outfile = config["orders_items"]["outfile"]
number_max_prod_per_order = config["orders_items"]["number_max_prod_per_order"]
# orders config
orders_file = config["orders"]["outfile"]
n_orders = config["orders"]["total"]
# products config
n_products = config["products"]["total"]
# orders info
orders = pd.read_csv(out_path + orders_file)
# clickstream succeed
clickstream_outfile = config["clickstream_succeed"]["outfile"]
# random probabilities
num_products_prob = common_functions.random_probabilities(1, number_max_prod_per_order)
quantities = list(range(1,number_max_prod_per_order + 1))
# Global order-items and clickstream-succeed arrays
orders_items = []
clickstream_succeed = []
ids = set()
def generate_orders_items(amount, index_start):
    # generates order items and successful-clickstream records for a batch of orders
orders_items_partial = []
clickstream_succeed_partial = []
for i in range(amount):
index = index_start + i - 1
order_id = orders["order_id"][index]
customer_id = orders["customer_id"][index]
order_date = orders["order_date"][index]
campaign_id = orders["campaign_id"][index]
media_source = orders["media_source"][index]
num_products = np.random.choice(quantities, p=num_products_prob)
for _ in range(num_products):
quantity = random.randint(1,100) # random quantity, between 1 and 100 products
product_id = random.randint(1,n_products)
orders_items_partial.append((order_id, product_id, quantity))
if campaign_id != 0:
clickstream_succeed_partial.append((customer_id,order_date,campaign_id,media_source,product_id))
return (orders_items_partial,clickstream_succeed_partial)
def collect_orders_items(results):
    # accumulate partial results and report how many order items have been processed
global orders_items
global clickstream_succeed
orders_items = orders_items + results[0]
clickstream_succeed = clickstream_succeed + results[1]
# print the process
print("{} processed".format(len(orders_items)))
# The number of order-items rows will be at least equal to the number of orders.
#
outsize = n_orders
# setting the number of cores used by the process, aka how many processes will run in parallel
print("Initializing using {} cores".format(machine_cores))
pool = mp.Pool(machine_cores)
# numbers of generated items in each loop
amounts = int(outsize/machine_cores) if auto_batch else amounts_cpu
number_of_loops = int(outsize/amounts)
residue = outsize - amounts * number_of_loops
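# Worked example (illustrative numbers): with outsize = 100000 and machine_cores = 8
# under auto_batch, amounts = 12500, number_of_loops = 8 and residue = 0; with a
# fixed amounts_cpu = 30000 instead, number_of_loops = 3 and residue = 10000,
# and that residue is generated first below.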
# first generating residue
pool.apply_async(generate_orders_items, args=(residue, 1), callback=collect_orders_items)
# generating orders in parallel
for i in range(number_of_loops):
pool.apply_async(generate_orders_items, args=(amounts, (i * amounts) + residue + 1), callback=collect_orders_items)
# closing pool
pool.close()
pool.join()
# creating a data frame with the final results
df_orders_items = pd.DataFrame(orders_items)
df_clickstream_succeed = | pd.DataFrame(clickstream_succeed) | pandas.DataFrame |
from numpy.core.numeric import NaN
import pandas as pd
import math
from scipy.spatial import distance
import streamlit as st
# get 'players.csv' and 'appearances.csv' from Kaggle:
# https://www.kaggle.com/davidcariboo/player-scores
# DATA WRANGLING #
df = pd.read_csv('players.csv', encoding='utf-8')
# I manually made a list of Serie A club IDs
club_IDs = list(df['ID'][:20].astype(int))
club_IDs = [str(x) for x in club_IDs]
club_names = list(df['Club'][:20])
dict = {club_names[i]: club_IDs[i] for i in range(len(club_names))}
# Here's a dictionary of clubs + IDs
#{'Inter': '46', 'Napoli': '6195', 'Juve': '506', 'AC Milan': '5', 'Torino': '416', 'Sassuolo': '6574', 'Salernitana': '380', 'Roma': '12', 'Lazio': '398', 'Atalanta': '800', 'Cagliari': '1390', 'Empoli': '749', 'Udinese': '410', 'Sampdoria': '1038', 'Venezia': '607', 'Spezia': '3522', 'Genoa': '252', 'Verona': '276', 'Fiorentina': '430', 'Bologna': '1025'}
df = df[df['current_club_id'].isin(club_IDs)]
df = df.drop(columns=['market_value_in_gbp', 'url', 'Club', 'ID'])
player_IDs = df['player_id']
number_of_players = len(player_IDs)
# 1645 SERIE A players (includes youth teams)
#############################################################################################
df2 = | pd.read_csv('appearances.csv', encoding='utf-8') | pandas.read_csv |
import pandas as pd
import glob
from concurrent.futures import ThreadPoolExecutor
import numpy as np
def create_obs(ticker):
print(ticker)
quote = pd.read_csv(glob.glob(ticker+'/*-quote.csv')[0], index_col=0)
order = pd.read_csv(glob.glob(ticker+'/*-order.csv')[0], index_col=0)
date = glob.glob(ticker+'/*')[0][22:30]
date = date[:4]+'-'+date[4:6]+'-'+date[6:]
quote_start = quote.index.get_loc(date + ' 09:06:00')
quote_end = quote.index.get_loc(date + ' 15:00:00')
order_start = order.index.get_loc(date + ' 09:06:00')
order_end = order.index.get_loc(date + ' 15:00:00')
quote_window = quote[quote_start:quote_end]
order_window = order[order_start:order_end]
# print('quote len: ', len(quote_window))
# print('order len: ', len(order_window))
data = pd.concat([quote_window, order_window], axis=1)
# print('data shape: ', data.shape)
all_history = | pd.DataFrame([]) | pandas.DataFrame |
# Build the February dataset for the boosting approach. This approach has some extra variables, including sums and averages of past values
import gc
gc.collect()
import pandas as pd
import seaborn as sns
import numpy as np
#%% Load the data. For the boosting dataset I did not test moving one day from March into February, for lack of time
# February and March are taken as they come
train = pd.read_parquet(r'C:\Users\argomezja\Desktop\Data Science\MELI challenge\train_data.parquet', engine='pyarrow')
#Cambio las variables object a categoricas
for col in ['currency', 'listing_type', 'shipping_logistic_type', 'shipping_payment']:
train[col] = train[col].astype('category')
train['date'] = pd.to_datetime(train['date'])
train['day'] =train.date.dt.day
train['month'] = train.date.dt.month
train['listing_type'] = train['listing_type'].factorize()[0]
train['shipping_logistic_type'] = train['shipping_logistic_type'].factorize()[0]
train['shipping_payment'] = train['shipping_payment'].factorize()[0]
febrero = train.loc[train['month']==2]
marzo = train.loc[train['month']==3]
febrero.to_csv('febrero_limpio.csv.gz',index=False, compression="gzip")
marzo.to_csv('marzo_limpio.csv.gz',index=False, compression="gzip")
#%% February
febrero = pd.read_csv(r'C:\Users\argomezja\Desktop\Data Science\MELI challenge\Project MELI\Dataset_limpios\febrero_limpio.csv.gz')
# Rework the price column (min-max scaled per currency)
febrero = febrero.assign(current_price=febrero.groupby('currency').transform(lambda x: (x - x.min()) / (x.max()- x.min())))
subtest1 = febrero[['sku', 'day', 'sold_quantity']]
subtest1= subtest1.pivot_table(index = 'sku', columns= 'day', values = 'sold_quantity').add_prefix('sales')
subtest2 = febrero[['sku', 'day', 'current_price']]
subtest2= subtest2.pivot_table(index = 'sku', columns= 'day', values = 'current_price').add_prefix('price')
subtest3 = febrero[['sku', 'day', 'minutes_active']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'minutes_active').add_prefix('active_time')
subtest4 = febrero[['sku', 'day', 'listing_type']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'listing_type').add_prefix('listing_type')
subtest6 = febrero[['sku', 'day', 'shipping_logistic_type']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'shipping_logistic_type').add_prefix('shipping_logistic_type')
subtest7 = febrero[['sku', 'day', 'shipping_payment']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'shipping_payment').add_prefix('shipping_payment')
final = pd.merge(subtest1, subtest2, left_index=True, right_index=True )
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final = pd.merge(final, subtest6, left_index=True, right_index=True)
final = pd.merge(final, subtest7, left_index=True, right_index=True)
del subtest1,subtest2,subtest3,subtest4,subtest6, subtest7
#%% Rolling averages over the last 3, 7, 15 and 20 days
febrero_test = febrero.sort_values(['sku','day']).reset_index(drop=True).copy()
febrero_test['promedio_3'] = febrero.groupby(['sku'])['sold_quantity'].rolling(3, min_periods=3).mean().reset_index(drop=True)
febrero_test['promedio_7'] = febrero.groupby(['sku'])['sold_quantity'].rolling(7, min_periods=7).mean().reset_index(drop=True)
febrero_test['promedio_15'] = febrero.groupby(['sku'])['sold_quantity'].rolling(15, min_periods=15).mean().reset_index(drop=True)
febrero_test['promedio_20'] = febrero.groupby(['sku'])['sold_quantity'].rolling(20, min_periods=20).mean().reset_index(drop=True)
# Pivot and merge
subtest3 = febrero_test[['sku', 'day', 'promedio_3']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'promedio_3', dropna=False).add_prefix('promedio_3')
subtest4 = febrero_test[['sku', 'day', 'promedio_7']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'promedio_7', dropna=False).add_prefix('promedio_7')
subtest6 = febrero_test[['sku', 'day', 'promedio_15']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'promedio_15', dropna=False).add_prefix('promedio_15')
subtest7 = febrero_test[['sku', 'day', 'promedio_20']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'promedio_20', dropna=False).add_prefix('promedio_20')
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final = pd.merge(final, subtest6, left_index=True, right_index=True)
final = pd.merge(final, subtest7, left_index=True, right_index=True)
final = final.dropna(axis=1, how='all')
del subtest3,subtest4,subtest6, subtest7
febrero_test['promedio_3_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(3, min_periods=3).mean().reset_index(drop=True)
febrero_test['promedio_7_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(7, min_periods=7).mean().reset_index(drop=True)
febrero_test['promedio_15_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(15, min_periods=15).mean().reset_index(drop=True)
febrero_test['promedio_20_active_time'] = febrero.groupby(['sku'])['minutes_active'].rolling(20, min_periods=20).mean().reset_index(drop=True)
# Pivot and merge
subtest3 = febrero_test[['sku', 'day', 'promedio_3_active_time']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'promedio_3_active_time', dropna=False).add_prefix('promedio_3_active_time')
subtest4 = febrero_test[['sku', 'day', 'promedio_7_active_time']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'promedio_7_active_time', dropna=False).add_prefix('promedio_7_active_time')
subtest6 = febrero_test[['sku', 'day', 'promedio_15_active_time']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'promedio_15_active_time', dropna=False).add_prefix('promedio_15_active_time')
subtest7 = febrero_test[['sku', 'day', 'promedio_20_active_time']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'promedio_20_active_time', dropna=False).add_prefix('promedio_20_active_time')
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = | pd.merge(final, subtest4, left_index=True, right_index=True) | pandas.merge |
import matplotlib as mpl
# This line allows mpl to run with no DISPLAY defined
mpl.use('Agg')
from keras.layers import Dense, Flatten, Input, merge, Dropout
from keras.models import Model
from keras.optimizers import Adam
from keras.regularizers import l1, l1l2
import keras.backend as K
import pandas as pd
import numpy as np
from keras_adversarial.image_grid_callback import ImageGridCallback
from keras_adversarial import AdversarialModel, gan_targets, fix_names, n_choice, simple_bigan
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
from mnist_utils import mnist_data
from example_gan import model_generator
from keras.layers import BatchNormalization, LeakyReLU
import os
def model_encoder(latent_dim, input_shape, hidden_dim=1024, reg=lambda: l1(1e-5), batch_norm_mode=0):
x = Input(input_shape, name="x")
h = Flatten()(x)
h = Dense(hidden_dim, name="encoder_h1", W_regularizer=reg())(h)
h = BatchNormalization(mode=batch_norm_mode)(h)
h = LeakyReLU(0.2)(h)
h = Dense(hidden_dim / 2, name="encoder_h2", W_regularizer=reg())(h)
h = BatchNormalization(mode=batch_norm_mode)(h)
h = LeakyReLU(0.2)(h)
h = Dense(hidden_dim / 4, name="encoder_h3", W_regularizer=reg())(h)
h = BatchNormalization(mode=batch_norm_mode)(h)
h = LeakyReLU(0.2)(h)
mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(h)
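    # Reparameterization trick: the merge below draws z = mu + eps * exp(log_sigma_sq / 2)
    # with eps ~ N(0, I), so gradients can flow back through mu and log_sigma_sq.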
z = merge([mu, log_sigma_sq], mode=lambda p: p[0] + K.random_normal(p[0].shape) * K.exp(p[1] / 2),
output_shape=lambda x: x[0])
return Model(x, z, name="encoder")
def model_discriminator(latent_dim, input_shape, output_dim=1, hidden_dim=2048,
reg=lambda: l1l2(1e-7, 1e-7), batch_norm_mode=1, dropout=0.5):
z = Input((latent_dim,))
x = Input(input_shape, name="x")
h = merge([z, Flatten()(x)], mode='concat')
h1 = Dense(hidden_dim, name="discriminator_h1", W_regularizer=reg())
b1 = BatchNormalization(mode=batch_norm_mode)
h2 = Dense(hidden_dim, name="discriminator_h2", W_regularizer=reg())
b2 = BatchNormalization(mode=batch_norm_mode)
h3 = Dense(hidden_dim, name="discriminator_h3", W_regularizer=reg())
b3 = BatchNormalization(mode=batch_norm_mode)
y = Dense(output_dim, name="discriminator_y", activation="sigmoid", W_regularizer=reg())
# training model uses dropout
_h = h
_h = Dropout(dropout)(LeakyReLU(0.2)((b1(h1(_h)))))
_h = Dropout(dropout)(LeakyReLU(0.2)((b2(h2(_h)))))
_h = Dropout(dropout)(LeakyReLU(0.2)((b3(h3(_h)))))
ytrain = y(_h)
mtrain = Model([z, x], ytrain, name="discriminator_train")
# testing model does not use dropout
_h = h
_h = LeakyReLU(0.2)((b1(h1(_h))))
_h = LeakyReLU(0.2)((b2(h2(_h))))
_h = LeakyReLU(0.2)((b3(h3(_h))))
ytest = y(_h)
mtest = Model([z, x], ytest, name="discriminator_test")
return mtrain, mtest
def example_bigan(path, adversarial_optimizer):
# z \in R^100
latent_dim = 25
# x \in R^{28x28}
input_shape = (28, 28)
# generator (z -> x)
generator = model_generator(latent_dim, input_shape)
# encoder (x ->z)
encoder = model_encoder(latent_dim, input_shape)
# autoencoder (x -> x')
autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
# discriminator (x -> y)
discriminator_train, discriminator_test = model_discriminator(latent_dim, input_shape)
# bigan (z, x - > yfake, yreal)
bigan_generator = simple_bigan(generator, encoder, discriminator_test)
bigan_discriminator = simple_bigan(generator, encoder, discriminator_train)
# z generated on GPU based on batch dimension of x
x = bigan_generator.inputs[1]
z = normal_latent_sampling((latent_dim,))(x)
# eliminate z from inputs
bigan_generator = Model([x], fix_names(bigan_generator([z, x]), bigan_generator.output_names))
bigan_discriminator = Model([x], fix_names(bigan_discriminator([z, x]), bigan_discriminator.output_names))
generative_params = generator.trainable_weights + encoder.trainable_weights
# print summary of models
generator.summary()
encoder.summary()
discriminator_train.summary()
bigan_discriminator.summary()
autoencoder.summary()
# build adversarial model
model = AdversarialModel(player_models=[bigan_generator, bigan_discriminator],
player_params=[generative_params, discriminator_train.trainable_weights],
player_names=["generator", "discriminator"])
model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
loss='binary_crossentropy')
# load mnist data
xtrain, xtest = mnist_data()
# callback for image grid of generated samples
def generator_sampler():
zsamples = np.random.normal(size=(10 * 10, latent_dim))
return generator.predict(zsamples).reshape((10, 10, 28, 28))
generator_cb = ImageGridCallback(os.path.join(path, "generated-epoch-{:03d}.png"), generator_sampler)
# callback for image grid of autoencoded samples
def autoencoder_sampler():
xsamples = n_choice(xtest, 10)
xrep = np.repeat(xsamples, 9, axis=0)
xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
xsamples = xsamples.reshape((10, 1, 28, 28))
x = np.concatenate((xsamples, xgen), axis=1)
return x
autoencoder_cb = ImageGridCallback(os.path.join(path, "autoencoded-epoch-{:03d}.png"), autoencoder_sampler)
# train network
y = gan_targets(xtrain.shape[0])
ytest = gan_targets(xtest.shape[0])
history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest), callbacks=[generator_cb, autoencoder_cb],
nb_epoch=100, batch_size=32)
# save history
df = | pd.DataFrame(history.history) | pandas.DataFrame |
import sys
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from get_data_brasil import run_crear_excel_brasil
from get_data_brasil_wcota import run_crear_excel_brasil_wcota
from get_data_pernambuco import run_crear_excel_recife
from get_data_ourworldindata import run_crear_excel_ourworldindata
from pandas import ExcelWriter
import colormap
import plotly.graph_objects as go
from PIL import Image
import base64
import os
matplotlib.use('tkagg')
def plotly_html(a_14_days, p_seven, dia, bra_title, save_path, filename_bg):
for i in range(len(p_seven)):
if p_seven[i] < 0.0:
p_seven[i] = 0.0
color_map = []
for i in range(len(a_14_days)):
if i < len(a_14_days) - 60:
color_map.append('rgba(0, 0, 0, 0.1)')
elif i == len(a_14_days) - 1:
color_map.append('rgba(255, 255, 255, 0.6)')
else:
color_map.append('Blue')
fig = go.Figure()
fig.add_trace(go.Scatter(x=a_14_days,
y=p_seven,
text=dia,
mode='lines+markers',
marker=dict(
color=color_map,
showscale=False,
size=10,
line=dict(
color='Black',
width=0.2)),
line=dict(
color="Black",
width=0.5,
dash="dot"),
))
fig.add_shape(type="line",
x0=0,
y0=1,
x1=max(a_14_days),
y1=1,
line=dict(
color="Black",
width=1,
dash="dot",
))
image_filename = filename_bg
img = base64.b64encode(open(image_filename, 'rb').read())
x = round(a_14_days.max())
y = round(p_seven.max())
print(x, y)
fig.add_layout_image(
dict(
source='data:image/png;base64,{}'.format(img.decode()),
xref="x",
yref="y",
x=0,
y=p_seven.max(),
sizex=a_14_days.max(),
sizey=p_seven.max(),
xanchor="left",
yanchor="top",
sizing="stretch",
opacity=0.95,
layer="below"))
fig.add_annotation(dict(font=dict(color='black', size=9),
xref="paper", yref="paper",
x=0.9, y=0.9,
text="EPG > 100: High", showarrow=False))
fig.add_shape(type="rect",
xref="paper", yref="paper",
x0=0.9, x1=0.91, y0=0.87, y1=0.89, fillcolor="Red", line_color="Red")
fig.add_annotation(dict(font=dict(color='black', size=9),
xref="paper", yref="paper",
x=0.9, y=0.86,
text=" 70 < EPG < 100: Moderate-high", showarrow=False))
fig.add_shape(type="rect",
xref="paper", yref="paper",
x0=0.9, x1=0.91, y0=0.86, y1=0.78, fillcolor="Yellow", line_color="Yellow")
fig.add_annotation(dict(font=dict(color='black', size=9),
xref="paper", yref="paper",
x=0.9, y=0.82,
text=" 30 < EPG < 70 : Moderate", showarrow=False))
fig.add_annotation(dict(font=dict(color='black', size=9),
xref="paper", yref="paper",
x=0.9, y=0.78,
text="EPG < 30: Low", showarrow=False))
fig.add_annotation(dict(font=dict(color='blue', size=9),
xref="paper", yref="paper",
x=0.9, y=0.728,
text="Last 60 days", showarrow=False))
fig.add_shape(type="rect",
xref="paper", yref="paper",
x0=0.9, x1=0.91, y0=0.77, y1=0.74, fillcolor="Green", line_color="Green")
fig.add_shape(type="rect",
xref="paper", yref="paper",
x0=0.9, x1=0.91, y0=0.725, y1=0.70, fillcolor="Blue", line_color="Blue")
fig.update_layout(plot_bgcolor='rgb(255,255,255)',
width=800,
height=600,
xaxis_showgrid=False,
yaxis_showgrid=False,
xaxis_title="Attack rate per 10⁵ inh. (last 14 days)",
yaxis_title="\u03C1 (mean of the last 7 days)",
title={
'text': bra_title,
'y': 0.9,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top'},
)
fig.update_xaxes(rangemode="tozero")
fig.update_yaxes(rangemode="tozero")
# fig.show()
os.remove(filename_bg)
fig.write_html(filename_bg+'.html', include_plotlyjs="cdn")
def run_risk_diagrams(argv_1, deaths, file_others_cases, file_others_pop, radio_valor, ourworldindata_country):
if argv_1:
last_days_time = 30
brasil = False
pt = False
html = False
last_days = False
animation = False
if radio_valor == 1:
last_days = True
elif radio_valor == 2:
html = True
else:
pass
dataTable = []
dataTable_EPG = []
if argv_1 == 'brasil' and deaths == 'False':
try:
run_crear_excel_brasil()
filename = 'data/Data_Brasil.xlsx'
filename_population = 'data/pop_Brasil_v3.xlsx'
sheet_name = 'Cases'
except AttributeError:
print('Error! Not found file or could not download!')
elif argv_1 == 'brasil_regions' and deaths == 'False':
try:
run_crear_excel_brasil()
filename = 'data/Data_Brasil.xlsx'
filename_population = 'data/pop_Brasil_Regions_v3.xlsx'
sheet_name = 'Regions'
except AttributeError:
print('Error! Not found file or could not download!')
elif argv_1 == 'recife':
try:
run_crear_excel_recife()
filename = 'data/cases-recife.xlsx'
filename_population = 'data/pop_recife_v1.xlsx'
sheet_name = 'Cases'
except AttributeError:
print('Error! Not found file or could not download!')
elif argv_1 == 'WCOTA':
try:
#run_crear_excel_brasil_wcota('AM')
#run_crear_excel_brasil_wcota('PB')
run_crear_excel_brasil_wcota('SP')
filename = 'data/cases-wcota.xlsx'
#filename_population = 'data/pop_AM_v1.xlsx'
filename_population = 'data/pop_SP_v1.xlsx'
#filename_population = 'data/pop_PB_v1.xlsx'
#filename = 'data/Dades.xlsx'
#filename_population = 'data/pop_Dades.xlsx'
sheet_name = 'Cases'
except AttributeError:
print('Error! Not found file or could not download!')
elif argv_1 == 'ourworldindata' and deaths == 'False':
try:
run_crear_excel_ourworldindata(ourworldindata_country)
filename = 'data/ourworldindata.xlsx'
filename_population = 'data/pop_ourworldindata_v1.xlsx'
sheet_name = 'Cases'
except AttributeError:
print('Error! Not found file or could not download!')
elif argv_1 == 'others' and deaths == 'False':
try:
filename = file_others_cases
filename_population = file_others_pop
sheet_name = 'Cases'
except AttributeError:
print('Error! Not found file or could not download!')
data = pd.read_excel(filename, sheet_name=sheet_name)
population = pd.read_excel(filename_population)
dia = pd.to_datetime(data['date']).dt.strftime('%d/%m/%Y')
dia = dia.to_numpy()
region = population.columns
for ID in range(len(region)):
cumulative_cases = data[region[ID]]
cumulative_cases = cumulative_cases.to_numpy()
            new_cases = np.zeros((len(cumulative_cases)), dtype=int)  # np.int/np.float aliases were removed from NumPy; use the builtins
for i in range(len(cumulative_cases)):
if i != 0:
new_cases[i] = cumulative_cases[i] - \
cumulative_cases[i - 1]
            p = np.zeros((len(new_cases)), dtype=float)
for i in range(7, len(new_cases)):
div = 0
aux = new_cases[i - 5] + new_cases[i - 6] + new_cases[i - 7]
if aux == 0:
div = 1
else:
div = aux
p[i] = min((new_cases[i] + new_cases[i - 1] +
new_cases[i - 2]) / div, 4)
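            # Sketch of the quantities computed here (matching the code above/below):
            # rho  = (N_t + N_{t-1} + N_{t-2}) / (N_{t-5} + N_{t-6} + N_{t-7}), capped at 4
            # rho7 = 7-day mean of rho; A14 = 14-day case count per 10^5 inhabitants
            # EPG (risk_per_10) = A14 * rho7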
            p_seven = np.zeros((len(new_cases)), dtype=float)
            n_14_days = np.zeros((len(new_cases)), dtype=float)
            a_14_days = np.zeros((len(new_cases)), dtype=float)
            risk = np.zeros((len(new_cases)), dtype=float)
            risk_per_10 = np.zeros((len(new_cases)), dtype=float)
day13 = 13
for i in range(day13, len(new_cases)):
p_seven[i] = np.average(p[i - 6:i + 1])
n_14_days[i] = np.sum(new_cases[i - day13: i + 1])
pop = population[region[ID]]
a_14_days[i] = n_14_days[i] / pop * 100000
risk[i] = n_14_days[i] * p_seven[i]
risk_per_10[i] = a_14_days[i] * p_seven[i]
first_day = dia[day13]
last_day = dia[len(dia) - 1]
first_day = first_day.replace('/', '-')
last_day = last_day.replace('/', '-')
            # Restrict the highlighted trajectory to the last last_days_time days (30 by default)
if last_days:
a_14_days_solo = []
day13 = len(a_14_days) - last_days_time
first_day = dia[day13]
for i in range(len(a_14_days)):
if i >= len(a_14_days) - last_days_time:
a_14_days_solo.append(a_14_days[i])
else:
a_14_days_solo.append(None)
save_path = 'static_graphic' + '/' + last_day + '-' + region[ID]
save_path_temp = 'static_graphic' + '/interactive_graphic/' + last_day + '-' + region[ID]
save_path_xlsx = 'static_graphic/xlsx/'
fig1, ax1 = plt.subplots(sharex=True)
if last_days:
ax1.plot(a_14_days, p_seven, 'ko--', fillstyle='none',
linewidth=0.5, color=(0, 0, 0, 0.15))
ax1.plot(a_14_days_solo, p_seven, 'ko--',
fillstyle='none', linewidth=0.5) # For last 15 days
ax1.plot(a_14_days_solo[len(a_14_days_solo) - 1],
p_seven[len(p_seven) - 1], 'bo')
else:
ax1.plot(a_14_days, p_seven, 'ko--',
fillstyle='none', linewidth=0.5)
ax1.plot(a_14_days[len(a_14_days) - 1],
p_seven[len(p_seven) - 1], 'bo')
lim = ax1.get_xlim()
x = np.ones(int(lim[1]))
ax1.plot(x, 'k--', fillstyle='none', linewidth=0.5)
ax1.set_ylim(0, 4)
ax1.set_xlim(0, int(lim[1]))
ax1.set_ylabel('$\u03C1$ (mean of the last 7 days)')
ax1.set_xlabel('Attack rate per $10^5$ inh. (last 14 days)')
ax1.annotate(first_day,
xy=(a_14_days[day13], p_seven[day13]
), xycoords='data',
xytext=(len(x) - abs(len(x) / 1.5), 2.7), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3", linewidth=0.4),
)
ax1.annotate(last_day,
xy=(a_14_days[len(a_14_days) - 1],
p_seven[len(p_seven) - 1]), xycoords='data',
xytext=(len(x) - abs(len(x) / 2), 3), textcoords='data',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3", linewidth=0.4),
)
bra_title = region[ID]
plt.title(region[ID])
plt.annotate(
' EPG > 100: High', xy=(len(x) - abs(len(x) / 3.5), 3.8), color=(0, 0, 0),
ha='left', va='center', fontsize='6',
bbox=dict(fc=(0, 0, 0, 0), lw=0, pad=2))
plt.annotate(
" 70 < EPG < 100: Moderate-high\n"
" 30 < EPG < 70 : Moderate", xy=(len(x) - abs(len(x) / 3.5), 3.55), color=(0, 0, 0),
ha='left', va='center', fontsize='6',
bbox=dict(fc=(0, 0, 0, 0), lw=0, pad=2))
plt.annotate(
' EPG < 30: Low', xy=(len(x) - abs(len(x) / 3.5), 3.3), color=(0, 0, 0),
ha='left', va='center', fontsize='6',
bbox=dict(fc=(0, 0, 0, 0), lw=0, pad=2))
plt.annotate(
' ', xy=(len(x) - abs(len(x) / 3.3), 3.8), color=(0, 0, 0),
ha='left', va='center', fontsize='6',
bbox=dict(fc=(1, 0, 0, .5), lw=0, pad=2))
plt.annotate(
" \n", xy=(len(x) - abs(len(x) / 3.3), 3.55), color=(0, 0, 0),
ha='left', va='center', fontsize='6',
bbox=dict(fc=(1, 1, 0, .5), lw=0, pad=2))
plt.annotate(
' ', xy=(len(x) - abs(len(x) / 3.3), 3.3), color=(0, 0, 0),
ha='left', va='center', fontsize='6',
bbox=dict(fc=(0, 1, 0, .5), lw=0, pad=2))
if ourworldindata_country is not None:
plt.subplots_adjust(bottom=0.2)
text_annotate = (
"*The risk diagram was developed using the Our World in Data database. Last update: " + str(last_day) + ".")
plt.text(0, -1, text_annotate, fontsize=7, wrap=False)
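# Background shading for the diagram: EPG = A14 * rho7 evaluated on a grid and capped
# at 100, so everything above the 'high risk' threshold saturates to the same red.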
rh = np.arange(0, int(lim[1]), 1)
ar = np.linspace(0, 4, 400)
RH, AR = np.meshgrid(rh, ar)
EPG = RH * AR
for i in range(len(EPG)):
for j in range(len(EPG[i])):
if EPG[i][j] > 100:
EPG[i][j] = 100
c = colormap.Colormap()
mycmap = c.cmap_linear('green(w3c)', 'yellow', 'red')
ax1.pcolorfast([0, int(lim[1])], [0, 4],
EPG, cmap=mycmap, alpha=0.6)
ax1.set_aspect('auto')
if html:
figt, axt = plt.subplots(sharex=True)
axt.pcolorfast([0, int(lim[1])], [0, 4],
EPG, cmap=mycmap, alpha=0.6)
axt.set_axis_off()
figt.savefig(save_path_temp, format='png',
bbox_inches='tight', dpi=300, pad_inches=0)
plotly_html(a_14_days, p_seven, dia, bra_title,
save_path_xlsx, save_path_temp)
else:
plt.savefig(save_path + '.png', bbox_inches='tight', dpi=300)
plt.close('all')
print(
"\n\nPrediction for the region of " + region[
ID] + " performed successfully!\nPath:" + save_path)
dataTable.append([region[ID], cumulative_cases[len(cumulative_cases) - 1], new_cases[len(new_cases) - 1], p[len(p) - 1], p_seven[len(
p_seven) - 1], n_14_days[len(n_14_days) - 1], a_14_days[len(a_14_days) - 1], risk[len(risk) - 1], risk_per_10[len(risk_per_10) - 1]])
for i in range(len(dia)):
dataTable_EPG.append([dia[i], region[ID], risk_per_10[i]])
df = pd.DataFrame(dataTable, columns=['State', 'Cumulative cases', 'New cases', 'ρ', 'ρ7', 'New cases last 14 days (N14)',
                                      'New cases last 14 days per 10^5 inhabitants (A14)', 'Risk (N14*ρ7)', 'Risk per 10^5 (A14*ρ7)'])
df_EPG = pd.DataFrame(dataTable_EPG, columns=['DATE', 'CITY', 'EPG'])
with ExcelWriter(save_path_xlsx + last_day + '_' + argv_1 + '_report.xlsx') as writer:
df.to_excel(writer, sheet_name='Alt_Urgell')
with ExcelWriter(save_path_xlsx + last_day + '_' + argv_1 + '_report_EPG.xlsx') as writer:
    df_EPG.to_excel(writer)
import pandas as pd
import requests
import pandas_datareader as web
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
from math import floor
from termcolor import colored as cl
plt.rcParams['figure.figsize'] = (20, 10)
plt.style.use('fivethirtyeight')
# EXTRACTING STOCK DATA
def get_historical_data(symbol, start_date):
api_key = 'YOUR API KEY'
api_url = f'https://api.twelvedata.com/time_series?symbol={symbol}&interval=1day&outputsize=5000&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df['values']).iloc[::-1].set_index('datetime').astype(float)
df = df[df.index >= start_date]
df.index = pd.to_datetime(df.index)
return df
aapl = get_historical_data('AAPL', '2020-01-01')
aapl
def get_cci(symbol, n, start_date):
api_key = open(r'api_key.txt').read().strip()
url = f'https://www.alphavantage.co/query?function=CCI&symbol={symbol}&interval=daily&time_period={n}&apikey={api_key}'
raw = requests.get(url).json()
    df = pd.DataFrame(raw['Technical Analysis: CCI'])
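    # The Alpha Vantage payload is keyed by date, so the frame built above has dates as
    # columns. A minimal sketch of a plausible continuation (an assumption, not part of
    # the original source; the lowercase 'cci' column name is also assumed) that turns
    # it into a chronological single-column frame:
    df = df.T.iloc[::-1]                      # dates as index, oldest first
    df.columns = ['cci']                      # assumed column name
    df.index = pd.to_datetime(df.index)
    df = df[df.index >= start_date].astype(float)
    return df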
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import datetime
import sklearn.datasets, sklearn.decomposition
from sklearn.cluster import KMeans
from sklearn_extra.cluster import KMedoids
from sklearn.preprocessing import StandardScaler
import sklearn_extra
from scipy import stats
from scipy.stats import kurtosis, skew
from collections import defaultdict
import statistics
from itertools import chain
from scipy.interpolate import interp1d
from collections import defaultdict
from nested_dict import nested_dict
import DES_weather_analysis
from DES_weather_analysis import clustring_kmean_forced, clustring_kmediod_PCA_operation, EPW_to_csv,solar_irradiance,solar_position
from DES_weather_analysis.solar_irradiance import aoi, get_total_irradiance
from DES_weather_analysis.solar_position import get_solarposition
JtokWh = 2.7778e-7
def kmedoid_clusters(path_test,mode):
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
scenario_reduction_path= os.path.join(path_test,'ScenarioReduction')
scenarios_path = os.path.join(path_test,'ScenarioGeneration')
if not os.path.exists(scenario_reduction_path):
os.makedirs(scenario_reduction_path)
representative_days_path = scenario_reduction_path
num_scenario = 0
num_scenarios = int(editable_data['num_scenarios'])
city=editable_data['city']
lat = float(editable_data['Latitude'])
lon = float(editable_data['Longitude'])
altitude = float(editable_data['Altitude']) #SLC altitude m
surf_tilt = float(editable_data['solar_tilt']) #panels tilt degree
surf_azimuth = float(editable_data['solar_azimuth']) #panels azimuth degree
idf_names= []
thermal_eff_dict= {}
weight_factor={}
for i in range(int(editable_data['number_buildings'])):
if 'building_name_'+str(i+1) in editable_data.keys():
building_name = editable_data['building_name_'+str(i+1)]
idf_names.append(building_name)
thermal_eff_dict[building_name]=float(editable_data['thermal_eff_'+str(i+1)])
weight_factor[building_name]=float(editable_data['WF_'+str(i+1)])
#idf_names=idf_names[1:2]
start_year = int(editable_data['starting_year'])
end_year = int(editable_data['ending_year'])
epw_names = []
for i_temp in range(num_scenarios):
for i_solar in range(num_scenarios):
epw_names.append('T_'+str(i_temp)+'_S_'+str(i_solar))
demand_directory = os.path.join(path_test, 'IDFBuildingsFiles')
output_directory = os.path.join(path_test, 'IDFBuildingsFiles')
# epw main files
dict_EPWs = {}
list_years = []
list_tmys =[]
list_fmys = []
for year in reversed(range(start_year,end_year+1)):
weather_data = city+'_'+str(lat)+'_'+str(lon)+'_psm3_60_'+str(year)
list_years.append(weather_data)
for i in range(5):
if 'TMY'+str(i+1)+'_name' in editable_data.keys():
TMY_name = editable_data['TMY'+str(i+1)+'_name']
list_tmys.append(TMY_name)
if 'FMY'+str(i+1)+'_name' in editable_data.keys():
FMY_name = editable_data['FMY'+str(i+1)+'_name']
list_fmys.append(FMY_name)
dict_EPWs['AMYs']=list_years
dict_EPWs['FMYs']=list_fmys
dict_EPWs['TMYs']=list_tmys
global k
def scenario_reduction_per_year(scenario_genrated,name,weather_data):
global k
days= 365
features_scenarios = defaultdict(list)
features_scenarios_list = []
features_probability_list = []
features_scenarios_nested = nested_dict()
scenario_probability = [1]*365
k = 0
#print(scenario_genrated)
for i in range(days):
data_new = scenario_genrated[i*24:(i+1)*24]
#print(data_new.keys())
data_1 = data_new['Total Electricity']
data_2 = data_new['Total Heating']
#print(data_1)
#print(name,i,k,data_1[15],data_2[15])
daily_list =list(chain(data_1.astype('float', copy=False),data_2.astype('float', copy=False)))
features_scenarios[k*days+i] = daily_list
features_scenarios_nested[i] = features_scenarios[k*days+i]
features_scenarios_list.append(features_scenarios[k*days+i])
features_probability_list.append(scenario_probability[i])
k = k+1
A = np.asarray(features_scenarios_list)
# Stack the daily 48-feature vectors (24 h electricity + 24 h heating) and standardize them
standardization_data = StandardScaler()
A_scaled = standardization_data.fit_transform(A)
inertia_list = []
search_optimum_cluster = editable_data['Search optimum clusters'] # 'yes' to search for the optimum number of clusters, anything else to skip the search
cluster_range = range(2,30,1)
if search_optimum_cluster=='yes' and name== 'total_'+dict_EPWs['TMYs'][-1]+'_':
print('Defining the optimum number of clusters: ')
SMALL_SIZE = 14
MEDIUM_SIZE = 18
BIGGER_SIZE = 24
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (15,10)
fig, ax = plt.subplots(figsize=(15, 10))
for cluster_numbers in cluster_range:
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=0).fit(A_scaled)
inertia_list.append(kmedoids.inertia_)
plt.scatter(cluster_numbers,kmedoids.inertia_)
print('Cluster number:', cluster_numbers, ' Inertia of the cluster:', int(kmedoids.inertia_))
ax.set_xlabel('Number of clusters',fontsize=BIGGER_SIZE)
ax.set_ylabel('Inertia',fontsize=BIGGER_SIZE)
#ax.set_title('The user should use "Elbow method" to select the number of optimum clusters',fontsize=BIGGER_SIZE)
ax.plot(list(cluster_range),inertia_list)
ax.set_xticks(np.arange(2,30,1))
plt.savefig(os.path.join(path_test, 'Inertia vs Clusters.png'),dpi=300,facecolor='w')
plt.close()
print('"Inertia vs Clusters" figure is saved in the directory folder')
print('You can use the figure to select the optimum number of clusters' )
print('You should enter the new optimum number of clusters in EditableFile.csv file and re-run this part')
cluster_numbers= int(editable_data['Cluster numbers'])
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(A_scaled)
#kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(scores_pca)
label = kmedoids.fit_predict(A_scaled)
#filter rows of original data
probability_label = defaultdict(list)
index_label = defaultdict(list)
index_label_all = []
filtered_label={}
for i in range(cluster_numbers):
filtered_label[i] = A_scaled[label == i]
index_cluster=np.where(label==i)
if len(filtered_label[i])!=0:
index_cluster = index_cluster[0]
for j in index_cluster:
probability_label[i].append(features_probability_list[j])
index_label[i].append(j)
index_label_all.append(j)
else:
probability_label[i].append(0)
sum_probability = []
for key in probability_label.keys():
sum_probability.append(sum(probability_label[key]))
#print(kmedoids.predict([[0,0,0], [4,4,4]]))
#print(kmedoids.cluster_centers_,kmedoids.cluster_centers_[0],len(kmedoids.cluster_centers_))
A_scaled_list={}
clusters={}
clusters_list = []
label_list = []
data_labels={}
data_all_labels = defaultdict(list)
for center in range(len(kmedoids.cluster_centers_)):
clusters['cluster centers '+str(center)]= kmedoids.cluster_centers_[center]
clusters_list.append(kmedoids.cluster_centers_[center].tolist())
for scenario in range(len(A_scaled)):
data_all_labels[kmedoids.labels_[scenario]].append(standardization_data.inverse_transform(A_scaled[scenario].reshape(1,-1)))
#print(data_all_labels)
A_scaled_list[scenario]=A_scaled[scenario].tolist()
A_scaled_list[scenario].insert(0,kmedoids.labels_[scenario])
data_labels['labels '+str(scenario)]= A_scaled_list[scenario]
label_list.append(A_scaled[scenario].tolist())
df_clusters= pd.DataFrame(clusters)
df_labels = pd.DataFrame(data_labels)
df_clusters.to_csv(os.path.join(representative_days_path , name+ 'cluster_centers_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
df_labels.to_csv(os.path.join(representative_days_path , name + 'labels_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
# Reverse the standardization (no PCA is applied here) to recover the cluster centers in their original units
Scenario_generated_new = standardization_data.inverse_transform(kmedoids.cluster_centers_)
#print('15 representative days',clusters_reverse[0][0],Scenario_generated_new[0][0],standardization_data.mean_[0],standardization_data.var_[0])
representative_day_all = {}
total_labels = []
represent_gaps = {}
scenario_data = {}
for key in filtered_label.keys():
total_labels.append(len(filtered_label[key]))
#print(len(probability_label[0])) 1990
#print(len(filtered_label[0])) 1990
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
for i in range(48):
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
for k in range(2): # 2 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
#min_non_z = np.min(np.nonzero(scenario_data[k]))
#max_non_z = np.max(np.nonzero(scenario_data[k]))
#represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
#ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#for n in iranges:
# next_n = next(iranges)
# if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
# scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
# elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of 1 hour to 4 hr, use interpolation and extrapolation
# f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
# for m in range(n,next_n+1):
# scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
max_heating_scenarios_nested = nested_dict()
max_electricity_scenarios_nested = nested_dict()
total_heating_scenarios = []
total_electricity_scenarios = []
max_electricity_scenarios_nested_list = defaultdict(list)
max_heating_scenarios_nested_list = defaultdict(list)
accuracy_design_day = 0.99
design_day_heating = []
design_day_electricity = []
representative_day_max = {}
electricity_design_day = {}
heating_design_day = {}
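# Design-day construction: for every hour of every day, count how many representative
# days under-predict the demand; hours that are missed by all clusters are collected and
# later assembled into two extra 'design day' profiles (one for electricity, one for
# heating) that are appended below as additional representative days.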
for day in range(365):
for i in range(24):
k_elect=0
list_k_electricity = []
k_heat=0
list_k_heating = []
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path ,name+'Represent_days_modified_'+str(represent)+'.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
if features_scenarios_nested[day][0:24][i]>electricity_demand[i]:
k_elect=1
list_k_electricity.append(k_elect)
k_elect=0
if features_scenarios_nested[day][24:48][i]>heating_demand[i]:
k_heat=1
list_k_heating.append(k_heat)
k_heat=0
if sum(list_k_electricity)==cluster_numbers: # this hour is not met by any of the representative days
max_electricity_scenarios_nested_list[i].append(features_scenarios_nested[day][0:24][i])
total_electricity_scenarios.append(features_scenarios_nested[day][0:24][i])
if sum(list_k_heating)==cluster_numbers: # this hour is not met by any of the representative days
max_heating_scenarios_nested_list[i].append(features_scenarios_nested[day][24:48][i])
total_heating_scenarios.append(features_scenarios_nested[day][24:48][i])
total_electricity_scenarios.sort(reverse=True)
total_heating_scenarios.sort(reverse=True)
max_electricity_hour = total_electricity_scenarios[35]
max_heating_hour = total_heating_scenarios[2]
#print(max_heating_hour,len(total_heating_scenarios),np.min(total_heating_scenarios),np.max(total_heating_scenarios))
design_day_heating = []
design_day_electricity = []
heating_dd = []
for i in range(24):
if len(max_electricity_scenarios_nested_list[i])==1:
design_day_electricity.append(max_electricity_scenarios_nested_list[i][0])
else:
try:
design_day_electricity.append(np.max([j for j in max_electricity_scenarios_nested_list[i] if j<max_electricity_hour]))
except:
design_day_electricity.append(0)
#print(i,len(max_heating_scenarios_nested_list[i]),max_heating_scenarios_nested_list[i])
if len(max_heating_scenarios_nested_list[i])==1:
heating_dd.append(max_heating_scenarios_nested_list[i][0])
design_day_heating.append(np.max(heating_dd))
else:
try:
heating_dd = [j for j in max_heating_scenarios_nested_list[i] if j<max_heating_hour]
design_day_heating.append(np.max(heating_dd))
except:
design_day_heating.append(0)
for i in range(24):
if design_day_electricity[i]==0:
if i==0:
design_day_electricity[i] = design_day_electricity[i+1]
elif i==23:
design_day_electricity[i] = design_day_electricity[i-1]
else:
design_day_electricity[i] = (design_day_electricity[i-1]+design_day_electricity[i+1])/2
if design_day_heating[i]==0:
if i==0:
design_day_heating[i] = design_day_heating[i+1]
elif i==23:
design_day_heating[i] = design_day_heating[i-1]
else:
design_day_heating[i] = (design_day_heating[i-1]+design_day_heating[i+1])/2
representative_day_max = {}
electricity_demand_total = defaultdict(list)
heating_demand_total = defaultdict(list)
heating_demand_max = {}
electricity_demand_max = {}
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path ,name+'Represent_days_modified_'+str(represent)+'.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
#hours_representative_day= round(sum_probability[representative_day]/sum(sum_probability),4)*8760
heating_demand_max[represent]= np.mean(heating_demand)
electricity_demand_max[represent]= np.mean(electricity_demand)
high_electricity_index = []
high_heating_index = []
high_electricity_value = []
high_heating_value = []
key_max_electricity=max(electricity_demand_max, key=electricity_demand_max.get)
key_max_heating=max(heating_demand_max, key=heating_demand_max.get)
for key, value in max_electricity_scenarios_nested.items():
for inner_key, inner_value in max_electricity_scenarios_nested[key].items():
if inner_value>electricity_demand_max[key_max_electricity]:
high_electricity_index.append(scenario_number[key]*365+inner_key)
high_electricity_value.append(inner_value)
for key, value in max_heating_scenarios_nested.items():
for inner_key, inner_value in max_heating_scenarios_nested[key].items():
if inner_value>heating_demand_max[key_max_heating]:
high_heating_index.append(scenario_number[key]*365+inner_key)
high_heating_value.append(inner_value)
sum_probability.append(0.5*len(total_electricity_scenarios)/len(index_label_all)*365)
sum_probability.append(len(total_heating_scenarios)/len(index_label_all)*365)
filtered_label[cluster_numbers]=len(total_electricity_scenarios)
filtered_label[cluster_numbers+1]=len(total_heating_scenarios)
representative_day = cluster_numbers
data_represent_days_modified={'Electricity total (kWh)': design_day_electricity,
'Heating (kWh)': representative_day_max[key_max_electricity]['Heating (kWh)'],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
representative_day = cluster_numbers+1
data_represent_days_modified={'Electricity total (kWh)': representative_day_max[key_max_heating]['Electricity total (kWh)'],
'Heating (kWh)': design_day_heating,
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
for i in range(48): # 24*2 = 48 features per day (electricity + heating)
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
for k in range(2): # 2 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
#min_non_z = np.min(np.nonzero(scenario_data[k]))
#zmax_non_z = np.max(np.nonzero(scenario_data[k]))
#represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
#ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#for n in iranges:
# next_n = next(iranges)
# if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
# scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
# elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of 1 hour to 4 hr, use interpolation and extrapolation
# f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
# for m in range(n,next_n+1):
# scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
df_represent_days_modified = pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
# %%
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
"""
===========================
GridSearch with Census Data
===========================
"""
# %%
# This notebook shows how to use Fairlearn to generate predictors for the Census dataset.
# This dataset is a classification problem - given a range of data about 32,000 individuals,
# predict whether their annual income is above or below fifty thousand dollars per year.
#
# For the purposes of this notebook, we shall treat this as a loan decision problem.
# We will pretend that the label indicates whether or not each individual repaid a loan in
# the past.
# We will use the data to train a predictor to predict whether previously unseen individuals
# will repay a loan or not.
# The assumption is that the model predictions are used to decide whether an individual
# should be offered a loan.
#
# We will first train a fairness-unaware predictor and show that it leads to unfair
# decisions under a specific notion of fairness called *demographic parity*.
# We then mitigate unfairness by applying the :code:`GridSearch` algorithm from the
# Fairlearn package.
# %%
# Load and preprocess the data set
# --------------------------------
# We download the data set using `fetch_adult` function in `fairlearn.datasets`.
# We start by importing the various modules we're going to use:
#
from sklearn.model_selection import train_test_split
from fairlearn.reductions import GridSearch
from fairlearn.reductions import DemographicParity, ErrorRate
from fairlearn.metrics import MetricFrame, selection_rate, count
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn import metrics as skm
import pandas as pd
# %%
# We can now load and inspect the data by using the `fairlearn.datasets` module:
from sklearn.datasets import fetch_openml
data = fetch_openml(data_id=1590, as_frame=True)
X_raw = data.data
Y = (data.target == '>50K') * 1
X_raw
# %%
# We are going to treat the sex of each individual as a sensitive
# feature (where 0 indicates female and 1 indicates male), and in
# this particular case we are going to separate this feature out and drop it
# from the main data.
# We then perform some standard data preprocessing steps to convert the
# data into a format suitable for the ML algorithms
A = X_raw["sex"]
X = X_raw.drop(labels=['sex'], axis=1)
X = pd.get_dummies(X)
sc = StandardScaler()
X_scaled = sc.fit_transform(X)
X_scaled = pd.DataFrame(X_scaled, columns=X.columns)
le = LabelEncoder()
Y = le.fit_transform(Y)
# %%
# Finally, we split the data into training and test sets:
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(X_scaled,
Y,
A,
test_size=0.2,
random_state=0,
stratify=Y)
# Work around indexing bug
X_train = X_train.reset_index(drop=True)
A_train = A_train.reset_index(drop=True)
X_test = X_test.reset_index(drop=True)
A_test = A_test.reset_index(drop=True)
# %%
# Training a fairness-unaware predictor
# -------------------------------------
#
# To show the effect of Fairlearn we will first train a standard ML predictor
# that does not incorporate fairness.
# For speed of demonstration, we use the simple
# :class:`sklearn.linear_model.LogisticRegression` class:
unmitigated_predictor = LogisticRegression(solver='liblinear', fit_intercept=True)
unmitigated_predictor.fit(X_train, Y_train)
# %%
# We can start to assess the predictor's fairness using the `MetricFrame`:
metric_frame = MetricFrame(metric={"accuracy": skm.accuracy_score,
"selection_rate": selection_rate,
"count": count},
sensitive_features=A_test,
y_true=Y_test,
y_pred=unmitigated_predictor.predict(X_test))
print(metric_frame.overall)
print(metric_frame.by_group)
metric_frame.by_group.plot.bar(
subplots=True, layout=[3, 1], legend=False, figsize=[12, 8],
title='Accuracy and selection rate by group')
# %%
# Looking at the disparity in accuracy, we see that males have an error
# rate roughly three times that of females.
# More interesting is the disparity in opportunity - males are offered loans at
# three times the rate of females.
#
# Despite the fact that we removed the feature from the training data, our
# predictor still discriminates based on sex.
# This demonstrates that simply ignoring a sensitive feature when fitting a
# predictor rarely eliminates unfairness.
# There will generally be enough other features correlated with the removed
# feature to lead to disparate impact.
# %%
# Mitigation with GridSearch
# --------------------------
#
# The :class:`fairlearn.reductions.GridSearch` class implements a simplified version of the
# exponentiated gradient reduction of `Agarwal et al. 2018 <https://arxiv.org/abs/1803.02453>`_.
# The user supplies a standard ML estimator, which is treated as a blackbox.
# `GridSearch` works by generating a sequence of relabellings and reweightings, and
# trains a predictor for each.
#
# For this example, we specify demographic parity (on the sensitive feature of sex) as
# the fairness metric.
# Demographic parity requires that individuals are offered the opportunity (are approved
# for a loan in this example) independent of membership in the sensitive class (i.e., females
# and males should be offered loans at the same rate).
# We are using this metric for the sake of simplicity; in general, the appropriate fairness
# metric will not be obvious.
sweep = GridSearch(LogisticRegression(solver='liblinear', fit_intercept=True),
constraints=DemographicParity(),
grid_size=71)
# %%
# Our algorithms provide :code:`fit()` and :code:`predict()` methods, so they behave in a similar manner
# to other ML packages in Python.
# We do however have to specify two extra arguments to :code:`fit()` - the column of sensitive
# feature labels, and also the number of predictors to generate in our sweep.
#
# After :code:`fit()` completes, we extract the full set of predictors from the
# :class:`fairlearn.reductions.GridSearch` object.
sweep.fit(X_train, Y_train,
sensitive_features=A_train)
predictors = sweep.predictors_
# %%
# We could plot performance and fairness metrics of these predictors now.
# However, the plot would be somewhat confusing due to the number of models.
# In this case, we are going to remove the predictors which are dominated in the
# error-disparity space by others from the sweep (note that the disparity will only be
# calculated for the sensitive feature; other potentially sensitive features will
# not be mitigated).
# In general, one might not want to do this, since there may be other considerations
# beyond the strict optimization of error and disparity (of the given sensitive feature).
errors, disparities = [], []
for m in predictors:
def classifier(X): return m.predict(X)
error = ErrorRate()
error.load_data(X_train, pd.Series(Y_train), sensitive_features=A_train)
disparity = DemographicParity()
    disparity.load_data(X_train, pd.Series(Y_train), sensitive_features=A_train)
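# A plausible continuation, following the published Fairlearn GridSearch demo (treat it
# as a sketch rather than this document's own code). The two append calls belong inside
# the loop above; the filtering below then keeps only the models that are not dominated
# in the error-disparity space:
    errors.append(error.gamma(classifier)[0])
    disparities.append(disparity.gamma(classifier).max())
all_results = pd.DataFrame({"predictor": predictors, "error": errors, "disparity": disparities})
non_dominated = []
for row in all_results.itertuples():
    errors_for_lower_or_eq_disparity = all_results["error"][all_results["disparity"] <= row.disparity]
    if row.error <= errors_for_lower_or_eq_disparity.min():
        non_dominated.append(row.predictor)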
# Copyright 2021 <NAME>, spideynolove @ GitHub
# See LICENSE for details.
__author__ = '<NAME> @spideynolove in GitHub'
__version__ = '0.0.1'
# mimic pro code
# from .technical import technical_indicators, moving_averages, pivot_points
import investpy as iv
import os
import numpy as np
import pandas as pd
import datetime
import re
from settings import *
from functools import reduce
from pprint import pprint
'''
# --------- investpy market folder path
equity_path = 'investpy/equitiesdata/'
crypto_path = 'investpy/cryptodata/'
'''
# today = datetime.date.today().strftime("%d/%m/%Y")
today = '19/08/2021'
def convert_date(date):
return date.strftime("%d/%m/%Y")
def calculate_stats(source=combine_path, periods=13,
quotes='cor_bond', interval='Daily'):
df = pd.read_csv(source+f'{quotes}_{interval}.csv')
df = df.iloc[-periods-1:]
df['Mean'] = df.iloc[:, 1:5].mean(axis=1)
df['Std'] = df.iloc[:, 1:5].std(axis=1)
df['Skew'] = df.iloc[:, 1:5].skew(axis=1)
df['Kurt'] = df.iloc[:, 1:5].kurtosis(axis=1)
# raises a KeyError if the source file has no 'Close' column
df['Change%'] = df['Close'].pct_change()*100
df['Mchange%'] = df['Mean'].pct_change()*100
# TODO: decide whether these raw OHLC columns should be kept or dropped
df.drop(columns=['Open', 'High', 'Low'], inplace=True)
df.set_index('Date', inplace=True)
df = df[-periods:]
# print(quotes)
# print(df)
df.to_csv(analysis_path + f'{quotes}_{periods}_{interval}_stats.csv')
def calculate_one_stats():
pass
def correlation_one(source=combine_path, periods=13,
quotes='cor_bond', interval='Daily'):
# read data
df = pd.read_csv(source+f'{quotes}_{interval}.csv')
df = df.iloc[-periods-1:]
df = df.corr()
# print(quotes, periods, interval)
# print(df)
# print()
# print(df.corr()) # method='kendall' / 'spearman'
df.to_csv(analysis_path + f'{quotes}_{periods}_{interval}_corr.csv')
def residuals_formula():
pass
def correlation_two(periods=4, interval='Daily',
dicts={'currenciesdata': 'XAUUSD',
'rates-bondsdata': 'U.S. 10Y'}):
sources = list(dicts.keys())
quotes = list(dicts.values())
df = pd.read_csv(
f'investpy/{sources[0]}/{quotes[0]}_{interval}.csv')
df = df.iloc[-periods-1:]
df.reset_index(inplace=True)
df1 = pd.read_csv(
f'investpy/{sources[1]}/{quotes[1]}_{interval}.csv')
df1 = df1.iloc[-periods-1:]
df1.reset_index(inplace=True)
df_ = list(df.corrwith(df1))
df1_ = list(df.corrwith(df1, axis=1))
return df_[-len(df_)+1:], df1_[-len(df1_)+1:]
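# Example usage (illustrative only; assumes the CSVs referenced above exist locally):
# col_corr, row_corr = correlation_two(periods=13, interval='Daily',
#                                      dicts={'currenciesdata': 'XAUUSD',
#                                             'rates-bondsdata': 'U.S. 10Y'})
# print(row_corr)  # row-wise OHLC correlations for the last 13 sessions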
def combine_params(filename, params, interval):
check_data(combine_path, f'{filename}_{interval}.csv')
    main_df = pd.DataFrame()
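    # A minimal sketch of how the rest of combine_params could proceed (assumed, not from
    # the original source; the shape of `params` is also an assumption): read each quote's
    # CSV, keep Date/Close, merge the frames on Date with functools.reduce (imported above),
    # and save the combined frame under combine_path.
    #     frames = []
    #     for source, quote in params.items():   # assumed shape: {market_folder: symbol}
    #         quote_df = pd.read_csv(f'investpy/{source}/{quote}_{interval}.csv')[['Date', 'Close']]
    #         frames.append(quote_df.rename(columns={'Close': quote}))
    #     main_df = reduce(lambda left, right: pd.merge(left, right, on='Date'), frames)
    #     main_df.to_csv(combine_path + f'{filename}_{interval}.csv', index=False)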
import ast, json, logging, os, sys, time, traceback, requests
from datetime import datetime
from multiprocessing import Process, Queue
from urllib.parse import urlparse
import pandas as pd
import sqlalchemy as s
from sqlalchemy import MetaData
from sqlalchemy.ext.automap import automap_base
from workers.standard_methods import *
from sqlalchemy.sql.expression import bindparam
class GHPullRequestWorker:
"""
Worker that collects Pull Request related data from the Github API and stores it in our database.
:param task: most recent task the broker added to the worker's queue
:param config: holds info like api keys, descriptions, and database connection strings
"""
def __init__(self, config, task=None):
self._task = task
self._child = None
self._queue = Queue()
self._maintain_queue = Queue()
self.working_on = None
self.config = config
LOG_FORMAT = '%(levelname)s:[%(name)s]: %(message)s'
logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO, format=LOG_FORMAT)
logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid())))
self.db = None
self.table = None
self.API_KEY = self.config['key']
self.tool_source = 'GitHub Pull Request Worker'
self.tool_version = '0.0.1' # See __init__.py
self.data_source = 'GitHub API'
self.results_counter = 0
self.headers = {'Authorization': f'token {self.API_KEY}'}
self.history_id = None
self.finishing_task = True
self.specs = {
"id": self.config['id'],
"location": self.config['location'],
"qualifications": [
{
"given": [['github_url']],
"models":['pull_requests', 'pull_request_commits', 'pull_request_files']
}
],
"config": [self.config]
}
self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
self.config['user'], self.config['password'], self.config['host'],
self.config['port'], self.config['database']
)
#Database connections
logging.info("Making database connections...\n")
dbschema = 'augur_data'
self.db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(dbschema)})
helper_schema = 'augur_operations'
self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(helper_schema)})
metadata = MetaData()
helper_metadata = MetaData()
metadata.reflect(self.db, only=['contributors', 'pull_requests',
'pull_request_assignees', 'pull_request_events', 'pull_request_labels',
'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo',
'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits',
'pull_request_files'])
helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth'])
Base = automap_base(metadata=metadata)
HelperBase = automap_base(metadata=helper_metadata)
Base.prepare()
HelperBase.prepare()
self.contributors_table = Base.classes.contributors.__table__
self.pull_requests_table = Base.classes.pull_requests.__table__
self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__
self.pull_request_events_table = Base.classes.pull_request_events.__table__
self.pull_request_labels_table = Base.classes.pull_request_labels.__table__
self.pull_request_message_ref_table = Base.classes.pull_request_message_ref.__table__
self.pull_request_meta_table = Base.classes.pull_request_meta.__table__
self.pull_request_repo_table = Base.classes.pull_request_repo.__table__
self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__
self.pull_request_teams_table = Base.classes.pull_request_teams.__table__
self.message_table = Base.classes.message.__table__
self.pull_request_commits_table = Base.classes.pull_request_commits.__table__
self.pull_request_files_table = Base.classes.pull_request_files.__table__
self.history_table = HelperBase.classes.worker_history.__table__
self.job_table = HelperBase.classes.worker_job.__table__
logging.info("Querying starting ids info...\n")
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1
self.pr_id_inc = get_max_id(self, 'pull_requests', 'pull_request_id')
self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id')
self.msg_id_inc = get_max_id(self, 'message', 'msg_id')
self.pr_msg_ref_id_inc = get_max_id(self, 'pull_request_message_ref', 'pr_msg_ref_id')
self.label_id_inc = get_max_id(self, 'pull_request_labels', 'pr_label_id')
self.event_id_inc = get_max_id(self, 'pull_request_events', 'pr_event_id')
self.reviewer_id_inc = get_max_id(self, 'pull_request_reviewers', 'pr_reviewer_map_id')
self.assignee_id_inc = get_max_id(self, 'pull_request_assignees', 'pr_assignee_map_id')
self.pr_meta_id_inc = get_max_id(self, 'pull_request_meta', 'pr_repo_meta_id')
# Organize different api keys/oauths available
init_oauths(self)
# Send broker hello message
connect_to_broker(self)
# self.pull_requests_graphql({
# 'job_type': 'MAINTAIN',
# 'models': ['pull_request_files'],
# 'display_name': 'pull_request_files model for url: https://github.com/zephyrproject-rtos/actions_sandbox.git',
# 'given': {
# 'github_url': 'https://github.com/zephyrproject-rtos/actions_sandbox.git'
# }
# }, 25201)
def update_config(self, config):
""" Method to update config and set a default
"""
self.config = {
"display_name": "",
"description": "",
"required": 1,
"type": "string"
}
self.config.update(config)
self.API_KEY = self.config['key']
@property
def task(self):
""" Property that is returned when the worker's current task is referenced
"""
return self._task
@task.setter
def task(self, value):
""" entry point for the broker to add a task to the queue
Adds this task to the queue, and calls method to process queue
"""
github_url = value['given']['github_url']
repo_url_SQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(github_url))
        rs = pd.read_sql(repo_url_SQL, self.db, params={})
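        # The query above resolves the repo_id for the given github_url; a typical
        # continuation in this worker family (a sketch under that assumption, not
        # necessarily this project's exact code) queues the task for processing:
        try:
            repo_id = int(rs.iloc[0]['repo_id'])
            if value['job_type'] == 'UPDATE':
                self._queue.put(value)
            elif value['job_type'] == 'MAINTAIN':
                self._maintain_queue.put(value)
        except Exception as e:
            logging.error('Error queueing task for {}: {}\n'.format(github_url, e))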
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
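        # 1370745748 seconds after the Unix epoch == 2013-06-09 02:42:28 UTC,
        # matching the expected Timestamps constructed below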
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((~result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((~result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = | date_range('1/1/2012', periods=4, freq='3H') | pandas.date_range |
import os
import shutil
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from scipy.stats import ttest_ind
from utils.file import get_file_paths
class DataPreprocess:
def __init__(self, data_dir: str, top_n_gene: list, gene_limit):
# Data folder path which includes train-test dataset and class file
self.data_dir = data_dir
self.top_n_gene = top_n_gene
self.gene_limit = gene_limit
# Getting file names to dict and accessing them just typing name of file eg. 'train', 'test'
self.file_paths = get_file_paths(self.data_dir)
# Read train-test datasets and class file
self.train, self.test = self.read_data()
self.classes, self.encoder = self.read_classes()
self.t_test_result = self.data_preprocess()
self.top_n_values = self.get_top_n()
self.save_top_n()
def data_preprocess(self):
self.remove_fold_data(5)
self.threshold_data()
self.remove_low_variance()
return self.calculate_t(save_df=True)
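# The helper steps invoked above (remove_fold_data, threshold_data,
# remove_low_variance, calculate_t) are not shown in this excerpt. The sketch
# below is an illustrative stand-in for the t-test step only; it assumes
# self.train is a samples-x-genes DataFrame and self.classes is an encoded
# binary label array, both of which are assumptions rather than facts from
# the original code.
def _calculate_t_sketch(self, save_df=False):
group_a = self.train[self.classes == 0]
group_b = self.train[self.classes == 1]
# Welch's t-test per gene between the two classes
t_stat, p_val = ttest_ind(group_a, group_b, axis=0, equal_var=False)
result = pd.DataFrame({'gene': self.train.columns,
't_statistic': t_stat,
'p_value': p_val})
if save_df:
result.to_csv(os.path.join(self.data_dir, 't_test_result.csv'), index=False)
return result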
def read_data(self):
# Reading train and test csv files and converting them into numpy
# For now only selecting first 100 genes. It helps working faster for further steps
if self.gene_limit == 0:
train = | pd.read_csv(self.file_paths['train']) | pandas.read_csv |
# -*- coding: utf-8 -*-
import dash
from dash.dependencies import Input, Output, State, Event
from dash.exceptions import PreventUpdate
import dash_html_components as html
import dash_core_components as dcc
import dash_table_experiments as dt
from flask_caching import Cache
import numpy as np
import os
import pandas as pd
import plotly.graph_objs as go
import time
import json
from perspective import Perspective
from twitter import Twitter
perspective_key = os.environ.get('PERSPECTIVE_KEY')
perspective_client = Perspective(perspective_key)
twitter_consumer_key = os.environ.get('TWITTER_KEY')
twitter_consumer_secret = os.environ.get('TWITTER_SECRET')
twitter_client = Twitter(twitter_consumer_key, twitter_consumer_secret)
app = dash.Dash('harassment dashboard')
server = app.server
CACHE_CONFIG = {
'CACHE_TYPE': 'redis',
'CACHE_REDIS_URL': os.environ.get('REDIS_URL', 'localhost:6379')
}
cache = Cache()
cache.init_app(app.server, config=CACHE_CONFIG)
app.css.append_css({"external_url": "https://codepen.io/chriddyp/pen/bWLwgP.css"})
app.css.append_css({"external_url": "https://codepen.io/prometheusred/pen/MVbJvO.css"})
colors = {
'background': 'white',
'text': 'black',
'high': '#D400F9',
'medium': '#6d60fe',
'low': '#25C1F9',
}
explanation = "This dashboard visualizes toxic language in Tweets and offers a way to engage to help the harassed. Just enter someone's twitter handle to see if they are the target of toxicity and harassment."
center_el = {'width': '600px',
'textAlign': 'center',
'margin': '50px auto'}
center_container = {'margin': '0px auto',
'justify-content': 'center',
'display': 'flex',
'textAlign': 'center'}
top_container = {'minWidth': '675px',
'minHeight': '500px',
'width': '100%'}
right_el = {'margin': '24px 0 0 2px',
'height': '38px'}
left_graph = {'float': 'left',
'width': '50%',
'marginTop': '-70px'}
right_text = {'flex': '1',
'overflow': 'hidden',
'align-self': 'center',
'textAlign': 'left'}
warning = {
'color': 'red',
'margin': '10px 20px 0 0',
'text-align': 'center',
}
app.layout = html.Div([
html.H1(children='Help the Harassed', style=center_el),
html.P(children=explanation, style=center_el),
html.Div(children=[
html.Div(children=[
html.Label(children='Twitter @handle'),
dcc.Input(id='input-box',
type='text',
value='@')],),
html.Button('Submit',
id='submit-button', style=right_el),],
style=center_container),
html.P(id='warning', children='no tweets for this twitter handle.',
style={'display': 'none'}),
html.Div(id='toggle', children=[
html.H2(children='Toxicity Summary',
style={'margin': '120px 0 12px', 'textAlign': 'center'}),
html.P(children='(click bars to see tweets)',
style={'margin': '0 0 50px', 'textAlign': 'center'}),
html.Div(children=[
html.Div(children=[dcc.Graph(id='toxicity-bar')],
style={'margin': '0 50px auto'}),
# html.Div(dt.DataTable(
# rows=[{'text': 'click bar graph above to select tweets',
# 'author': 'na',
# 'time': 'na',
# 'test': 'na',
# 'toxicity': 'na'}],
# row_selectable=True,
# filterable=True,
# sortable=True,
# #row_height=40,
# selected_row_indices=[],
# id='datatable'
# ), style={'margin': '10px auto'})
html.Div(id='table-container', style={'margin': '0 50px 0 50px', 'minWidth': '650px'})
],
style=top_container),
#dcc.Graph(id='toxicity-area', style={'margin': '100px 10px 100px 10px'}),
html.H2(children='Toxicity over time',
style={'margin': '120px 0 12px', 'textAlign': 'center'}),
html.P(children='(click on time-series to see tweets)',
style={'margin': '0 0 50px', 'textAlign': 'center'}),
html.Div(children=[
dcc.Graph(id='toxicity-over-time', style={'minHeight': '500px','flex': '3'}),
html.Div(children=[html.Div(children='click on time-series to see tweets...',
id='full-text', style={'marginTop': '50px'}),
html.Div(id='join-link',
children=[html.A(
html.Button(children=['Join the conversation!']),
href='https://twitter.com'),]),
],
style=right_text)
], style={'display': 'flex'})
]),
html.Div(id='signal', style={'display': 'none'}),
], style={'color': 'black',
'left': 0,
'top': 0,
'width': '100%',
'height': '100%',
'overflow': 'scroll',
'position': 'fixed',
'backgroundColor': colors['background']})
@app.callback(Output('signal', 'children'),
[Input('submit-button', 'n_clicks')],
state=[State('input-box', 'value')])
def request_scores(n_clicks, input_value):
"""
Initiates tweet -> score lookup when clicking submit
Will look for the @handle in redis before starting
request process. Data is signaled through invisible div
so that it can be used for multiple visualizations without
blocking or doing weird things with state.
"""
if n_clicks:
print('request_scores')
try:
return global_store(input_value).to_json(date_format='iso',
orient='split')
except Exception as e:
print('**ERROR**')
print(e)
print(input_value)
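# `global_store` (called above) is not defined in this excerpt. Below is a
# minimal sketch of the usual flask-caching pattern it relies on; the
# `tweets_for` and `scores` client methods are assumed names, not the actual
# Twitter/Perspective wrapper APIs, while the 'text' and 'toxicity' columns
# mirror the datatable fields used elsewhere in this app.
@cache.memoize()
def global_store(handle):
# Fetch and score tweets once per handle; cached in redis so every callback
# reading the hidden 'signal' div reuses the same DataFrame.
tweets = twitter_client.tweets_for(handle)  # assumed method name
df = pd.DataFrame(tweets)
df['toxicity'] = perspective_client.scores(df['text'].tolist())  # assumed method name
return df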
@app.callback(Output('input-box', 'value'),
[Input('signal', 'children')],
state=[State('input-box', 'value')])
def reset(tweets, input_value):
"""
Clear input box after user clicks submit.
"""
if not input_value or input_value == '@':
raise PreventUpdate('no data yet!')
return '@'
@app.callback(Output('toggle', 'style'),
[Input('submit-button', 'n_clicks')],
state=[State('input-box', 'value')])
def toggle_graphs(n_clicks, value):
"""
show graphs after first submission
"""
if n_clicks:
return {'display': 'block'}
else:
return {'display': 'none'}
@app.callback(Output('warning', 'style'),
[Input('submit-button', 'n_clicks'),
Input('signal', 'children')])
def toggle_warning(n_clicks, signal):
"""
displays warning message if twitter handle returns 0 tweets
or errors.
"""
if signal or n_clicks <= 0:
return {'display': 'none'}
else:
return warning
# @app.callback(Output('join-link', 'children'),
# [Input('submit-button', 'n_clicks')],
# state=[State('input-box', 'value')])
# def make_link(n_clicks, value):
# """
# Create a link to target's twitter profile
# """
# if not value or not n_clicks or value == '@':
# raise PreventUpdate('no data yet!')
# return html.A(html.Button(children=['Join the conversation!']),
# id='join-link',
# href='https://twitter.com/' + value[1:len(value)])
@app.callback(Output('join-link', 'children'),
[Input('toxicity-over-time', 'clickData'),
Input('signal', 'children')])
def make_link_specific(clickData, tweets_json):
"""
Create a link to tweeter's twitter profile
"""
if not tweets_json or not clickData:
raise PreventUpdate('no data yet!')
tweets_df = | pd.read_json(tweets_json, orient='split') | pandas.read_json |
# Copyright 2019 Toyota Research Institute. All rights reserved.
"""
Module and scripts for training and predicting with models,
given a matching descriptor set.
Usage:
run_model [INPUT_JSON] [--fit]
Options:
-h --help Show this screen
--fit <true_or_false> [default: False] Fit model
--version Show version
The `run_model` script will generate a model and create predictions
based on the features previously generated by the featurize module.
It stores its outputs in `/data-share/predictions/`
The input json must contain the following fields
* `file_list` - list of files corresponding to model features
The output json will contain the following fields
* `file_list` - list of files corresponding to model predictions
Example:
$ run_model '{"file_list": ["/data-share/features/FastCharge_2_CH29_full_model_features.json"]}'
{
"file_list": ["/data-share/predictions/FastCharge_2_CH29_full_model_predictions.json"]
}
"""
from __future__ import division
import os
import json
import pandas as pd
import numpy as np
import datetime
from docopt import docopt
from monty.json import MSONable
from monty.serialization import loadfn, dumpfn
from beep.collate import scrub_underscore_suffix, add_suffix_to_filename
from sklearn.linear_model import Lasso, LassoCV, RidgeCV, Ridge, ElasticNetCV, \
ElasticNet, MultiTaskElasticNet, MultiTaskElasticNetCV
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from beep.utils import KinesisEvents
from beep import MODEL_DIR, ENVIRONMENT, logger, __version__
s = {'service': 'DataAnalyzer'}
# Projects that have cycling profiles compatible with the FastCharge model should be included in the list below
DEFAULT_MODEL_PROJECTS = ['FastCharge', 'ClosedLoopOED', '2017-05-12', '2017-06-30', '2018-04-12']
assert all('_' not in name for name in DEFAULT_MODEL_PROJECTS)
class DegradationModel(MSONable):
"""
Object corresponding to prediction model.
Attributes:
name (str): instance name.
model (dict): model specific parameters.
"""
def __init__(self, name, model):
"""
Args:
name (str):
model (dict):
"""
self.name = name
self.model = model
@classmethod
def from_name(cls, model_name='full_model'):
"""
Args:
model_name (str): name of method for featurization.
"""
if model_name == 'full_model':
return cls.init_full_model()
else:
raise NotImplementedError
@classmethod
def from_serialized_model(cls, model_dir='data-share/model/', serialized_model=None):
if serialized_model is None:
raise ValueError('Please specify model name stored in {}'.format(model_dir))
elif not os.path.exists(os.path.join(model_dir, serialized_model)):
raise ValueError("Path invalid")
else:
trained_model = loadfn(os.path.join(model_dir, serialized_model))
return cls(name=serialized_model.split('.')[0], model=trained_model)
@classmethod
def init_full_model(cls):
"""
Predict using model coefficients generated by fitting to
D3Batt early prediction manuscript data using BEEP codebase.
Returns:
DegradationModel
"""
coefs = np.array([77.64331966, -3.38357566, 0.0 , 0.0 ,
9.48943144, -0.0 , -0.0 , -0.0 ,
-0. , 0.0 , -172.1890952 , -61.6947121 ,
5.22729452, 0.0 , 0.0 , 0.0 ,
0.0 , -31.84889315, -0.0 , -0.0 ])
mu = np.array([1.06823975e+00, 7.73847312e-01, 1.07329200e+00, 1.13981637e+06,
6.34888946e+02, -1.36155073e+00, -1.72133649e+00, -3.47084255e+00,
-1.22713574e+00, 2.63323211e-01, -1.47651181e+00, 3.73629665e+01,
2.95566653e+01, -1.36504373e-05, 1.07594232e+00, -6.87024033e-05,
1.08010237e+00, 1.67499372e-02, 1.73228530e-02, -3.04463017e-04])
sigma = np.array([1.41520865e-02, 1.95217699e-01, 1.59416725e-02, 7.23228725e+06,
5.65537087e+01, 2.58001513e-01, 2.70289386e-01, 4.51079117e-01,
6.71019154e-01, 2.05683149e-01, 2.25143929e-01, 1.71008400e+00,
5.69541083e-01, 5.98941337e-05, 1.44721412e-02, 6.14265444e-05,
1.48826794e-02, 6.54248192e-04, 7.19974520e-04, 5.86172005e-04])
model = {'coef_': coefs}
name = 'full_model'
date_string = datetime.datetime.now()
trained_model = {'model_type': 'linear',
'model': model,
'confidence_bounds': 0.1,
'regularization_type': 'elasticnet',
'timestamp': date_string.isoformat(),
'dataset_id': None,
'hyperparameters': {},
'featureset_name': 'full_model',
'predicted_quantity': 'cycle',
'mu': np.array(mu),
'sigma': np.array(sigma)
}
return cls(name, trained_model)
def serialize(self, processed_dir='data-share/model'):
"""
Args:
processed_dir (dict): target directory.
Returns:
"""
if not os.path.exists(processed_dir):
os.makedirs(processed_dir)
dumpfn(self.model, os.path.join(processed_dir, self.name + '.model'))
def as_dict(self):
"""
Method for dictionary serialization.
Returns:
dict: corresponding to dictionary for serialization.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"name": self.name,
"model": self.model,
}
@classmethod
def from_dict(cls, d):
"""
MSONable deserialization method.
Args:
d (dict):
Returns:
beep.run_model.DegradationModel:
"""
return cls(**d)
@classmethod
def train(cls, list_of_featurized_jsons, dataset_id=None,
model_type='linear', regularization_type='elasticnet',
model_name='custom_model', hyperparameters=None):
"""
Class method to accept a json string of featurized cycler run files
and output model coefficients. Trained models are serialized as
a dictionary and pushed into a local file.
Model coefficients are initialized after training.
Args:
list_of_featurized_jsons (str): json string of featurized cycler run files.
dataset_id (str): unique_id corresponding to a list of run_ids that are used
for model training.
model_type (str): linear or random_forest.
regularization_type (str): lasso or ridge or elasticnet
(cv estimator chosen by default).
model_name (str): custom name for the model.
hyperparameters (dict): dictionary with the following attributes:
random_state, test_size, k_fold, tol, l1_ratio.
"""
if hyperparameters is None:
hyperparameters = {'random_state': 2,
'test_size': 0.3,
'k_fold': 5,
'tol': 0.0001,
'l1_ratio': [.1, .5, .7, .9, .95, 1],
'max_iter': 1000000
}
X, y, featureset_name, predicted_quantity = assemble_predictors(list_of_featurized_jsons)
if model_type.lower() == 'random_forest':
# NOTE: no dedicated random-forest trainer is implemented in this module;
# this branch currently falls back to the same linear trainer as 'linear'.
model, mu, s, relative_prediction_error, Rsquare, hyperparameters_optimized =\
train_linear_model(X, y, **hyperparameters)
elif model_type.lower() == 'linear':
model, mu, s, relative_prediction_error, Rsquare, hyperparameters_optimized = \
train_linear_model(X, y, **hyperparameters)
else:
raise NotImplementedError
# Book-keeping
date_string = datetime.datetime.now()
trained_model = {'model_type': model_type.lower(),
'model': model.__dict__,
'confidence_bounds': relative_prediction_error,
'Rsquare': Rsquare,
'regularization_type': regularization_type,
'timestamp': date_string.isoformat(),
'dataset_id': dataset_id,
'hyperparameters': hyperparameters_optimized,
'featureset_name': featureset_name,
'predicted_quantity': predicted_quantity
}
if model_type.lower() == 'linear':
trained_model['mu'] = np.array(mu)
trained_model['sigma'] = np.array(s)
return cls(name=model_name, model=trained_model)
def predict(self, features):
"""
Args:
features (beep.featurize.DegradationPredictor): features in
DegradationPredictor format.
Returns:
prediction (float): float corresponding to predicted value.
"""
if self.model['model_type'].lower() == 'linear':
X = (features.X - self.model['mu'])/self.model['sigma']
else:
X = features.X
coefs = self.model['model']['coef_']
prediction = np.nansum((np.array(X)* coefs), axis=1) + self.model['model']['intercept_']
if prediction.ndim == 2:
prediction = prediction.reshape((prediction.shape[1],))
return prediction
def prediction_to_dict(self, prediction, nominal_capacity=1.1):
"""
Args:
prediction (float or [float]): float or list of floats
corresponding to predictions.
nominal_capacity (float): Nominal capacity of the cell.
Returns:
dict: dictionary with predictions, error bars and model name.
"""
output_dict = {}
if len(prediction) == 1:
output_dict['discharge_capacity'] = nominal_capacity*0.8
output_dict['fractional_error'] = self.model['confidence_bounds']
else:
output_dict['discharge_capacity'] = np.around(np.arange(.98, 0.78, -0.03), 2) * nominal_capacity
# For now the API is only serving up a single value as the confidence interval
# the model is producing MSE values for each of the prediction points, so the correct value over
# the full set of prediction point should be the average of the MSE for each of the points
# In the future the fractional errors can be an array, once the API and UI are ready for that
# Taking the average is equivalent to
output_dict['fractional_error'] = np.asarray([np.average(self.model['confidence_bounds'])])
output_dict['cycle_number'] = prediction
output_dict['model_type'] = self.model['model_type']
output_dict['predicted_quantity'] = self.model['predicted_quantity']
# Ideally, we will have a model_id, but using model_name for now.
output_dict['model_name'] = self.name
return output_dict
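# Illustrative end-to-end usage of the class above (not part of the original
# module): train on a featurized file list and predict for a single cell whose
# features are assumed to be a beep.featurize.DegradationPredictor instance.
# The argument names here are placeholders.
def _example_train_and_predict(featurized_file_list_json, predictor_features):
degradation_model = DegradationModel.train(
featurized_file_list_json,
model_type='linear',
regularization_type='elasticnet',
model_name='example_model')
prediction = degradation_model.predict(predictor_features)
return degradation_model.prediction_to_dict(prediction, nominal_capacity=1.1)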
def train_linear_model(X, y, random_state=1, test_size=0.2,
regularization_type='elasticnet', k_fold=5,
max_iter=1000000, tol=0.0001,
l1_ratio=None):
"""
Function to train linear model with regularization and cross-validation.
Args:
X (pandas.DataFrame): dataframe of descriptors.
y (pandas.DataFrame): dataframe of cycle lifetimes.
random_state (int): seed for train/test split.
test_size (float): proportion of the dataset reserved for model evaluation.
regularization_type (str): lasso or ridge or elastic-net (with cv).
k_fold (int): k in k-fold cross-validation.
max_iter (int): maximum number of iterations for model fitting.
tol (float): tolerance for optimization.
l1_ratio ([float]): list of lasso to ridge ratios for elasticnet.
Returns:
sklearn.linear_model.LinearModel: fitted model.
mu (float): Mean value of descriptors used in training.
s (float): Std dev of descriptors used in training.
"""
if l1_ratio is None:
l1_ratio = [.1, .5, .7, .9, .95, 1]
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=test_size, random_state=random_state)
# Standardize (training) data after train/test split
mu = np.mean(X_train, axis=0)
s = np.std(X_train, axis=0)
X_scaled = (X_train - mu) / s
hyperparameters = {'random_state': random_state,
'test_size': test_size,
'k_fold': k_fold,
'tol': tol,
'max_iter': max_iter
}
if regularization_type == 'lasso' and y.shape[1] == 1:
lassocv = LassoCV(fit_intercept=True, alphas=None, tol=tol,
cv=k_fold, max_iter=max_iter)
lassocv.fit(X_scaled, y_train.values.ravel())
# Set optimal alpha and refit model
alpha_opt = lassocv.alpha_
linear_model = Lasso(fit_intercept=True, alpha=alpha_opt,
max_iter=max_iter)
linear_model.fit(X_scaled, y_train.values)
hyperparameters['l1_ratio'] = 1
elif regularization_type == 'ridge' and y.shape[1] == 1:
ridgecv = RidgeCV(fit_intercept=True, alphas=None, cv=k_fold)
ridgecv.fit(X_scaled, y_train.values.ravel())
# Set optimal alpha and refit model
alpha_opt = ridgecv.alpha_
linear_model = Ridge(fit_intercept=True, alpha=alpha_opt)
linear_model.fit(X_scaled, y_train)
hyperparameters['l1_ratio'] = 0
elif regularization_type == 'elasticnet' and y.shape[1] == 1:
elasticnetcv = ElasticNetCV(fit_intercept=True, normalize=False,
alphas=None, cv=k_fold,
l1_ratio=l1_ratio, max_iter=max_iter)
elasticnetcv.fit(X_scaled, y_train.values.ravel())
# Set optimal alpha and l1_ratio. Refit model
alpha_opt = elasticnetcv.alpha_
l1_ratio_opt = elasticnetcv.l1_ratio_
linear_model = ElasticNet(fit_intercept=True, normalize=False,
l1_ratio=l1_ratio_opt,
alpha=alpha_opt, max_iter=max_iter)
linear_model.fit(X_scaled, y_train)
hyperparameters['l1_ratio'] = l1_ratio_opt
# If more than 1 outcome present, perform multitask regression
elif regularization_type == 'elasticnet' and y.shape[1] > 1:
multi_elasticnet_CV = MultiTaskElasticNetCV(fit_intercept=True, cv=k_fold,
normalize=False,
l1_ratio=l1_ratio, max_iter=max_iter)
multi_elasticnet_CV.fit(X_scaled, y_train)
# Set optimal alpha and l1_ratio. Refit model
alpha_opt = multi_elasticnet_CV.alpha_
l1_ratio_opt = multi_elasticnet_CV.l1_ratio_
linear_model = MultiTaskElasticNet(fit_intercept=True, normalize=False,
max_iter=max_iter)
linear_model.set_params(alpha=alpha_opt, l1_ratio=l1_ratio_opt)
linear_model.fit(X_scaled, y_train)
hyperparameters['l1_ratio'] = l1_ratio_opt
else:
raise NotImplementedError
y_pred = linear_model.predict((X_test-mu)/s)
Rsq = linear_model.score((X_test - mu) / s, y_test)
# Compute 95% confidence interval
# Multioutput = 'raw_values' provides prediction error per output
pred_actual_ratio = [x/y for x, y in zip(y_pred, np.array(y_test))]
relative_prediction_error = 1.96*np.sqrt(mean_squared_error(np.ones(y_pred.shape),
pred_actual_ratio,
multioutput='raw_values')/y_pred.shape[0])
hyperparameters['alpha'] = alpha_opt
return linear_model, mu, s, relative_prediction_error, Rsq, hyperparameters
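# Quick synthetic-data illustration of train_linear_model (not part of the
# original module); the shapes and random values are arbitrary and only show
# the expected inputs and outputs of the trainer above.
def _train_linear_model_example():
rng = np.random.RandomState(0)
X_demo = pd.DataFrame(rng.randn(50, 5), columns=['f%d' % i for i in range(5)])
y_demo = pd.DataFrame({'cycle_life': rng.randint(100, 1000, size=50)})
model, mu, sigma, rel_err, rsq, hyperparams = train_linear_model(
X_demo, y_demo, k_fold=3, max_iter=10000)
return model, rel_err, rsq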
def assemble_predictors(file_list_json):
"""
Method to assemble predictor dataframe from a json string of paths to feature vectors.
Args:
file_list_json (str): json string corresponding to a dictionary
with a file_list attribute.
Returns:
pandas.DataFrame: Dataframe of size (n,m). n = number of cells, m = number of features.
pandas.Series: Series of length n.
"""
if file_list_json.endswith(".json"):
file_list_data = loadfn(file_list_json)
else:
file_list_data = json.loads(file_list_json)
X = pd.DataFrame()
y = pd.DataFrame()
for path in file_list_data['file_list']:
features = loadfn(path)
X = X.append(features.X)
if isinstance(features.y, (int, float)):
y = y.append( | pd.DataFrame([features.y]) | pandas.DataFrame |
import pandas as pd
import os
import json
import numpy as np
from pandas.io.json import json_normalize
import gc
# Helper function to load full dataset with json columns and generate the train features out of it
def load_df(csv_path, json_columns, features):
ans = | pd.DataFrame() | pandas.DataFrame |
import logging
from typing import Optional
import numpy as np
import pandas as pd
from sklearn import utils
from lob_data_utils import lob, model
from sklearn.decomposition import PCA
from sklearn.svm import SVC
logger = logging.getLogger(__name__)
class SvmGdfResults(object):
def __init__(self, stock, r=1.0, s=1.0, data_length=10000, gdf_filename_pattern='',
data_dir='../data/data_gdf', reg_data_dir='../data/prepared'):
self.stock = stock
self.r = r
self.s = s
self.data_length = data_length
self.gdf_filename_pattern = gdf_filename_pattern
self.data_dir = data_dir
self.reg_data_dir = reg_data_dir
self.df, self.df_test = self._read_stock()
all_gdf = ['gdf_{}'.format(i) for i in range(0, 50)]
all_gdf_que = ['gdf_{}'.format(i) for i in range(0, 50)] + ['queue_imbalance']
all_gdf_que_prev = ['gdf_{}'.format(i) for i in range(0, 50)] + ['queue_imbalance', 'prev_queue_imbalance']
feature_columns_dict = {
'que': ['queue_imbalance'],
'que_prev': ['queue_imbalance', 'prev_queue_imbalance'],
'gdf_24_26': ['gdf_24', 'gdf_25'],
'gdf_24-26_que': ['gdf_24', 'gdf_25', 'queue_imbalance'],
'gdf_24-26_que_prev': ['gdf_24', 'gdf_25', 'queue_imbalance', 'prev_queue_imbalance'],
'gdf_23-27': ['gdf_23', 'gdf_24', 'gdf_25', 'gdf_26'],
'gdf_23-27_que': ['gdf_23', 'gdf_24', 'gdf_25', 'gdf_26', 'queue_imbalance'],
'gdf_23-27_que_prev': ['gdf_23', 'gdf_24', 'gdf_25', 'gdf_26', 'queue_imbalance', 'prev_queue_imbalance'],
'gdf_20_30': ['gdf_{}'.format(i) for i in range(20, 30)],
'gdf_20_30_que': ['gdf_{}'.format(i) for i in range(20, 30)] + ['queue_imbalance'],
'gdf_20_30_que_prev': ['gdf_{}'.format(i) for i in range(20, 30)] + ['queue_imbalance', 'prev_queue_imbalance'],
'gdf_0_50': all_gdf,
'gdf_0-50_que': all_gdf_que,
'gdf_0-50_que_prev': all_gdf_que_prev,
'pca_gdf1': all_gdf,
'pca_gdf2': all_gdf,
'pca_gdf3': all_gdf,
'pca_gdf4': all_gdf,
'pca_gdf5': all_gdf,
'pca_gdf6': all_gdf,
'pca_gdf7': all_gdf,
'pca_gdf8': all_gdf,
'pca_gdf9': all_gdf,
'pca_gdf10': all_gdf,
'pca_n_gdf': all_gdf,
'pca_gdf_que1': all_gdf_que,
'pca_gdf_que2': all_gdf_que,
'pca_gdf_que3': all_gdf_que,
'pca_gdf_que4': all_gdf_que,
'pca_gdf_que5': all_gdf_que,
'pca_gdf_que6': all_gdf_que,
'pca_gdf_que7': all_gdf_que,
'pca_gdf_que8': all_gdf_que,
'pca_gdf_que9': all_gdf_que,
'pca_gdf_que10': all_gdf_que,
'pca_n_gdf_que': all_gdf_que,
'pca_gdf_que_prev1': all_gdf_que_prev,
'pca_gdf_que_prev2': all_gdf_que_prev,
'pca_gdf_que_prev3': all_gdf_que_prev,
'pca_gdf_que_prev4': all_gdf_que_prev,
'pca_gdf_que_prev5': all_gdf_que_prev,
'pca_gdf_que_prev6': all_gdf_que_prev,
'pca_gdf_que_prev7': all_gdf_que_prev,
'pca_gdf_que_prev8': all_gdf_que_prev,
'pca_gdf_que_prev9': all_gdf_que_prev,
'pca_gdf_que_prev10': all_gdf_que_prev,
'pca_n_gdf_que_prev': all_gdf_que_prev,
'pca_gdf_que_prev_split10': all_gdf_que_prev
}
def get_score_for_clf(self, clf, df_test, feature_name, pca=None):
x_test = df_test[self.feature_columns_dict[feature_name]]
if pca:
x_test = pca.transform(x_test)
y_test = df_test['mid_price_indicator'].values
return model.test_model(clf, x_test, y_test)
@staticmethod
def get_number_of_pca_components(feature_name: str) -> Optional[int]:
if 'pca_gdf_que_prev_split' in feature_name:
return int(feature_name.replace('pca_gdf_que_prev_split', ''))
if 'pca_gdf_que_prev' in feature_name:
return int(feature_name.replace('pca_gdf_que_prev', ''))
if 'pca_gdf_que' in feature_name:
return int(feature_name.replace('pca_gdf_que', ''))
if 'pca_gdf' in feature_name:
return int(feature_name.replace('pca_gdf', ''))
return None
@classmethod
def split_sequences(cls, sequences, labels, n_steps):
X, y = list(), list()
for i in range(len(sequences)):
end_ix = i + n_steps
if end_ix > len(sequences):
break
seq_x = sequences[i:end_ix]
lab = labels[end_ix - 1]
X.append(seq_x)
y.append(lab)
return np.array(X), np.array(y)
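    # (Added worked example.) split_sequences turns a flat feature matrix into
    # overlapping windows for an LSTM; each window is labelled with the label of
    # its last row. With 5 rows, 2 features and n_steps=3:
    #
    #   seqs = np.arange(10).reshape(5, 2)
    #   labs = np.array([0, 1, 0, 1, 1])
    #   X, y = SvmGdfResults.split_sequences(seqs, labs, n_steps=3)
    #   # X.shape == (3, 3, 2); y == array([0, 1, 1])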
@staticmethod
def get_mean_scores(scores: dict) -> dict:
mean_scores = {}
for k, v in scores.items():
mean_scores[k] = np.mean(v)
return mean_scores
def get_score_for_clf_split_pca(self, clf, df_test, feature_name, pca=None) -> dict:
x_test = df_test[self.feature_columns_dict[feature_name]]
x_test_pca = x_test[[col for col in x_test.columns if 'gdf' in col]]
x_test = x_test[[col for col in x_test.columns if 'gdf' not in col]]
if pca:
x_test_pca = pca.transform(x_test_pca)
for n in range(pca.n_components):
x_test['pca_{}'.format(n)] = x_test_pca[:, n]
y_test = df_test['mid_price_indicator'].values
return model.test_model(clf, x_test, y_test)
def get_pca(self, feature_name) -> Optional[PCA]:
train_x = self.df[self.feature_columns_dict[feature_name]].values
if feature_name in ['pca_n_gdf_que', 'pca_n_gdf_que_prev', 'pca_n_gdf']:
n_components = self.calculate_number_of_components(train_x, threshold=0.99)
else:
n_components = self.get_number_of_pca_components(feature_name)
if n_components:
pca = PCA(n_components=n_components)
pca.fit(train_x)
return pca
return None
@classmethod
def calculate_number_of_components(cls, train_x, threshold=0.99) -> int:
pca = PCA(n_components=10)
pca.fit(train_x)
for i in range(1, len(pca.explained_variance_ratio_)):
sum_of_ratio = np.sum(pca.explained_variance_ratio_[0:i])
if sum_of_ratio > threshold:
return i
return 10
def get_classes_weights(self):
y_train = self.df['mid_price_indicator'].values
classes = np.unique(y_train)
class_weight_list = utils.class_weight.compute_class_weight('balanced', classes, y_train)
class_weights = {classes[0]: class_weight_list[0], classes[1]: class_weight_list[1]}
return class_weights
def train_clf_with_split_pca(self, clf, feature_name, method=None):
"""
Deprecated
"""
logger.info('Training %s r=%s s=%s:',
self.stock, self.r, self.s)
train_x = self.df[self.feature_columns_dict[feature_name]]
train_pca = train_x[[col for col in train_x.columns if 'gdf' in col]]
train_x = train_x[[col for col in train_x.columns if 'gdf' not in col]]
n_components = self.get_number_of_pca_components(feature_name)
pca = None
if n_components:
pca = PCA(n_components=n_components)
pca.fit(train_pca)
train_pca = pca.transform(train_pca)
for n in range(n_components):
train_x['pca_{}'.format(n)] = train_pca[:, n]
scores = model.validate_model(clf, train_x, self.df['mid_price_indicator'])
res = {
**self.get_mean_scores(scores),
'stock': self.stock,
'method': method,
'features': feature_name
}
test_scores = self.get_score_for_clf_split_pca(clf, self.df_test, feature_name=feature_name, pca=pca)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
return {**res, **test_scores}
def get_train_set(self, feature_name='', n_steps=None):
train_x = self.df[self.feature_columns_dict[feature_name]].values
train_y = self.df['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
if n_steps:
train_x, train_y = self.split_sequences(train_x, train_y, n_steps=n_steps)
else:
train_x = np.reshape(train_x, (train_x.shape[0], 1, train_x.shape[1]))
return train_x, train_y
def get_test_set(self, feature_name='', n_steps=None):
test_x = self.df_test[self.feature_columns_dict[feature_name]].values
test_y = self.df_test['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
test_x = pca.transform(test_x)
if n_steps:
test_x, test_y = self.split_sequences(test_x, test_y, n_steps=n_steps)
return test_x, test_y
def train_mlp(self, clf, feature_name='', should_validate=True, method=None,
fit_kwargs=None, compile_kwargs=None, plot_name=None, class_weight=None,
should_return_model=False):
logger.info('Training %s r=%s s=%s: clf=%s', self.stock, self.r, self.s, clf)
train_x = self.df[self.feature_columns_dict[feature_name]].values
test_x = self.df_test[self.feature_columns_dict[feature_name]].values
train_y = self.df['mid_price_indicator'].values
test_y = self.df_test['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
test_x = pca.transform(test_x)
if should_validate:
scores_arrays, m = model.validate_model_lstm(
clf, train_x, train_y, fit_kwargs=fit_kwargs, compile_kwargs=compile_kwargs,
plot_name=plot_name, class_weight=class_weight, print_debug=False)
scores = self.get_mean_scores(scores_arrays)
else:
m = clf()
scores = model.train_model(
m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
class_weight=class_weight)
if not method:
method = 'mlp'
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'kernel': method,
'features': feature_name,
'pca_components': components_num
}
m = clf()
model.train_model(
m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
class_weight=class_weight) # to have a clean fitted model
test_scores = model.test_model(m, test_x, test_y, is_lstm=True)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
if should_return_model:
return {**res, **test_scores, 'arch': m.to_json()}, m
else:
return {**res, **test_scores, 'arch': m.to_json()}
def train_lstm(self, clf, feature_name='', should_validate=True, method=None,
fit_kwargs=None, compile_kwargs=None, n_steps=None,
plot_name=None, class_weight=None, should_return_model=False):
logger.info('Training %s r=%s s=%s: clf=%s', self.stock, self.r, self.s, clf)
train_x = self.df[self.feature_columns_dict[feature_name]].values
test_x = self.df_test[self.feature_columns_dict[feature_name]].values
train_y = self.df['mid_price_indicator'].values
test_y = self.df_test['mid_price_indicator'].values
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
test_x = pca.transform(test_x)
if n_steps:
train_x, train_y = self.split_sequences(train_x, train_y, n_steps=n_steps)
test_x, test_y = self.split_sequences(test_x, test_y, n_steps=n_steps)
else:
train_x = np.reshape(train_x, (train_x.shape[0], 1, train_x.shape[1]))
test_x = np.reshape(test_x, (test_x.shape[0], 1, test_x.shape[1]))
if should_validate:
scores_arrays, m = model.validate_model_lstm(
clf, train_x, train_y, fit_kwargs=fit_kwargs, compile_kwargs=compile_kwargs,
plot_name=plot_name, class_weight=class_weight, print_debug=False)
scores = self.get_mean_scores(scores_arrays)
else:
m = clf()
scores = model.train_model(
m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
class_weight=class_weight)
if not method:
method = 'lstm'
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'kernel': method,
'features': feature_name,
'pca_components': components_num
}
# m = clf()
# model.train_model(
# m, train_x, train_y, compile_kwargs=compile_kwargs, fit_kwargs=fit_kwargs, is_lstm=True,
# class_weight=class_weight)
test_scores = model.test_model(m, test_x, test_y, is_lstm=True)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
if should_return_model:
return {**res, **test_scores, 'arch': m.to_json()}, m
else:
return {**res, **test_scores, 'arch': m.to_json()}
def train_clf(self, clf, feature_name='', should_validate=True, method=None, class_weight=None):
logger.info('Training %s r=%s s=%s: clf=%s',
self.stock, self.r, self.s, clf)
train_x = self.df[self.feature_columns_dict[feature_name]]
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
if should_validate:
scores_arrays = model.validate_model(clf, train_x, self.df['mid_price_indicator'],
class_weight=class_weight)
scores = self.get_mean_scores(scores_arrays)
else:
scores = model.train_model(clf, train_x, self.df['mid_price_indicator'], class_weight=class_weight)
if not method:
method = 'logistic'
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'kernel': method,
'features': feature_name,
'pca_components': components_num
}
test_scores = self.get_score_for_clf(clf, self.df_test, feature_name=feature_name, pca=pca)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
return {**res, **test_scores}
def train_svm(self, C=np.nan, gamma=np.nan, feature_name='', kernel='rbf', coef0=np.nan, should_validate=True,
class_weight=None):
logger.info('Training %s r=%s s=%s: kernel=%s C=%s gamma=%s coef0=%s',
self.stock, self.r, self.s, kernel, C, gamma, coef0)
        # np.nan is truthy, so `if C and gamma and coef0` would always take the first
        # branch with the default NaN arguments; check for NaN explicitly instead.
        if not (np.isnan(C) or np.isnan(gamma) or np.isnan(coef0)):
            clf = SVC(kernel=kernel, C=C, gamma=gamma, coef0=coef0)
        elif not (np.isnan(C) or np.isnan(gamma)):
            clf = SVC(kernel=kernel, C=C, gamma=gamma)
        else:
            clf = SVC(kernel=kernel)
train_x = self.df[self.feature_columns_dict[feature_name]]
pca = self.get_pca(feature_name)
if pca:
train_x = pca.transform(train_x)
if should_validate:
scores_arrays = model.validate_model(clf, train_x, self.df['mid_price_indicator'],
class_weight=class_weight)
scores = self.get_mean_scores(scores_arrays)
else:
scores = model.train_model(clf, train_x, self.df['mid_price_indicator'], class_weight=class_weight)
components_num = None
if pca:
components_num = pca.n_components_
res = {
**scores,
'stock': self.stock,
'C': C,
'gamma': gamma,
'coef0': coef0,
'kernel': kernel,
'features': feature_name,
'pca_components': components_num
}
test_scores = self.get_score_for_clf(clf, self.df_test, feature_name=feature_name, pca=pca)
logger.info('Finished training %s %s', self.stock, {**res, **test_scores})
return {**res, **test_scores}
def _read_stock(self):
gdf_filename = self.gdf_filename_pattern.format(self.stock, self.r, self.s)
reg_filename = '{}'.format(self.stock)
logger.debug('Will read %s and %s', gdf_filename, reg_filename)
d = lob.load_prepared_data(
gdf_filename, data_dir=self.data_dir, length=self.data_length)
if len(d) == 2:
df, df_test = d
else:
return pd.DataFrame(), | pd.DataFrame() | pandas.DataFrame |
import os
import warnings
import argparse
from pathlib import Path
import netCDF4
import pandas as pd
import numpy as np
from geotiff import GeoTiff
from tqdm import tqdm
from sklearn.metrics import pairwise_distances
from sklearn.model_selection import GroupShuffleSplit
from tools.settings import CLIMATE_OPT, CAT_OPT, FEATURES_COLS, START_VAL_COLS, TARGET_COLS
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(prog='Data preparation',
                                description =
                                """
                                This script builds the dataset used to train the Neural ODE.
                                Under the working hypothesis, the inputs are:
                                - t2m (air temperature at 2 m)
                                - td2m (dew point at 2 m)
                                - ff (wind speed)
                                - R (precipitation over 6, 12 or 24 hours, optional)
                                - phi(t) (periodic function of time)
                                - climate (temp, soil, precip) (climatological temperature, soil-moisture and precipitation characteristics)
                                - soil type
                                - cover type (land-cover type)
                                - kult type (cultivated crop type)
                                - val_1, val_2 (productive soil moisture storage at time t0)
                                The output is the derivative of the soil moisture storage:
                                - new val_1, val_2 (productive soil moisture storage at time t1)
                                """,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-d', '--dist', type=float, default=1, help='Drop stations farther away than this distance')
parser.add_argument('-ts', '--test_size', type=float, default=0.1, help='Fraction of the data held out for validation')
opt = parser.parse_args()
def load_syn(path: str) -> pd.DataFrame:
syn = pd.read_csv(path, usecols=['s_ind', 'datetime', 't2m', 'td2m', 'ff', 'R12'])
syn.loc[syn.datetime.astype(str).str.len() == 7, 'datetime'] = '0'+\
syn[syn.datetime.astype(str).str.len() == 7].datetime.astype(str)
syn.loc[:, 'datetime'] = pd.to_datetime(syn.datetime, format='%y%m%d%H')
return syn
def load_agro(path: str) -> pd.DataFrame:
agro = pd.read_csv(path)
agro.loc[:,'datetime'] = pd.to_datetime(agro.year.astype(str)+agro.month.astype(str)\
+ agro.day.astype(str)+np.ones(len(agro), dtype='str'), format='%Y%m%d%H', origin='unix')
agro = agro.drop(['month', 'day'], axis=1)
agro.loc[:,'prev'] = agro.dec - 1
return agro
def agro_to_event_period(df: pd.DataFrame) -> pd.DataFrame:
df = df.merge(df, left_on=['ind', 'dec', 'year'], right_on=['ind', 'prev', 'year'], suffixes=('', '_next'))
df.loc[:, 'dur'] = (df.datetime_next - df.datetime).dt.days.astype(int)
df.loc[df.dur == 11, 'datetime_next'] = df[df.dur == 11].datetime_next-pd.Timedelta('1d')
df.loc[:, 'dur'] = (df.datetime_next - df.datetime).dt.total_seconds().astype(int)
new_agro = pd.to_datetime((np.repeat(df.datetime.view(int)//int(1e9), 243)\
+ np.hstack([np.arange(0, v, pd.Timedelta('1h').total_seconds()) for v in df.dur+10800.0]))*int(1e9))
new_agro = df.join(new_agro.rename('ts'), how='outer')
return new_agro
def data_fusion(agro: pd.DataFrame, syn: pd.DataFrame, pairs: pd.DataFrame, max_dist: float = 50) -> pd.DataFrame:
syn = syn.merge(pairs[pairs.dist < max_dist], on='s_ind')
data = agro.merge(syn, left_on=['ind', 'ts'], right_on=['ind','datetime'], how='inner')
agr = data.groupby(['ind', 'year', 'dec']).val_1.count()
data = data.set_index(['ind', 'year', 'dec']).loc[agr[agr == 81].index].reset_index()
data.loc[:, ['t2m', 'td2m', 'ff']] = data[['t2m', 'td2m', 'ff']].interpolate(method='polynomial', order=3)
for i,j in data[['s_ind','dec']].drop_duplicates().values:
data.loc[(data.s_ind == i) & (data.dec == j), 'R12'] = \
(data[(data.s_ind == i) & (data.dec == j)].R12/4).fillna(method='bfill', limit=3).fillna(0)
return data
def load_climate(optinons: dict, pairs: pd.DataFrame) -> pd.DataFrame:
path = list(optinons.keys())[0]
nc = netCDF4.Dataset(path)
latmask = np.argmin(pairwise_distances(nc['lat'][:].data.reshape(-1, 1),
pairs['s_lat'].values.reshape(-1, 1)), axis=0)
lonmask = np.argmin(pairwise_distances(nc['lon'][:].data.reshape(-1, 1),
pairs['s_lon'].values.reshape(-1, 1)), axis=0)
climate = pd.DataFrame()
for i in range(12):
df = pairs[['s_ind']].copy()
for path in optinons.keys():
nc = netCDF4.Dataset(path)
df.loc[:, 'month'] = i+1
df.loc[:, optinons[path]] = nc[optinons[path]][i].data[latmask, lonmask]
climate = pd.concat((climate, df), axis=0, ignore_index=True)
return climate.drop_duplicates()
def decode_tif(lat: np.array, lon: np.array, tifname: str) -> np.array:
lon1 = lon.min()
lon2 = lon.max()
lat1 = lat.min()
lat2 = lat.max()
arr = np.array(GeoTiff(tifname).read_box([(lon1, lat1), (lon2, lat2)]))
ilon = np.round((lon-lon1)/(lon2-lon1)*(arr.shape[1]-1)).round().astype(np.int64)
ilat = np.round((lat2-lat)/(lat2-lat1)*(arr.shape[0]-1)).round().astype(np.int64)
out = np.array([arr[ilat[i], ilon[i]] for i in range(ilon.shape[0])])
return out
def load_soil_cats(pathes: list, pairs: pd.DataFrame) -> pd.DataFrame:
lat, lon = pairs.loc[:, 'lat'].to_numpy().astype(int), pairs.loc[:, 'lon'].to_numpy().astype(int)
pairs.loc[:, 'soiltype'] = decode_tif(lat, lon, pathes['soil']['tiff'])
pairs.loc[:, 'covertype'] = decode_tif(lat, lon, pathes['cover']['tiff'])
soil_df = pd.read_csv(pathes['soil']['description'], sep='\t')
cover_df = pd.read_excel(pathes['cover']['description'], usecols=['Value', 'Label'])
soils = pairs.merge(cover_df, left_on='covertype', right_on='Value')\
.merge(soil_df, left_on='soiltype', right_on='GRIDCODE')\
.drop(['Value', 'GRIDCODE', 'lat', 'lon', 's_ind', 'dist', 's_lat', 's_lon'], axis=1)\
.rename(columns={'Label': 'cover_name', 'SOIL_ORDER': 'soil_label', 'SUBORDER': 'suborder'})\
.astype({'covertype':'int64'})
soils.loc[:, 'covertype'] = soils.covertype.map(
{elm: i for i, elm in enumerate(soils.covertype.sort_values().unique())}).astype(int)
soils.loc[:, 'soiltype'] = soils.soiltype.map(
{elm: i for i, elm in enumerate(soils.soiltype.sort_values().unique())}).astype(int)
soils_label = pd.DataFrame()
soils_label.loc[:, 'soiltypes'] = {i: elm for i, elm in enumerate(soils.soil_label.unique())}.keys()
soils_label.loc[:, 'soil_label'] = {i: elm for i, elm in enumerate(soils.soil_label.unique())}.values()
soils = soils.merge(soils_label, on='soil_label')\
.drop('soiltype', axis=1)\
.rename(columns={'soiltypes': 'soiltype'})
return soils
def save_to_npz(data: pd.DataFrame, features: list, start: list, target: list, test_size: float = 0.1) -> None:
k = True
while k:
ind = np.random.choice(data.ind.unique())
year = np.random.choice(data.ts.dt.year.unique())
n = data[(data.ind == ind) & (data.ts.dt.year == year)].dec.nunique()
k = (n > 16) or (n < 11)
data = data.set_index(['ind', 'year', 'dec'])
sample_idx = data.loc[[ind], [year], :].index.unique().to_numpy()
tv_data = data.drop(sample_idx)
gss = GroupShuffleSplit(n_splits=1, test_size=test_size, random_state=42)
train_idx, val_idx = next(gss.split(X=tv_data, y=tv_data[['val_1_next', 'val_2_next']], groups=tv_data.reset_index()['year']))
train_idx, val_idx = np.unique(tv_data.index.to_numpy()[train_idx]), np.unique(tv_data.index.to_numpy()[val_idx])
all_idx = {'train': train_idx, 'val': val_idx, 'sample': sample_idx}
for key in all_idx.keys():
for ind, year, dec in tqdm(all_idx[key], desc=f'Saving {key} to npz'):
v = data.loc[ind, year, dec][features].to_numpy()
z0 = data.loc[ind, year, dec][start].to_numpy()[0]
z1 = data.loc[ind, year, dec][target].to_numpy()[0]
np.savez_compressed(f'data/dataset/{key}/{ind}_{year}_{dec}.npz',
v=v, z0=z0, z1=z1, ind=ind, year=year, dec=dec)
alls = set(Path('data/dataset').rglob('*.npz'))
    for path in tqdm(alls, desc="Scanning saved samples for NaN values"):
file = np.load(path)
v, z0, z1 = file['v'], file['z0'], file['z1']
if np.isnan(v).sum() or np.isnan(z0).sum() or np.isnan(z1).sum():
os.remove(path)
def clear_syn(syn: pd.DataFrame):
    syn.loc[syn.R12 == 9990, 'R12'] = 0.1
syn = syn[syn.t2m.abs() < 60]
syn = syn[syn.td2m.abs() < 60]
syn = syn[syn.ff <= 30]
return syn
def cat_prep(data: pd.DataFrame):
cover_frac = data[['cover_name']].value_counts().reset_index().rename(columns={0:'perc'})
cover_frac.loc[:, 'perc'] = cover_frac.perc/cover_frac.perc.sum()*100
cover_frac.loc[:, 'cover_name_new'] = cover_frac.cover_name
cover_frac.loc[cover_frac.perc < 5, 'cover_name_new'] = 'Other'
cover_frac = cover_frac.drop(['perc'], axis=1)
soil_frac = data[['soil_label']].value_counts().reset_index().rename(columns={0:'perc'})
soil_frac.loc[:, 'perc'] = soil_frac.perc/soil_frac.perc.sum()*100
soil_frac.loc[:, 'soil_label_new'] = soil_frac.soil_label
soil_frac.loc[soil_frac.perc < 2, 'soil_label_new'] = 'Other'
soil_frac = soil_frac.drop(['perc'], axis=1)
cult = pd.read_csv('data/agro/cult.csv', sep=';').rename(columns={'id': 'kult'})
data = data.merge(cover_frac, on='cover_name')\
.merge(soil_frac, on='soil_label')\
.merge(cult, on='kult')\
.drop(['cover_name', 'soil_label'], axis=1)\
.rename(columns={'cover_name_new': 'cover_name', 'soil_label_new': 'soil_label'})
data.loc[:, 'soiltype'] = data.soil_label.map({elm: i for i,elm in enumerate(data.soil_label.unique())})
data.loc[:, 'covertype'] = data.cover_name.map({elm: i for i,elm in enumerate(data.cover_name.unique())})
data.loc[:, 'culttype'] = data.type.map({elm: i for i,elm in enumerate(data.type.unique())})
return data
if __name__ == '__main__':
paths = {
'agro': 'data/agro/agro.csv',
'pairs': 'data/pairs/pairs.csv',
'syn': list(Path('data/syn').rglob('*.csv'))
}
agro = load_agro(paths['agro'])
agro = agro_to_event_period(agro)
pairs = pd.read_csv(paths['pairs'])
climate = load_climate(CLIMATE_OPT, pairs.copy())
soil = load_soil_cats(CAT_OPT, pairs.copy())
syn = pd.concat([load_syn(file) for file in tqdm(paths['syn'], desc='Load synoptical data')], axis=0)
syn = clear_syn(syn.copy())
data = data_fusion(agro.copy(), syn.copy(), pairs.copy(), max_dist=opt.dist)
data = data.merge(climate, left_on=['s_ind', data.ts.dt.month], right_on=['s_ind','month'])\
.merge(soil, on='ind')
data.loc[:, 'phi'] = np.sin(((data.ts-pd.Timestamp('1970-01-01'))/pd.Timedelta(seconds=1)/ | pd.Timedelta(days=365.24) | pandas.Timedelta |
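    # (Added note / sketch, with an assumed formula.) The 'phi' feature is a periodic
    # encoding of the time of year, so that dates close in the seasonal cycle map to
    # nearby values; one common form is:
    #
    #   years = (ts - pd.Timestamp('1970-01-01')) / pd.Timedelta(days=365.24)
    #   phi = np.sin(2 * np.pi * years)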
from __future__ import absolute_import, division, print_function
import sys
import os
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path = [os.path.dirname(os.path.dirname(curr_path)), curr_path] + sys.path
curr_path = None
try:
import cPickle as pickle
except:
import pickle
import logging
import csv
import h5py
import numpy as np
import pandas as pd
import re
import auto_deepnet.utils.exceptions as exceptions
logger = logging.getLogger("auto_deepnet")
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
'''
function: save_pickle_data
inputs:
- file_path: string pathname to save data to
- data_frame: pandas data_frame to save to disk in any picklable format
- pandas_format (optional): whether to save as a pandas dataframe or as a numpy array
- append (optional): whether to append data to preexisting data. Requires data to be in the same format
- mode (optional): The mode to open file as
description:
helper function to save any data to disk via pickling
'''
def save_pickle_data(file_path, data_frame, **kwargs):
logger.info("Opening pickle file {} to write data...".format(file_path))
pandas_format = kwargs.get('pandas_format', True)
append = kwargs.get('append', False)
mode = kwargs.get('mode', 'wb')
if append and os.path.isfile(file_path):
logger.info("Opening file to append data...")
try:
data_frame = pd.concat((load_pickle_data(file_path), data_frame))
except Exception as e:
logger.exception("Error appending data from {}: {}".format(file_path), e)
try:
if 'pandas_format' not in kwargs or pandas_format:
data_frame.to_pickle(file_path)
else:
with open(file_path, mode) as f:
pickle.dump(data_frame.values, f)
except Exception as e:
logger.exception("Failed with Error {0}".format(e))
raise exceptions.FileSaveError
logger.info("Successfully saved pickle data")
'''
function: load_pickle_data
inputs:
- file_path: string pathname to load data from
    - mode (optional): the mode to open file as
description:
    helper function to load any pickled data from disk
'''
def load_pickle_data(file_path, **kwargs):
mode = kwargs.get('mode', 'rb')
logger.info("Opening pickle file {} to read...".format(file_path))
try:
with open(file_path, mode) as f:
data = pickle.load(f)
except Exception as e:
logger.exception("Failed with Error {0}".format(e))
raise exceptions.FileLoadError
logger.info("Successfully read pickle data")
return data
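# (Added usage sketch; the file name is illustrative.) Round-tripping a DataFrame
# with the two helpers above:
#
#   df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
#   save_pickle_data("example.pkl", df)               # writes a pandas pickle
#   restored = load_pickle_data("example.pkl")        # returns the DataFrame
#   save_pickle_data("example.pkl", df, append=True)  # concatenates with the saved data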
'''
function: save_hdf5_data
inputs:
- file_path: string pathname to save data to
- data_frame: the pandas dataframe to save to disk
- key (optional): The name to call the dataset
- pandas_format (optional): whether to save as a pandas structure or default hdf5
- mode (optional): The mode to open file as
- format (optional): whether to save as a table or fixed dataset
- append (optional): Whether data should be appended or replaced
'''
def save_hdf5_data(file_path, data_frame, **kwargs):
pandas_format = kwargs.get('pandas_format', True)
key = kwargs.get('key', 'data')
mode = kwargs.get('mode', 'a')
format = kwargs.get('format', 'table')
append = kwargs.get('append', False)
logger.info("Opening HDF5 file {} to write data...".format(file_path))
try:
if pandas_format:
with pd.HDFStore(file_path, mode=mode) as f:
if key in f and not append:
f.remove(key)
f.put(key=key, value=data_frame, format=format, append=append)
else:
if key == None:
logger.error("Need a key when saving as default HDF5 format")
raise exceptions.FileSaveError
with h5py.File(file_path, mode) as f:
if key in f:
if append:
data_frame = pd.concat((pd.DataFrame(f[key]), data_frame))
del f[key]
f.create_dataset(key, data=data_frame.values)
except Exception as e:
logger.exception("Failed with Error {0}".format(e))
raise exceptions.FileSaveError
logger.info("Successfully saved hdf5 data")
'''
function: load_hdf5_file
inputs:
- file_path: string pathname to load data from
- key (optional): name of the dataset
- pandas_format (optional): whether the file was saved in pandas format
- mode (optional): The mode to open the file as
description:
helper function to load an hdf5 file from disk
'''
def load_hdf5_data(file_path, **kwargs):
key = kwargs.get('key', None)
pandas_format = kwargs.get('pandas_format', True)
mode = kwargs.get('mode', 'r')
logger.info("Opening HDF5 file {} to read...".format(file_path))
try:
if pandas_format:
data = | pd.read_hdf(file_path, key=key, mode=mode) | pandas.read_hdf |
from albumentations.augmentations.transforms import Normalize
import torch.nn as nn
import torchvision.models as models
from torch.utils.data import Dataset
import torch
import albumentations as A
from albumentations.pytorch import ToTensorV2
from pathlib import Path
import numpy as np
import re
import umap
import pandas as pd
from PIL import Image
from sklearn.preprocessing import StandardScaler
import seaborn as sns
import matplotlib.pyplot as plt
""" Programm to evaluate if there is a significant domain gap between two datasets
To see if there is a domain gap, a pretrained Resnet50 is used to extract features from both datasets and UMAP is used for unsupervised clustering. When distinct clusters for both datasets
are formed, there is a domain gap present.
The domain gap can be evaluated for native Tharun and Thompson and upscaled Nikiforov as well as native Nikiforov and downscaled Tharun and Thompson.
Furthermore, it can be evaluated on the native version on both datasets.
"""
native_dataset = "N" # T, N or both
N_folder_20x = Path(__file__).parent.joinpath("..", "datasets", "Nikiforov").resolve()
N_folder_40x = Path(__file__).parent.joinpath("..", "datasets", "Nikiforov_upscale2x").resolve()
T_folder_40x = Path(__file__).parent.joinpath("..", "datasets", "TharunThompson").resolve()
T_folder_20x = Path(__file__).parent.joinpath("..", "datasets", "TharunThompson_downscale2x").resolve()
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff',
'.TIFF', '.tif', '.TIF']
def get_image_paths(folder, substring=None):
image_paths = []
for file in folder.iterdir():
if any(file.suffix == extension for extension in IMG_EXTENSIONS):
if substring==None:
image_paths.append(file)
else:
if substring in file.name:
image_paths.append(file)
return np.asarray(image_paths)
def merge_path_gt(image_paths, ground_truth, dataset):
patient_numbers = np.zeros(len(image_paths))
diagnose_grouped = []
T_paths = np.asarray(image_paths)
for i, image_path in enumerate(image_paths):
# if patient has multiple images e.g. 1a, 1b, ... a,b, ... is removed
patient_numbers[i] = re.sub('[^0-9]', '', image_path.stem.split("_")[0])
diagnose_grouped.append(ground_truth[ground_truth["sample"]==patient_numbers[i]]["diagnose_grouped"].values[0])
unique_patient_numbers = np.unique(patient_numbers)
merged_info = pd.DataFrame(np.array([image_paths, patient_numbers, diagnose_grouped]).transpose(), columns=["path", "patient_number", "diagnose_grouped"])
merged_info["dataset"]= dataset
return merged_info
def draw_scatter(data, scatter_path, target):
umap_plt = sns.scatterplot(data=data, x="UMAP 1", y="UMAP 2", hue=target)
#umap_plt.set(title="Umap thyroid tumor")
umap_fig = umap_plt.get_figure()
umap_fig.savefig(scatter_path, bbox_inches="tight")
plt.close(umap_fig)
def apply_umap(measures, features, native_dataset, target="target", hparams={}):
# only keep patient, feature selection, diagnose
measures_umap = measures.copy()
scaler = StandardScaler()
measures_umap.reset_index(inplace=True)
measures_umap[features] = pd.DataFrame(scaler.fit_transform(measures_umap[features]), columns=features)
reducer = umap.UMAP(**hparams)
embedding = reducer.fit_transform(measures_umap[features].values)
embedding = pd.DataFrame(list(zip(embedding[:,0], embedding[:,1], measures_umap[target], measures_umap["path"])), columns=["UMAP 1", "UMAP 2", target, "path"])
draw_scatter(embedding, Path(__file__).parent.joinpath("domain_gap_"+target+"_native"+native_dataset+"_umap.png"), target)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class DomainGapDataset(Dataset):
def __init__(self, dataset_info, transform=None):
self.dataset_info = dataset_info
self.transform = transform
def __len__(self):
return len(self.dataset_info)
def __getitem__(self, index):
img = Image.open(self.dataset_info["path"][index])
target = self.dataset_info["diagnose_grouped"][index]
if self.transform is not None:
data = self.transform(image=np.array(img), target= target)
return data
def extract_dl_features(image_info, features_path):
trans = A.Compose([
A.Normalize(),
ToTensorV2()
])
dataset = DomainGapDataset(image_info, transform=trans)
loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
net = models.resnet50(pretrained=True)
net.fc = Identity()
net.to(torch.device("cuda"))
net.eval()
dl_features = np.zeros([len(loader), 2048])
with torch.no_grad():
for step, item in enumerate(loader):
item["image"]= item["image"].to(torch.device("cuda"))
features = net(item["image"]).cpu().numpy()
dl_features[step,:] = features.squeeze()
columns = ["feature_"+str(i) for i in range(dl_features.shape[1])]
dl_features_pd = | pd.DataFrame(data=dl_features, columns=columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import skimage.measure as measure
import skimage.morphology as morphology
from pcnaDeep.data.utils import filter_edge
def split_frame(frame, n=4):
"""Split frame into several quadrants.
Args:
frame (numpy.ndarray): single frame slice to split, shape HWC, if HW, will expand C.
n (int): split count, either 4 or 9.
Returns:
numpy.ndarray: stack of split slice, order by row.
"""
if n not in [4, 9]:
raise ValueError('Split number should be 4 or 9.')
if frame.shape[0] != frame.shape[1]:
raise ValueError('Frame should be square.')
if len(frame.shape) < 3:
frame = np.expand_dims(frame, axis=2)
if frame.shape[0] / n != int(frame.shape[0] / n):
pd_out = (frame.shape[0] // n + 1) * n - frame.shape[0]
frame = np.pad(frame, ((0, pd_out), (0, pd_out), (0, 0)), 'constant', constant_values=(0,))
row = np.split(frame, np.sqrt(n), axis=0)
tile = []
for r in row:
tile.extend(np.split(r, np.sqrt(n), axis=1))
return np.stack(tile, axis=0)
def join_frame(stack, n=4, crop_size=None):
"""For each n frame in the stack, join into one complete frame (by row).
Args:
stack (numpy.ndarray): tiles to join.
n (int): each n tiles to join, should be either 4 or 9.
crop_size (int): crop the square image into certain size (lower-right), default no crop.
Returns:
numpy.ndarray: stack of joined frames.
"""
if n not in [4, 9]:
raise ValueError('Join tile number should either be 4 or 9.')
if stack.shape[0] < n or stack.shape[0] % n != 0:
raise ValueError('Stack length is not multiple of tile count n.')
p = int(np.sqrt(n))
out_stack = []
stack = stack.astype('uint16')
for i in range(int(stack.shape[0] / n)):
count = 1
frame = []
for j in range(p):
row = []
for k in range(p):
new_stack, count_add = relabel_seq(stack[int(j * p + k + i * n), :], base=count)
count += count_add
row.append(new_stack)
row = np.concatenate(np.array(row), axis=1)
frame.append(row)
frame = np.concatenate(np.array(frame), axis=0)
out_stack.append(frame)
out_stack = np.stack(out_stack, axis=0)
if crop_size is not None:
out_stack = out_stack[:, :crop_size, :crop_size, :]
if np.max(stack) <= 255:
out_stack = out_stack.astype('uint8')
else:
out_stack = out_stack.astype('uint16')
return out_stack
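# (Added usage sketch; the 1200x1200 size is only an example.) Tiling a labelled
# mask and stitching it back together with the two helpers above:
#
#   frame = np.random.randint(0, 5, (1200, 1200)).astype('uint16')
#   tiles = split_frame(frame, n=4)   # shape (4, 600, 600, 1), tiles ordered by row
#   stack = join_frame(tiles, n=4)    # shape (1, 1200, 1200, 1), labels offset per tile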
def join_table(table, n=4, tile_width=1200):
"""Join object table according to tiled frames.
Args:
table (pandas.DataFrame): object table to join,
essential columns: frame, Center_of_the_object_0 (x), Center_of_the_object_1 (y).
The method will join frames by row.
n (int): each n frames form a tiled slice, either 4 or 9.
tile_width (int): width of each tile.
Returns:
pandas.DataFrame: object table for further processing (tracking, resolving)
"""
NINE_DICT = {0: (0, 0), 1: (0, 1), 2: (0, 2), 3: (1, 0), 4: (1, 1),
5: (1, 2), 6: (2, 0), 7: (2, 1), 8: (2, 2)}
FOUR_DICT = {0: (0, 0), 1: (0, 1), 2: (1, 0), 3: (1, 1)}
if n not in [4, 9]:
raise ValueError('Join tile number should either be 4 or 9.')
if (np.max(table['frame']) + 1) < n or (np.max(table['frame']) + 1) % n != 0:
raise ValueError('Stack length is not multiple of tile count n.')
out = | pd.DataFrame(columns=table.columns) | pandas.DataFrame |
import random
import cv2
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
from .keypoint_encoder import KeypointEncoder
class FashionAIKeypoints(Dataset):
def __init__(self, opt, phase='train'):
self.opt = opt
self.phase = phase
self.encoder = KeypointEncoder()
# Read csv
# TODO: Need a cleaner way to combine pathlib and path in pd columns.
if phase == 'test':
data_dir = opt.db_path / 'r2_test_b/'
anno_df = pd.read_csv(data_dir / 'test.csv')
anno_df['image_path'] = str(data_dir) + '/' + anno_df['image_id']
else:
data_dir0 = opt.db_path / 'wu_train/'
anno_df0 = | pd.read_csv(data_dir0 / 'Annotations/annotations.csv') | pandas.read_csv |
"""Support Disperse I/O.
The reader is based on a description of the structure using Kaitai (https://kaitai.io/).
"""
import numpy as np
import pandas as pd
from ..utilities.decorators import read_files
from ..utilities.types import FloatArrayType, PathType
from .disperse_reader import DisperseReader
class Disperse:
"""Read a disperse NDskl file.
Parameters
----------
fname : str, filename
"""
@read_files(1)
def __init__(self, fname: PathType):
self.fname = fname
self.read()
def read(self):
ds = DisperseReader.from_file(self.fname)
# Gather data
nnode, nnode_data, ndim = ds.header.nnode, ds.header.nnode_data, ds.header.ndim
node_pos = np.asarray(ds.data.node_pos).reshape((nnode, ndim))
node_data = np.asarray(ds.data.node_data).reshape((nnode, nnode_data))
nseg, nseg_data = ds.header.nseg, ds.header.nseg_data
seg_data = np.asarray(ds.data.seg_data).reshape((nseg, nseg_data))
# # Gather information about nodes
_v1 = pd.DataFrame(
[
(*node_pos[s0.pos_index], s0.index, s0.flags, s0.type)
for s0 in ds.data.node_data_int
],
columns=["x", "y", "z", "index", "flags", "type"],
).set_index("index")
_v2 = pd.DataFrame(
node_data,
columns=[_.replace("\x00", "") for _ in ds.header.node_data_info],
index=_v1.index,
)
# Parse what's integer as integer
int_columns = ["parent_index", "persistence_pair"]
for c in int_columns:
_v2[c] = _v2[c].astype(int)
node_ds = pd.concat((_v1, _v2), axis=1)
node_ds["type_s"] = [
("void", "wall", "filament", "peak")[_] for _ in node_ds.type
]
# # Gather information about segments
_v1 = pd.DataFrame(
[
(s0.index, s0.node_ids[0], s0.node_ids[1], s0.prev_seg, s0.next_seg)
for s0 in ds.data.seg_data_int
],
columns=["index", "node_start", "node_end", "seg_prev", "seg_next"],
)
_v1 = _v1.set_index("index")
_v2 = pd.DataFrame(
seg_data,
columns=[_.replace("\x00", "") for _ in ds.header.seg_data_info],
index=_v1.index,
)
# Parse what's integer as integer
int_columns = ["type", "orientation"]
for c in int_columns:
_v2[c] = _v2[c].astype(int)
# Replace off-bound values by dummy values
_v1.loc[_v1["seg_prev"] > ds.header.nseg, "seg_prev"] = ds.header.nseg
_v1.loc[_v1["seg_next"] > ds.header.nseg, "seg_next"] = ds.header.nseg
seg_ds = | pd.concat((_v1, _v2), axis=1) | pandas.concat |
from this import d
import keras.losses
import matplotlib.pyplot as plt
import streamlit as st
import tensorflow as tf
from keras import layers
from keras.models import Sequential
from random import randint
import visualkeras
import pandas as pd
from streamlit_drawable_canvas import st_canvas
from cv2 import resize
from os.path import exists
import numpy as np
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data(path="mnist.npz")
x_train, x_test = x_train / 255.0, x_test / 255.0
st.title("rAI Generator")
"""
Hi all, I am Rai, and this is my project, rAI. rAI is an educational, interactive data science webapp built to help familiarize people
with simple neural networks by constructing a model that works great on paper, but is *exceedingly bad* in real life (like me, hence rAI).
rAIs are bad, like really bad, because they are the simplest a neural network can be. No convolutional layers, no LSTMs, just simple Dense layers feeding into each other.
This webapp is built with people who have zero experience with ML in mind, though, so this oversimplicity is at least intended.
"""
st.header("Oversimplification of a Neural Network")
st.write("When the topic of neural network is mentioned, some might visualize something like this in their head:")
ex = Sequential()
ex.add(layers.Dense(5, input_shape=(5,)))
ex.add(layers.Dense(8))
ex.add(layers.Dense(4))
ex._layers = ex._self_tracked_trackables
visualkeras.graph_view(ex, to_file="ex.png")
st.image("ex.png", caption="Simple neural network architecture")
"""
A web of *things* connecting with other *things* by *things*, and they would be right! Except that these things are more than just things, they're *numbers*.
Every node in the network, called a "neuron", holds a number (in this case) between 0 and 1, called its "activation". In the first layer of neurons,
you input the values for their activations, and whatever the activations in the final layer are, that is your output. How you *get* from the first layer to the
final layer is the goal of every neural network.
So how exactly do you get from one layer to another? That's right, by doing *math*. In a Dense layer, every neuron is connected to all neurons in the preceding layer,
hence *Dense*. A neuron connected this way has its activation calculated as the sum of the activations of all neurons connected to it,
each multiplied by the strength of its connection, or "weight". For example, the activation of a neuron in layer two would be calculated by:
"""
st.latex(r"\sum_{n=1}^{5} a_n \cdot w_n")
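# (Added worked example with made-up numbers.) For a neuron fed by the 5-neuron
# input layer above, the weighted sum followed by a sigmoid "flavor" would be:
#
#   a = np.array([0.2, 0.9, 0.1, 0.5, 0.7])    # activations of the previous layer
#   w = np.array([0.4, -1.2, 0.3, 0.8, -0.5])  # connection weights (assumed values)
#   z = np.dot(a, w)                           # weighted sum = -0.92
#   activation = 1 / (1 + np.exp(-z))          # sigmoid squashes it back into (0, 1), ~0.28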
"""
You can notice that this sum means the activations of later layers can deviate greatly from the original activation range of 0-1. Sometimes, this
is exactly what we want. Sometimes, we wish to avoid it. So how do we tell these neurons how to handle these deviations? That's right, we give them different
flavors, in the form of "activation functions". While some functions normalize the results to be between 0-1, like sigmoid and softmax,
others aim to do something completely different, like turning all negative values into 0s, like ReLU.
"""
x = tf.convert_to_tensor(np.arange(-5.0, 6.0, 0.1))
def softmax(x):
f_x = np.exp(x) / np.sum(np.exp(x))
return f_x
act_func_map = {
"Softmax": softmax(x),
"ReLU": keras.activations.relu(x),
"Sigmoid": keras.activations.sigmoid(x),
"Tanh": keras.activations.tanh(x)
}
act_func = st.selectbox("Pick an activation function to see what they do:", ("Softmax", "ReLU", "Sigmoid", "Tanh"))
st.line_chart( | pd.DataFrame(act_func_map[act_func], x) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# ## DS/CMPSC 410 Spring 2021
# ## Instructor: Professor <NAME>
# ## TA: <NAME> and <NAME>
# ## Lab 6: Movie Recommendations Using Alternating Least Squares
# ## The goals of this lab are for you to be able to
# ### - Use Alternating Least Squares (ALS) for recommending movies based on reviews of users
# ### - Be able to understand the rationale for splitting data into training, validation, and testing.
# ### - Be able to tune hyper-parameters of the ALS model in a systematic way.
# ### - Be able to store the results of evaluating hyper-parameters
# ### - Be able to select best hyper-parameters and evaluate the chosen model with testing data
# ### - Be able to improve the efficiency through persist or cache
# ### - Be able to develop and debug in ICDS Jupyter Lab
# ### - Be able to run Spark-submit (Cluster Mode) in Bridges2 for large movie reviews dataset
#
# ## Exercises:
# - Exercise 1: 5 points
# - Exercise 2: 5 points
# - Exercise 3: 5 points
# - Exercise 4: 10 points
# - Exercise 5: 5 points
# - Exercise 6: 15 points
# - Exercise 7: 30 points
# ## Total Points: 75 points
#
# # Due: midnight, February 28, 2021
# # Submission of Lab 6
# - 1. Completed Jupyter Notebook of Lab 6 (Lab6A.ipynb) for small movie review datasets (movies_2.csv, ratings_2.csv).
# - 2. Lab6B.py (for spark-submit on Bridges2, incorporated all improvements from Exercise 6, processes large movie reviews)
# - 3. The output file that has the best hyperparameter setting for the large movie ratings files.
# - 4. The log file of spark-submit on Lab6B.py
# - 5. A Word File that discusses (1) your answer to Exercise 6, and (2) your results of Exercise 7, including screen shots of your run-time information in the log file.
# ## The first thing we need to do in each Jupyter Notebook running pyspark is to import pyspark first.
# In[37]:
import pyspark
# ### Once we import pyspark, we need to import "SparkContext". Every spark program needs a SparkContext object
# ### In order to use Spark SQL on DataFrames, we also need to import SparkSession from PySpark.SQL
# In[38]:
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.types import StructField, StructType, StringType, LongType, IntegerType, FloatType
from pyspark.sql.functions import col, column
from pyspark.sql.functions import expr
from pyspark.sql.functions import split
from pyspark.sql import Row
from pyspark.mllib.recommendation import ALS
# from pyspark.ml import Pipeline
# from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler, IndexToString
# from pyspark.ml.clustering import KMeans
# ## We then create a Spark Session variable (rather than Spark Context) in order to use DataFrame.
# - Note: We temporarily use "local" as the parameter for master in this notebook so that we can test it in ICDS Roar. However, we need to change "local" to "Yarn" before we submit it to XSEDE to run in cluster mode.
# In[39]:
ss=SparkSession.builder.appName("lab6").getOrCreate()
# ## Exercise 1 (5 points) (a) Add your name below AND (b) replace the path below with the path of your home directory.
# ## Answer for Exercise 1
# - a: Your Name:
# ### <NAME>
# ## Exercise 2 (5 points) Modify the pathnames so that you can read the input CSV files (movies_2 and ratings_2 from ICDS Jupyter Lab) from the correct location.
# In[40]:
movies_DF = ss.read.csv("movies_2.csv", header=True, inferSchema=True)
# In[26]:
# movies_DF.printSchema()
# In[41]:
ratings_DF = ss.read.csv("ratings_2.csv", header=True, inferSchema=True)
# In[28]:
# ratings_DF.printSchema()
# In[42]:
ratings2_DF = ratings_DF.select("UserID","MovieID","Rating")
# In[30]:
# ratings2_DF.first()
# In[43]:
ratings2_RDD = ratings2_DF.rdd
# # 6.1 Split Data into Three Sets: Training Data, Evaluation Data, and Testing Data
# In[44]:
training_RDD, validation_RDD, test_RDD = ratings2_RDD.randomSplit([3, 1, 1], 137)
# ## Prepare input (UserID, MovieID) for validation and for testing
# In[45]:
import pandas as pd
import numpy as np
import math
# In[46]:
validation_input_RDD = validation_RDD.map(lambda x: (x[0], x[1]))
testing_input_RDD = test_RDD.map(lambda x: (x[0], x[1]) )
# # 6.2 Iterate through all possible combination of a set of values for three hyperparameters for ALS Recommendation Model:
# - rank (k)
# - regularization
# - iterations
# ## Each hyperparameter value combination is used to construct an ALS recommendation model using training data, but evaluate using Evaluation Data
# ## The evaluation results are saved in a Pandas DataFrame
# ``
# hyperparams_eval_df
# ``
# ## The best hyperparameter value combination is stored in 4 variables
# ``
# best_k, best_regularization, best_iterations, and lowest_validation_error
# ``
# # Improve the performance by using the persist() method
# In[52]:
training_RDD.persist()
validation_input_RDD.persist()
validation_RDD.persist()
# # Exercise 3 (15 points) Complete the code below to iterate through a set of hyperparameters to create and evaluate ALS recommendation models.
# In[53]:
## Initialize a Pandas DataFrame to store evaluation results of all combinations of hyper-parameter settings
hyperparams_eval_df = | pd.DataFrame( columns = ['k', 'regularization', 'iterations', 'validation RMS', 'testing RMS'] ) | pandas.DataFrame |
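# In[ ]:
# (Added minimal sketch of the tuning loop described in section 6.2; the value
# grids and the seed below are assumptions, not the official lab solution.)
index = 0
lowest_validation_error = float('inf')
best_k, best_regularization, best_iterations = None, None, None
for k in [4, 8, 12]:
    for regularization in [0.05, 0.1, 0.2]:
        for iterations in [15, 30]:
            model = ALS.train(training_RDD, k, iterations=iterations,
                              lambda_=regularization, seed=37)
            # Predict ratings for the held-out (UserID, MovieID) pairs.
            predictions_RDD = model.predictAll(validation_input_RDD).map(
                lambda x: ((x[0], x[1]), x[2]))
            ratings_and_preds_RDD = validation_RDD.map(
                lambda x: ((x[0], x[1]), x[2])).join(predictions_RDD)
            validation_error = math.sqrt(
                ratings_and_preds_RDD.map(lambda x: (x[1][0] - x[1][1]) ** 2).mean())
            hyperparams_eval_df.loc[index] = [k, regularization, iterations,
                                              validation_error, float('nan')]
            index += 1
            if validation_error < lowest_validation_error:
                best_k, best_regularization, best_iterations = k, regularization, iterations
                lowest_validation_error = validation_error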
import pandas as pd
from flask import Blueprint, jsonify, request, render_template, flash, redirect
from web_app.models import Strain, db, migrate
from web_app.services.strain_service import strains
import os
from dotenv import load_dotenv
load_dotenv()
API_KEY = os.getenv("API_KEY")
strain_routes = Blueprint("strain_routes", __name__)
@strain_routes.route("/strain-db-update")
def strain_update():
df = pd.DataFrame(strains)
for index, row in df.iterrows():
strain = Strain(strain_description=str(row["Strain_description"]),
strain_flavor_profile=str(row["flavors"]),
strain_relief_profile=str(row["feelings_symptoms"]),
strain_name=str(row["strain"]),
strain_type=str(row["strain_type"]))
db.session.add(strain)
db.session.commit()
return "Strain DB Update Successful"
@strain_routes.route('/db-refresh')
def refresh():
print("URL PARMS", dict(request.args))
if "api_key" in dict(request.args) and request.args["api_key"] == API_KEY:
print(type(db))
db.drop_all()
db.create_all()
strain_update()
return jsonify({"message": "DB RESET OK"})
else:
flash("OOPS Permission Denied", "danger")
return redirect("/recommendation_form")
@strain_routes.route('/ml_strains.json')
def ml_strains():
url = "https://raw.githubusercontent.com/bw-ft-medcab3-brian/ds/master/data/clean/strain_descriptions_with_machine.csv"
df = | pd.read_csv(url) | pandas.read_csv |
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
import nose
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
from pandas.core import config as cf
from pandas.compat import u
from pandas.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import (array_equivalent, isnull, notnull,
na_value_for_dtype)
_multiprocess_can_split_ = True
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
| tm.makeObjectSeries() | pandas.util.testing.makeObjectSeries |
import pytest
from mapping import mappings
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
from pandas.tseries.offsets import BDay
@pytest.fixture
def dates():
return pd.Series(
[TS('2016-10-20'), TS('2016-11-21'), TS('2016-12-20')],
index=['CLX16', 'CLZ16', 'CLF17']
)
def test_not_in_roll_one_generic_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:2]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
trans = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16']])
midx.names = ['date', 'contract']
cols = pd.Index([0], name='generic')
wts_exp = pd.DataFrame([1.0, 1.0], index=midx, columns=cols)
# with DatetimeIndex
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
# with tuple
wts = mappings.roller(tuple(timestamps), contract_dates,
mappings.static_transition, transition=trans)
assert_frame_equal(wts, wts_exp)
def test_not_in_roll_one_generic_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_non_numeric_column_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([["CL1"], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [("CL1", 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_finished_roll_pre_expiry_static_transition(dates):
contract_dates = dates.iloc[0:2]
ts = dates.iloc[0] + BDay(-2)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-9, -8]
transition = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_filtering_front_contracts_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:2]
ts = dates.iloc[1] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_roll_with_holiday(dates):
contract_dates = dates.iloc[-2:]
ts = pd.Timestamp("2016-11-17")
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
holidays = [np.datetime64("2016-11-18")]
# the holiday moves the roll schedule up one day, since Friday is
# excluded as a business day
wts = mappings.static_transition(ts, contract_dates, transition,
holidays)
wts_exp = [(0, 'CLZ16', 0.5, ts), (0, 'CLF17', 0.5, ts)]
assert wts == wts_exp
def test_not_in_roll_one_generic_zero_weight_back_contract_no_contract_static_transition(dates): # NOQA
contract_dates = dates.iloc[0:1]
ts = dates.iloc[0] + BDay(-8)
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts)]
assert wts == wts_exp
def test_aggregate_weights():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list)
idx = pd.MultiIndex.from_product([[ts], ["CLX16", "CLZ16"]],
names=["date", "contract"])
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_aggregate_weights_drop_date():
ts = pd.Timestamp("2015-01-01")
wts_list = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
wts = mappings.aggregate_weights(wts_list, drop_date=True)
idx = pd.Index(["CLX16", "CLZ16"], name="contract")
cols = pd.Index([0, 1], name="generic")
wts_exp = pd.DataFrame([[1.0, 0], [0, 1.0]], index=idx, columns=cols)
assert_frame_equal(wts, wts_exp)
def test_static_bad_transitions(dates):
contract_dates = dates.iloc[[0]]
ts = dates.iloc[0] + BDay(-8)
# transition does not contain 'front' column
cols = pd.MultiIndex.from_product([[0], ['not_front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition does not sum to one across rows
cols = pd.MultiIndex.from_product([[0], ['front', 'back']])
transition = pd.DataFrame([[1.0, 0.0], [0.5, 0.0], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
# transition is not monotonic increasing in back
transition = pd.DataFrame([[0.7, 0.3], [0.8, 0.2], [0.0, 1.0]],
index=idx, columns=cols)
with pytest.raises(ValueError):
mappings.static_transition(ts, contract_dates, transition)
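# --- Illustrative sketch added by the editor (not part of the original test suite) ---
# The three failing cases above imply the shape of a well-formed transition frame:
# MultiIndex columns of (generic, 'front'/'back'), every row summing to 1.0, and the
# 'back' weight increasing monotonically towards the roll. The index values are the
# business-day offsets used by static_transition; their exact anchor is an assumption
# inferred from the tests above.
_example_good_transition = pd.DataFrame(
    [[1.0, 0.0],   # still fully weighted in the front contract
     [0.5, 0.5],   # half-way through the roll
     [0.0, 1.0]],  # roll finished: fully in the back contract
    index=[-2, -1, 0],
    columns=pd.MultiIndex.from_product([[0], ['front', 'back']]),
)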
def test_no_roll_date_two_generics_static_transition(dates):
dt = dates.iloc[0]
contract_dates = dates
ts = dt + BDay(-8)
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.static_transition(ts, contract_dates, transition)
wts_exp = [(0, 'CLX16', 1.0, ts), (1, 'CLZ16', 1.0, ts)]
assert wts == wts_exp
def test_not_in_roll_two_generics_static_roller(dates):
dt = dates.iloc[0]
contract_dates = dates.iloc[0:3]
sd, ed = (dt + BDay(-8), dt + BDay(-7))
timestamps = pd.date_range(sd, ed, freq='b')
cols = pd.MultiIndex.from_product([[0, 1], ['front', 'back']])
idx = [-2, -1, 0]
transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5],
[0.0, 1.0, 0.0, 1.0]],
index=idx, columns=cols)
wts = mappings.roller(timestamps, contract_dates,
mappings.static_transition,
transition=transition)
midx = pd.MultiIndex.from_product([timestamps, ['CLX16', 'CLZ16']])
midx.names = ['date', 'contract']
cols = pd.Index([0, 1], name='generic')
wts_exp = pd.DataFrame([[1.0, 0.0], [0.0, 1.0],
[1.0, 0.0], [0.0, 1.0]], index=midx,
columns=cols)
| assert_frame_equal(wts, wts_exp) | pandas.util.testing.assert_frame_equal |
"""Base class for any input"""
from abc import ABC, abstractmethod
from typing import List
import pandas as pd
import babao.config as conf
import babao.utils.date as du
import babao.utils.file as fu
import babao.utils.log as log
INPUTS = [] # type: List[ABCInput]
LAST_WRITE = 0 # TODO: this is a stupid idea, bugs incoming!
REAL_TIME_LOOKBACK_DAYS = 7 # TODO: infer this from models/graph
CACHE_REAL_TIME_LOOKBACK_DAYS = REAL_TIME_LOOKBACK_DAYS * 4
TRAIN_TEST_RATIO = 3 / 4
SPLIT_DATE = int(
du.EPOCH + (du.TIME_TRAVELER.getTime(force=True) - du.EPOCH)
* TRAIN_TEST_RATIO
)
def resampleSerie(s):
"""
Call Series.resample on s with preset parameters
(the series' index must be datetime)
"""
# TODO: would be nice to do the base init once for all features
# (ensure sync and save some computing)
# also don't convert date or do it in utils.date
base = du.toDatetime(LAST_WRITE)
base = (base.minute + (base.second + 1) / 60) % 60
return s.resample(
str(conf.TIME_INTERVAL) + "Min",
closed="right",
label="right",
base=base
)
class ABCInput(ABC):
"""
Base class for any input
Your subclass should at least implement:
* fetch : self -> DataFrame
* raw_columns : List[str]
And eventually (if you want self.resample to work):
* _resample : self -> DataFrame -> DataFrame
* fillMissing : self -> DataFrame -> DataFrame
* resampled_columns : List[str]
(cf. the specific method doc-strings in this class; a minimal subclass sketch is added after this class)
"""
@property
@abstractmethod
def raw_columns(self) -> List[str]:
"""
The columns names of your raw data
(as fetched and stored in database)
"""
pass
@property
@abstractmethod
def resampled_columns(self) -> List[str]:
"""The columns names of your resampled data (from raw data)"""
pass
def __init__(self):
self.up_to_date = True
self.current_row = None
self._cache_data = None
if conf.CURRENT_COMMAND == "train":
self.cache()
elif conf.CURRENT_COMMAND == "backtest":
self.cache(
since=SPLIT_DATE, till=du.TIME_TRAVELER.getTime(force=True)
)
else: # real-time
last_entry = fu.getLastRows(self.__class__.__name__, 1)
if not last_entry.empty:
du.TIME_TRAVELER.setTime(last_entry.index[0])
since = du.TIME_TRAVELER.nowMinus(
days=CACHE_REAL_TIME_LOOKBACK_DAYS
)
du.TIME_TRAVELER.setTime(None)
self.cache(since=since)
def write(self, raw_data):
"""Write the given raw_data to the database, and cache it if needed"""
if raw_data is None or raw_data.empty:
return None
if not fu.write(self.__class__.__name__, raw_data):
log.warning(
"Couldn't write to database frame '"
+ self.__class__.__name__ + "'"
)
return False
self.cache(fresh_data=raw_data)
return True
def _readFromCache(self, since=None, till=None):
"""Read data in cache from ´since´ to ´till´"""
if self._cache_data.empty:
return self._cache_data
return self._cache_data.loc[since:till]
def _readFromFile(self, since=None, till=None):
"""Read data in database from ´since´ to ´till´"""
where = None
if since is not None:
where = "index > %d" % since
if till is not None:
where += " & index < %d" % till
return fu.read(self.__class__.__name__, where=where)
def read(self, since=None, till=None):
"""Read data in database or cache from ´since´ to ´till´"""
if since is None:
since = du.EPOCH
now = du.TIME_TRAVELER.getTime()
if till is None or till > now:
till = now
if self._cache_data is not None:
return self._readFromCache(since, till)
return self._readFromFile(since, till)
def cache(self, fresh_data=None, since=None, till=None):
"""
Save some data to cache
If ´fresh_data´ is given, append it to cache,
otherwise read in database from ´since´ to ´till´ and cache it
"""
if fresh_data is not None:
self._cache_data = self._cache_data.append(
fresh_data
)
if not self._cache_data.empty:
self._cache_data = self._cache_data.loc[
self._cache_data.index[-1]
- du.secToNano(CACHE_REAL_TIME_LOOKBACK_DAYS * 24 * 3600):
]
else:
log.debug(
"Caching data from", du.toStr(since), "to", du.toStr(till),
"(" + self.__class__.__name__ + ")"
)
self._cache_data = self._readFromFile(since, till)
if not self._cache_data.empty:
self.updateCurrentRow(self._cache_data.iloc[-1])
else:
log.warning("Database '" + self.__class__.__name__ + "' is emtpy")
self._cache_data = | pd.DataFrame(columns=self.raw_columns) | pandas.DataFrame |
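# --- Illustrative sketch added by the editor (not part of the original module) ---
# A minimal subclass following the contract described in the ABCInput doc-string above.
# The column names and the empty frame returned by fetch() are assumptions made purely
# for illustration; a real input would query an exchange, API, or file here.
class DummyPriceInput(ABCInput):
    raw_columns = ["price", "volume"]
    resampled_columns = ["open", "high", "low", "close", "volume"]

    def fetch(self):
        # a real implementation returns newly fetched raw rows, indexed by datetime
        return pd.DataFrame(columns=self.raw_columns)

    # _resample(self, raw) and fillMissing(self, resampled) would be added here
    # if self.resample support is wanted (cf. the doc-string above).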
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 10 15:13:29 2018
@author: Branson
"""
import BATrader as ba
from collections import defaultdict
import pandas as pd
import numpy as np
import threading
# =============================================================================
# Good implementation of products
# =============================================================================
"""
2019-02: Actually we don't need the product to have the ability to inspect itself
"""
class BaseClassProducts(dict):
"""
Derived from dictionary
"""
# Public
# symlist_index = defaultdict(list)
# quotes = defaultdict(list)
@classmethod
def find_by_sym(cls, sym):
'''
Find and return the (first) instance registered for the given sym
'''
return cls.symlist_index[sym][0]
@classmethod
def check_sym_instances(cls, sym):
'''
Return True if an instance of 'sym' exists
'''
return sym in cls.symlist_index
@classmethod
def return_quotes(cls):
'''
Return the quotes of the class
'''
return cls.quotes
def shift(self, shift: int):
if shift > 0:
self.shifted_daybar = self.shifted_daybar.append(self.daybar[-shift:], sort=True)
self.daybar = self.daybar[:-shift]
def shift_by_day(self, day):
self.shifted_daybar = self.shifted_daybar.append(self.daybar[day:], sort= True)
self.daybar = self.daybar[:day]
#self.shifted_data1min = self.shifted_data1min.append(self.data1min[ba.dk.format_add_hyphen(day)])
#self.data1min = self.data1min[:ba.dk.format_add_hyphen(day)]
class Stock(BaseClassProducts):
"""
Previously we used loaded_data to prevent dump() from loading more than once.
But actually we may want to keep that behaviour (dump will load again), in case some
data is updated to SQL first and needs to be read out again (like mmi).
MemoryDict runs dump() only once, because it calls __missing__, so it is safe
to remove self.loaded_data. (2019-06-30)
(An illustrative usage sketch is added after this class.)
"""
symlist_index = defaultdict(list)
quotes = defaultdict(dict) # for storing the latest quotes when updating
# loaded-data meta: lists up front which data attributes get loaded; keep it in sync with the loaders below
meta = ['daybar', 'data1min']
def __init__(self, sym):
# BaseClass_Products.__init__(self)
self.sym = sym
self.chi_name = ba.fr.get_chi_name(sym)
Stock.symlist_index[sym].append(self)
# Scan meta: sometimes we need to attach some scan metadata to the Stock object
self.scanner_meta_data = {}
self.shifted_daybar = pd.DataFrame()
self.shifted_data1min = pd.DataFrame()
@classmethod
def find_by_sym(cls, sym):
return Stock.symlist_index[sym][0]
def display(self):
print("Symbol:", self.sym)
print("no. of EOD bar:", len(self.daybar))
print("no. of 1min bar:", len(self.data1min))
def dump(self, recalc= True, dayback= None):
"""
Load some dumped data.
daybar and data1min are loaded from the DB.
recalc:
True recalculates the bars via tc.
dayback:
only load `dayback` days of data; must come with recalc=False.
"""
print('Dumping : %s' % self.sym)
# Name
setattr(self, 'name', ba.fr.get_name(self.sym))
setattr(self, 'chi_name', ba.fr.get_chi_name(self.sym))
# OHLC
if recalc:
setattr(self, 'daybar', ba.tc.get_bar_ex_eod_db_recalc_with_meta(self.sym))
else:
if dayback:
setattr(self, 'daybar', ba.tc.get_bar_ex_eod_db(self.sym, dayback= dayback))
else:
setattr(self, 'daybar', ba.tc.get_bar_ex_eod_db(self.sym))
setattr(self, 'data1min', ba.rtq.get_1min_sql(self.sym, dayback=30))
def dump_meta_data(self):
self.load_hkexnews()
setattr(self, 'concat', pd.concat([self.daybar, self.mmi, self.details], sort=True))
self.load_ccass()
def dump_warrant_cbbc(self):
if not hasattr(self, 'warrant'):
self.warrant = ba.hsbc.get_warrant_group_by(self.sym)
if not hasattr(self, 'cbbc'):
self.cbbc = ba.hsbc.get_cbbc_group_by(self.sym)
def load_hkexnews(self):
"""
We always want this to be called only once, since we can't backtest it easily.
This just saves some work in the daily run.
"""
if not hasattr(self, 'hkexnews'):
setattr(self, 'hkexnews', ba.hkex.hkexnews_single_stock_news(self.sym))
def load_ccass(self):
setattr(self, 'ccass', ba.ccass.CCASS(self.sym))
def load_mmi(self, dayback= 30):
setattr(self, 'mmi', ba.mmi.get_by_sym_sql(self.sym, dayback= dayback))
def load_details(self, dayback= 30, col_lst = []):
setattr(self, 'details', ba.etnet.get_by_sym_sql(self.sym, dayback= dayback, col_lst= col_lst))
def load_min_bar(self, min_: str):
setattr(self, 'data%smin' % min_,
ba.algo.make_min_bar(self.data1min, '%sMin' % min_, simple_mode= True))
def load_chi_name(self):
setattr(self, 'chi_name', ba.fr.get_chi_name(self.sym))
def _convert_tc_rtq_to_dataframe(self, dic):
df = pd.DataFrame.from_dict(dic, orient='index').T
df['Date'] = | pd.to_datetime(df['Date'], format="%Y%m%d") | pandas.to_datetime |
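# --- Illustrative usage sketch added by the editor (not part of the original module) ---
# Everything below is commented out because it needs a live BATrader environment;
# the symbol '0005' is only an example and an assumption about the expected format.
# s = Stock('0005')                  # registers itself under Stock.symlist_index['0005']
# s.dump(recalc=False, dayback=90)   # pulls daybar / data1min from the DB
# s.load_min_bar('5')                # builds s.data5min from s.data1min
# s.display()
# same_obj = Stock.find_by_sym('0005')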
"""
test pandabase against supported databases through fixtures:
sqlite: automatic - because SQLite is filesystem- or memory-based, sqlite does not require any setup
postgres: not automatic; execute these with pytest --run-postgres. postgresql requires:
postgres service to be running in background
a database has been set up: testdb
a user/password: <PASSWORD>
"""
import pandas as pd
import pytest
import sqlalchemy as sa
import pandabase as pb
from pandas import set_option
from os.path import join
from logging import basicConfig, DEBUG
from types import SimpleNamespace
import pytz
UTC = pytz.utc
# pd.set_option('display.max_colwidth', 12)
| set_option('expand_frame_repr', True) | pandas.set_option |
def search(name=None, source=None, id_No=None, markdown=False):
"""
Search function that interacts directly with the Global Lake Level Database API.
Arguments:
name (str): Name of Lake or Reservoir. Be sure to use proper spelling. Wildcards (%) are allowed, as is any MySQL 5.7 syntax
source (str): Lake water level source flag, accepted values are "usgs", "grealm", or "hydroweb"
id_No (str,int): Global Lake Level Database identification number
markdown (bool, optional): prints the results as a markdown-formatted table when True
Returns:
Lake object: `Lake()` object (an illustrative usage sketch is added after this function)
"""
import pandas as pd
import requests
import warnings
from IPython.display import display
if id_No:
id_No = str(id_No)
url = 'https://4o8d0ft32f.execute-api.us-east-2.amazonaws.com/prod/glld/search/?idNo={}'.format(
id_No)
r = requests.get(url)
json_decode = r.json()
df = pd.DataFrame().from_records(json_decode, columns = ['id_No', 'lake_name', 'source', 'metadata'])
elif not source:
url = 'https://4o8d0ft32f.execute-api.us-east-2.amazonaws.com/prod/glld/search/?name={}'.format(
name)
r = requests.get(url)
json_decode = r.json()
df = pd.DataFrame().from_records(json_decode, columns = ['id_No', 'lake_name', 'source', 'metadata'])
elif source:
url = 'https://4o8d0ft32f.execute-api.us-east-2.amazonaws.com/prod/glld/' \
'search?name={}&source={}'.format(name, source)
r = requests.get(url)
json_decode = r.json()
df = pd.DataFrame().from_records(json_decode, columns = ['id_No', 'lake_name', 'source', 'metadata'])
else:
raise ValueError("I don't know how you did this, but if you did, make a github issue!")
if len(df) < 1:
raise RuntimeError('No results returned. Please adjust search parameters or see documentation')
if len(df) > 1:
warnings.warn('Search Result: \'{}\' has more than 1 Result. Showing the {} most relevant results.\n'
'Specify \'id_No\' or narrow search name.'.format(name, len(df)), category = RuntimeWarning)
if markdown is True:
print(df.filter(['id_No', 'source', 'lake_name']).to_markdown())
else:
print(df.filter(['id_No', 'source', 'lake_name']))
elif len(df) == 1:
meta_series = df['metadata'].map(eval).apply(pd.Series)
df_unpacked = pd.merge(left = df,
right = meta_series.drop(['source', 'lake_name'],
axis = 1),
left_index = True,
right_index = True,
how = 'outer').drop('metadata', axis = 1)
if markdown is True:
print(df_unpacked.to_markdown())
else:
with | pd.option_context('display.max_rows', 5, 'display.max_columns', None) | pandas.option_context |
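# --- Illustrative usage sketch added by the editor (not part of the original module) ---
# Commented out because it needs network access to the Global Lake Level Database API;
# the lake name and id_No below are placeholders, not values taken from the database.
# search(name='Tahoe', source='usgs')   # narrow the search with a source flag
# search(name='Mille Lacs%')            # MySQL-style wildcard on the name
# search(id_No=123, markdown=True)      # exact record, printed as a markdown table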
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_nat_new(self):
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(0), idx)
exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(3), exp)
exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', freq='H')
tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq='H')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal values, different freq
idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestPeriodIndexSeriesMethods(tm.TestCase):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assertRaisesRegexp(TypeError, msg):
obj + ng
with tm.assertRaises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assertRaisesRegexp(TypeError, msg):
obj - ng
with tm.assertRaises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
self.assertIs(np.add(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.add(ng, obj)
with tm.assertRaises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
self.assertIs(np.subtract(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj + offsets.Hour(2)
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
offsets.Hour(2) + obj
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj - offsets.Hour(2)
def test_pi_sub_period(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - | pd.Period('2012-01', freq='M') | pandas.Period |
import matplotlib.pylab as plt
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from pandas import Series
from pandas import DataFrame
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from math import sqrt
import sys
from sklearn.cluster import KMeans
from sklearn import preprocessing
df = pd.read_csv('user_balance_table_all.csv', index_col='user_id', names=['user_id', 'report_date', 'tBalance', 'yBalance', 'total_purchase_amt', 'direct_purchase_amt', 'purchase_bal_amt', 'purchase_bank_amt', 'total_redeem_amt', 'consume_amt', 'transfer_amt', 'tftobal_amt', 'tftocard_amt', 'share_amt', 'category1', 'category2', 'category3', 'category4'
], parse_dates=[1])
df['report_date'] = | pd.to_datetime(df['report_date'], errors='coerce') | pandas.to_datetime |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.window.common import flex_binary_moment
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
# binary moments
def test_rolling_cov(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(series):
A = series
B = A + np.random.randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(func, frame):
result = getattr(frame.rolling(window=10, min_periods=5), func)()
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])
tm.assert_series_equal(result, expected, check_names=False)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(method, frame):
series = frame[1]
res = getattr(series.rolling(window=10), method)(frame)
res2 = getattr(frame.rolling(window=10), method)(series)
exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_rolling_apply_consistency_sum_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_rolling_apply_consistency_sum_no_nans(
consistency_data, window, min_periods, center, f
):
x, is_constant, no_nans = consistency_data
if no_nans:
if f is np.nansum and min_periods == 0:
pass
else:
rolling_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
| tm.assert_equal(rolling_f_result, rolling_apply_f_result) | pandas._testing.assert_equal |
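# Hedged illustrative sketch (not part of the original test module): a concrete instance of
# the consistency property the parametrized tests above exercise -- rolling(...).sum() and
# rolling(...).apply(np.nansum, raw=True) agree when min_periods >= 1. The data values are
# arbitrary placeholders.
def _example_rolling_sum_consistency():
    s = Series([1.0, np.nan, 3.0, 4.0])
    direct = s.rolling(window=2, min_periods=1).sum()
    via_apply = s.rolling(window=2, min_periods=1).apply(np.nansum, raw=True)
    tm.assert_series_equal(direct, via_apply)  # both evaluate to [1.0, 1.0, 3.0, 7.0]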
from typing import Union
from collections import OrderedDict
import numpy as np
import pandas as pd
import plotly.offline as opy
import plotly.graph_objs as go
import plotly.figure_factory as ff
class PySingleSiteSimpleSchedule:
def __init__(
self,
objectives: dict,
campaigns_table: list,
batches_table: list=None,
tasks_table: list=None,
kg_inventory: list=None,
kg_backlog: list=None,
kg_supply: list=None,
kg_waste: list=None,
):
'''
A Python helper class for encapsulating biopharma_scheduling solutions
and their attributes (e.g. objective values, campaigns list, batches list).
PARAMETERS:
objectives: dict
A Python dictionary of objective name and value pairs, e.g.:
{
'total_kg_inventory_deficit': float,
'total_kg_throughput': float,
'total_kg_backlog': float,
'total_kg_waste': float,
'total_profit': float,
'total_cost': float,
}
campaigns_table: list of Union[dict, OrderedDict]
A Python list of either dict or OrderedDict, e.g.:
[
{
'Product': str, product label,
'Batches': int,
'Kg': float,
'Start': str, date in the '%Y-%m-%d' format,
'First Harvest': str, date in the '%Y-%m-%d' format,
'First Batch': str, date in the '%Y-%m-%d' format,
'Last Batch': str, date in the '%Y-%m-%d' format
},
...
]
batches_table: list of Union[dict, OrderedDict], optional, default None
A Python list of either dict or OrderedDict, e.g.:
[
{
'Product': str, product label,
'Kg': float,
'Harvested on': str, date in the '%Y-%m-%d' format,
'Stored on': str, date in the '%Y-%m-%d' format,
'Expires on': str, date in the '%Y-%m-%d' format,
'Approved on': str, date in the '%Y-%m-%d' format
},
...
]
tasks_table: list of Union[dict, OrderedDict], optional, default None
A Python list of either dict or OrderedDict, e.g.:
[
{
'Product': str, product label,
'Task': str, task name, e.g. { 'Inoculation', 'Seed', 'Production' },
'Start': str, date in the '%Y-%m-%d' format,
'Finish on': str, date in the '%Y-%m-%d' format
},
...
]
kg_inventory: list, optional, default None
Example:
[
{
'date': str, date in the '%Y-%m-%d' format,
'<product label1>': float,
'<product label2>': float,
...
'<product labeln>': float,
},
...
]
kg_backlog: list, optional, default None
Example:
[
{
'date': str, date in the '%Y-%m-%d' format,
'<product label1>': float,
'<product label2>': float,
...
'<product labeln>': float,
},
...
]
kg_supply: list, optional, default None
A Python list of dicts, e.g.:
[
{
'date': str, date in the '%Y-%m-%d' format,
'<product label1>': float,
'<product label2>': float,
...
'<product labeln>': float,
},
...
]
kg_waste: list, optional, default None
Example:
[
{
'date': str, date in the '%Y-%m-%d' format,
'<product label1>': float,
'<product label2>': float,
...
'<product labeln>': float,
},
...
]
'''
self.__objectives = pd.DataFrame.from_records([objectives], index=['value'])
self.__campaigns = pd.DataFrame.from_records(campaigns_table)
self.__batches = pd.DataFrame.from_records(batches_table) if batches_table else None
self.__tasks = pd.DataFrame.from_records(tasks_table) if tasks_table else None
self.__kg_inventory = pd.DataFrame.from_records(kg_inventory) if kg_inventory else None
self.__kg_backlog = pd.DataFrame.from_records(kg_backlog) if kg_backlog else None
self.__kg_supply = pd.DataFrame.from_records(kg_supply) if kg_supply else None
self.__kg_waste = pd.DataFrame.from_records(kg_waste) if kg_waste else None
for df in [self.__kg_inventory, self.__kg_backlog, self.__kg_supply, self.__kg_waste]:
if df is not None:
df.index = pd.to_datetime(df['date'])
del df['date']
def campaigns_gantt(self, colors: dict=None, layout: dict=None):
'''
Creates a Gantt chart of the campaigns table.
INPUT:
colors: dict, optional, default None
A dictionary of product label and color code pairs (RGB or HEX)
for the Gantt chart.
{
'A': 'rgb(146, 208, 80)',
'B': 'rgb(179, 129, 217)',
'C': 'rgb(196, 189, 151)',
'D': 'rgb(255, 0, 0)'
}
layout: dict, optional, default None
A dictionary of configuration parameters for the Gantt chart.
See https://plot.ly/python/gantt/.
'''
df = self.__campaigns.reset_index()
df['Finish'] = df['Last Batch']
df['Resource'] = df['Product']
df['Task'] = df['Product']
df = df.to_dict('records')
gantt = ff.create_gantt(
df,
colors=colors,
index_col='Resource',
group_tasks=True,
showgrid_x=True,
showgrid_y=True
)
for gantt_row, campaign in zip(gantt['data'], df):
text = '<br>'.join([
'{}: {}'.format(key, val)
for key, val in campaign.items()
if key not in { 'index', 'Finish', 'Resource', 'Task' }
])
gantt_row.update({'text': text})
if layout is None:
gantt['layout'].update({
'title': '',
'xaxis': {
'tickangle': -30,
'side': 'bottom'
}
})
else:
gantt['layout'].update(layout)
return opy.iplot(gantt)
def tasks_gantt(self, colors: dict=None, layout: dict=None):
'''
Creates a Gantt chart of the tasks table.
INPUT:
colors: dict, optional, default None
A dictionary of product label and color code pairs (RGB or HEX)
for the Gantt chart.
{
'A': 'rgb(146, 208, 80)',
'B': 'rgb(179, 129, 217)',
'C': 'rgb(196, 189, 151)',
'D': 'rgb(255, 0, 0)'
}
layout: dict, optional, default None
A dictionary of configuration parameters for the Gantt chart.
See https://plot.ly/python/gantt/.
'''
df = self.__tasks.reset_index()
df['Resource'] = df['Product']
df = df.to_dict('records')
gantt = ff.create_gantt(
df,
colors=colors,
index_col='Resource',
group_tasks=True,
showgrid_x=True,
showgrid_y=True,
show_colorbar=True
)
for gantt_row, campaign in zip(gantt['data'], df):
text = '<br>'.join([
'{}: {}'.format(key, val)
for key, val in campaign.items()
if key not in {'index', 'Resource'}
])
gantt_row.update({'text': text})
if layout is None:
gantt['layout'].update({
'title': '',
'xaxis': {
'tickangle': -30,
'side': 'bottom'
}
})
else:
gantt['layout'].update(layout)
return opy.iplot(gantt)
@property
def objectives(self):
return self.__objectives
@property
def campaigns(self):
return self.__campaigns
@property
def batches(self):
return self.__batches
@property
def tasks(self):
return self.__tasks
@property
def kg_inventory(self):
return self.__kg_inventory
@property
def kg_backlog(self):
return self.__kg_backlog
@property
def kg_supply(self):
return self.__kg_supply
@property
def kg_waste(self):
return self.__kg_waste
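# Hedged usage sketch (not part of the original module): a minimal example of constructing
# PySingleSiteSimpleSchedule from the table structures described in the docstrings above.
# Every product label, date, batch count and kg figure below is a hypothetical placeholder,
# not data from any real schedule; campaigns_gantt() would then render it in a notebook.
def _example_single_site_schedule() -> PySingleSiteSimpleSchedule:
    objectives = {'total_kg_throughput': 120.0, 'total_kg_backlog': 0.0, 'total_kg_waste': 0.0}
    campaigns_table = [{
        'Product': 'A',
        'Batches': 3,
        'Kg': 120.0,
        'Start': '2020-01-06',
        'First Harvest': '2020-01-20',
        'First Batch': '2020-01-27',
        'Last Batch': '2020-02-24',
    }]
    kg_inventory = [
        {'date': '2020-01-31', 'A': 40.0},
        {'date': '2020-02-29', 'A': 120.0},
    ]
    return PySingleSiteSimpleSchedule(objectives, campaigns_table, kg_inventory=kg_inventory)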
class PySingleSiteMultiSuiteSchedule:
def __init__(
self,
objectives: dict,
campaigns_table: list,
batches_table: list=None,
batch_inventory: list=None,
batch_backlog: list=None,
batch_supply: list=None,
batch_waste: list=None,
):
self.__objectives = pd.DataFrame.from_records([objectives], index=['value'])
self.__campaigns = pd.DataFrame.from_records(campaigns_table)
self.__batches = | pd.DataFrame.from_records(batches_table) | pandas.DataFrame.from_records |
# Based on code by <NAME>, with various modifications added
#
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import sys
sys.path.insert(0,'/code')
import argparse
import os
import pathlib
from os.path import isfile, join
import numpy as np
import pandas as pd
import torch
from models.neuralhash import NeuralHash
from PIL import Image
from tqdm import tqdm
from utils.hashing import compute_hash, load_hash_matrix
def main():
# Parse command-line arguments
parser = argparse.ArgumentParser(
description='Perform neural collision attack.')
parser.add_argument('--source', dest='source', type=str,
default='data/imagenet_test', help='image folder to compute hashes for')
args = parser.parse_args()
# Load images
if os.path.isfile(args.source):
images = [args.source]
elif os.path.isdir(args.source):
datatypes = ['png', 'jpg', 'jpeg']
images = [os.path.join(path, name) for path, subdirs, files in os.walk(args.source)
for name in files if name.lower().split('.')[-1] in datatypes]
else:
raise RuntimeError(f'{args.source} is neither a file nor a directory.')
# Load pytorch model and hash matrix
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = NeuralHash()
model.load_state_dict(torch.load('./models/model.pth'))
model.to(device)
seed = load_hash_matrix()
seed = torch.tensor(seed).to(device)
# Prepare results
result_df = | pd.DataFrame(columns=['image', 'hash_bin', 'hash_hex']) | pandas.DataFrame |
from ast import literal_eval
import numpy as np
import pandas as pd
import scipy
from pandas import DataFrame
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors import BallTree, KDTree, NearestNeighbors
from sklearn.preprocessing import MultiLabelBinarizer, Normalizer
from tqdm import tqdm
def parse_json(filename_python_json: str, read_max: int = -1) -> DataFrame:
"""Parses json file into a DataFrame
Args:
filename_python_json (str): Path to json file
read_max (int, optional): Max amount of lines to read from json file. Defaults to -1.
Returns:
DataFrame: DataFrame from parsed json
"""
with open(filename_python_json, "r", encoding="utf-8") as f:
# parse json
parse_data = []
# tqdm is for showing progress bar, always good when processing large amounts of data
for line in tqdm(f):
# load python nested datastructure
parsed_result = literal_eval(line)
parse_data.append(parsed_result)
if read_max != -1 and len(parse_data) > read_max:
print(f"Break reading after {read_max} records")
break
print(f"Reading {len(parse_data)} rows.")
# create dataframe
df = | DataFrame.from_dict(parse_data) | pandas.DataFrame.from_dict |
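# Hedged usage sketch (not shown in the original snippet): assuming parse_json finishes by
# returning the DataFrame it assembles, as its docstring states, it could be used like this.
# 'reviews.json' is a hypothetical path to a file holding one Python-literal dict per line.
def _example_parse_reviews(path: str = 'reviews.json') -> DataFrame:
    df_reviews = parse_json(path, read_max=1000)   # cap reading at 1000 records for a quick look
    print(df_reviews.columns.tolist())             # inspect which keys became columns
    return df_reviews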
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
# test case will test matching sofr maturity with libor leg and flipping legs to get right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# NORMALIZED not supported
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert actual.dataset_ids == _test_datasets
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_basis_swap_spread(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_swap_annuity(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
    mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
    args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['benchmark_type'] = BenchmarkType.SOFR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_annuity(**args)
expected = abs(tm.ExtendedSeries([1.0, 2.0, 3.0], index=_index * 3, name='swapAnnuity') * 1e4 / 1e8)
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_term_structure():
replace = Replacer()
args = dict(benchmark_type=None, floating_rate_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
    mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
    args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(..., '1y', real_time=True)
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['floating_rate_tenor'] = '3m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor_type'] = None
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'swapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']}, index=_index)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = '5y'
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_basis_swap_term_structure():
replace = Replacer()
range_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
range_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
args = dict(spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(..., '1y', real_time=True)
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_tenor'] = '6m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor_type'] = 'forward_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
assert tm_rates.basis_swap_term_structure(**args).empty
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = tm_rates._SwapTenorType.SWAP_TENOR
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'basisSwapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']},
index=_index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_cap_floor_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_vol(mock_usd, '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_vol(..., '5y', 50, real_time=True)
replace.restore()
def test_cap_floor_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_atm_fwd_rate(mock_usd, '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_atm_fwd_rate(..., '5y', real_time=True)
replace.restore()
def test_spread_option_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_vol(mock_usd, '3m', '10y', '5y', 50)
assert_series_equal( | pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionVol') | pandas.Series |
from unittest import TestCase
from quick_pandas import monkey
monkey.patch_all()
from quick_pandas.wrappers.numpy_wrapper import ndarray_wrapper
class TestPandas(TestCase):
def test_dataframe_ndarray(self):
import pandas as pd
import numpy as np
data = np.random.randint(0, 10000, 1000)
df = pd.DataFrame(data)
# self.assertEqual(getattr(df._data.blocks[0].values, '__actual_class__', None), ndarray_wrapper)
def test_dataframe_list(self):
import pandas as pd
data = [1, 2, 3]
df = | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
import re
import util
import os
import xml.etree.ElementTree as ET
import datetime as dt
from scipy.sparse import dok_matrix
import hashlib
import six
from six.moves import range
from six.moves import zip
class XGMML(object):
def __init__(self):
self.T_node=None
self.T_edge=None
self.name="untitled"
def parse(self, s_file):
tree=ET.parse(s_file)
root=tree.getroot()
self.name=root.attrib['label']
c_name={}
nodes=[]
for node in root:
if not node.tag.endswith('node'): continue
id=node.attrib['id']
c_name[id]=node.attrib['label']
c={}
#c['_id']=id
for att in node:
if att.tag.endswith('graphics'):
for k,v in att.attrib.items():
c['graphics_'+k]=v
continue
elif not att.tag.endswith('att'):
continue
v=att.attrib.get('value', None)
ty=att.attrib['type']
if ty=='integer':
v=int(v) if v is not None else 0
elif ty=='real':
v=float(v) if v is not None else 0.0
c[att.attrib['name']]='' if pd.isnull(v) else v
nodes.append(c)
self.T_node= | pd.DataFrame(nodes) | pandas.DataFrame |
import io
import os
import re
import sys
import time
import pandas
import datetime
import requests
import mplfinance
from matplotlib import dates
# Basic Data
file_name = __file__[:-3]
absolute_path = os.path.dirname(os.path.abspath(__file__))
# <editor-fold desc='common'>
def load_json_config():
global file_directory
config_file = os.path.join(os.sep, absolute_path, 'Config.cfg')
with open(config_file, 'r') as file_handler:
config_data = file_handler.read()
regex = 'FILE_DIRECTORY=.*'
match = re.findall(regex, config_data)
file_directory = match[0].split('=')[1].strip()
# </editor-fold>
# <editor-fold desc='daily update'>
def save_dict_to_file(dic, txt):
f = open(txt, 'w', encoding='utf-8')
f.write(dic)
f.close()
def load_dict_from_file(txt):
f = open(txt, 'r', encoding='utf-8')
data = f.read()
f.close()
return eval(data)
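# Minimal sketch of how the two helpers above pair up. Note that save_dict_to_file
# expects the dict already rendered as a string (it writes the text verbatim) and
# load_dict_from_file eval()s the text back, so repr(dict) round-trips. The file name
# below is just an illustrative placeholder.
def _example_dict_file_roundtrip(path='example_dict.txt'):
    original = {'2330': 500.0}
    save_dict_to_file(repr(original), path)
    return load_dict_from_file(path) == original  # True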
def crawl_price(date=datetime.datetime.now()):
date_str = str(date).split(' ')[0].replace('-', '')
r = requests.post('http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=' + date_str + '&type=ALL')
ret = pandas.read_csv(io.StringIO('\n'.join([i.translate({ord(c): None for c in ' '}) for i in r.text.split('\n') if
len(i.split(',')) == 17 and i[0] != '='])), header=0,
index_col='證券代號')
ret['成交金額'] = ret['成交金額'].str.replace(',', '')
ret['成交股數'] = ret['成交股數'].str.replace(',', '')
return ret
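# Minimal usage sketch for crawl_price. It assumes network access to www.twse.com.tw and
# that the requested date is a trading day; otherwise pandas raises EmptyDataError,
# which the callers below catch. The date used here is only an example.
def _example_crawl_price():
    day = datetime.datetime(2020, 1, 3)
    prices = crawl_price(day)
    # the index is the security code; volume/turnover columns arrive as strings
    return prices[['成交股數', '成交金額']].head()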
def original_crawl_price(date='2011-01-01 00:00:00'):
print('Begin: original_crawl_price!')
data = {}
success = False
dateFormatter = '%Y-%m-%d %H:%M:%S'
date = datetime.datetime.strptime(date, dateFormatter)
while not success:
print('parsing', date)
try:
data[date.date()] = crawl_price(date)
print('success!')
success = True
except pandas.errors.EmptyDataError:
            # holidays have no data to crawl
print('fail! check the date is holiday')
        # move on to the next day
date += datetime.timedelta(days=1)
time.sleep(10)
writer = pandas.ExcelWriter(stock_file_path, engine='xlsxwriter')
stock_volume = pandas.DataFrame({k: d['成交股數'] for k, d in data.items()}).transpose()
stock_volume.index = pandas.to_datetime(stock_volume.index)
stock_volume.to_excel(writer, sheet_name='stock_volume', index=True)
stock_open = pandas.DataFrame({k: d['開盤價'] for k, d in data.items()}).transpose()
stock_open.index = pandas.to_datetime(stock_open.index)
stock_open.to_excel(writer, sheet_name='stock_open', index=True)
stock_close = pandas.DataFrame({k: d['收盤價'] for k, d in data.items()}).transpose()
stock_close.index = pandas.to_datetime(stock_close.index)
stock_close.to_excel(writer, sheet_name='stock_close', index=True)
stock_high = pandas.DataFrame({k: d['最高價'] for k, d in data.items()}).transpose()
stock_high.index = pandas.to_datetime(stock_high.index)
stock_high.to_excel(writer, sheet_name='stock_high', index=True)
stock_low = pandas.DataFrame({k: d['最低價'] for k, d in data.items()}).transpose()
stock_low.index = pandas.to_datetime(stock_low.index)
stock_low.to_excel(writer, sheet_name='stock_low', index=True)
writer.save()
print('End: original_crawl_price!')
def update_stock_info():
print('Begin: update_stock_info!')
data = {}
count = 1
fail_count = 0
allow_continuous_fail_count = 20
try:
pandas.read_excel(stock_file_path, sheet_name='stock_volume', index_col=0)
print(r'{} Exist.'.format(stock_file_path))
except FileNotFoundError:
print(r'{} Not Exist.'.format(stock_file_path))
original_crawl_price()
stock_volume_old = pandas.read_excel(stock_file_path, sheet_name='stock_volume', index_col=0)
stock_volume_old.index = pandas.to_datetime(stock_volume_old.index)
stock_open_old = pandas.read_excel(stock_file_path, sheet_name='stock_open', index_col=0)
stock_open_old.index = pandas.to_datetime(stock_open_old.index)
stock_close_old = pandas.read_excel(stock_file_path, sheet_name='stock_close', index_col=0)
stock_close_old.index = pandas.to_datetime(stock_close_old.index)
stock_high_old = pandas.read_excel(stock_file_path, sheet_name='stock_high', index_col=0)
stock_high_old.index = pandas.to_datetime(stock_high_old.index)
stock_low_old = pandas.read_excel(stock_file_path, sheet_name='stock_low', index_col=0)
stock_low_old.index = pandas.to_datetime(stock_low_old.index)
last_date = stock_volume_old.index[-1]
dateFormatter = '%Y-%m-%d %H:%M:%S'
date = datetime.datetime.strptime(str(last_date), dateFormatter)
date += datetime.timedelta(days=1)
if date > datetime.datetime.now():
print('Finish update_stock_info!')
sys.exit(0)
while date < datetime.datetime.now() and count <= 100:
print('parsing', date)
try:
data[date.date()] = crawl_price(date)
print('success {} times!'.format(count))
fail_count = 0
count += 1
except pandas.errors.EmptyDataError:
            # holidays have no data to crawl
print('fail! check the date is holiday')
fail_count += 1
if fail_count == allow_continuous_fail_count:
raise
date += datetime.timedelta(days=1)
time.sleep(10)
writer = pandas.ExcelWriter(stock_file_path, engine='xlsxwriter')
stock_volume_new = pandas.DataFrame({k: d['成交股數'] for k, d in data.items()}).transpose()
stock_volume_new.index = pandas.to_datetime(stock_volume_new.index)
stock_volume = pandas.concat([stock_volume_old, stock_volume_new], join='outer')
stock_volume.to_excel(writer, sheet_name='stock_volume', index=True)
stock_open_new = pandas.DataFrame({k: d['開盤價'] for k, d in data.items()}).transpose()
stock_open_new.index = pandas.to_datetime(stock_open_new.index)
stock_open = pandas.concat([stock_open_old, stock_open_new], join='outer')
stock_open.to_excel(writer, sheet_name='stock_open', index=True)
stock_close_new = pandas.DataFrame({k: d['收盤價'] for k, d in data.items()}).transpose()
stock_close_new.index = pandas.to_datetime(stock_close_new.index)
stock_close = pandas.concat([stock_close_old, stock_close_new], join='outer')
stock_close.to_excel(writer, sheet_name='stock_close', index=True)
stock_high_new = pandas.DataFrame({k: d['最高價'] for k, d in data.items()}).transpose()
stock_high_new.index = pandas.to_datetime(stock_high_new.index)
stock_high = pandas.concat([stock_high_old, stock_high_new], join='outer')
stock_high.to_excel(writer, sheet_name='stock_high', index=True)
stock_low_new = pandas.DataFrame({k: d['最低價'] for k, d in data.items()}).transpose()
stock_low_new.index = pandas.to_datetime(stock_low_new.index)
stock_low = | pandas.concat([stock_low_old, stock_low_new], join='outer') | pandas.concat |
""" this will read the goes_r data"""
import pandas as pd
import xarray as xr
try:
import s3fs
has_s3fs = True
except ImportError:
print(
"Please install s3fs if retrieving from the Amazon S3 Servers. Otherwise continue with local data"
)
has_s3fs = False
try:
import h5py # noqa: F401
has_h5py = True
except ImportError:
print("Please install h5py to open files from the Amazon S3 servers.")
has_h5py = False
try:
import h5netcdf # noqa: F401
has_h5netcdf = True
except ImportError:
print("Please install h5netcdf to open files from the Amazon S3 servers.")
has_h5netcdf = False
from ..grids import _geos_16_grid
def _get_swath_from_fname(fname):
vert_grid_num = fname.split(".")[-4].split("v")[-1]
hori_grid_num = fname.split(".")[-4].split("v")[0].split("h")[-1]
return hori_grid_num, vert_grid_num
def _get_time_from_fname(fname):
import pandas as pd
u = pd.Series([fname.split(".")[-2]])
date = pd.to_datetime(u, format="%Y%j%H%M%S")[0]
return date
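# Minimal sketch for the two filename helpers above. They assume a MODIS/MAIAC-style
# grid file name with an "h##v##" swath tag and a %Y%j%H%M%S stamp; the name below is
# made up for illustration, not a real product file.
def _example_fname_helpers():
    fname = 'MCD19A2.A2019010.h08v05.006.2019012050959.hdf'
    h, v = _get_swath_from_fname(fname)   # ('08', '05')
    stamp = _get_time_from_fname(fname)   # Timestamp('2019-01-12 05:09:59')
    return h, v, stamp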
def _open_single_file(fname):
# open the file
dset = xr.open_dataset(fname)
dset = dset.rename({"t": "time"})
# get the area def
area = _geos_16_grid(dset)
dset.attrs["area"] = area
# get proj4 string
dset.attrs["proj4_srs"] = area.proj_str
# get longitude and latitudes
lon, lat = area.get_lonlats_dask()
dset.coords["longitude"] = (("y", "x"), lon)
dset.coords["latitude"] = (("y", "x"), lat)
for i in dset.variables:
dset[i].attrs["proj4_srs"] = area.proj_str
dset[i].attrs["area"] = area
# expand dimensions for time
dset = dset.expand_dims("time")
return dset
def open_dataset(date=None, filename=None, satellite="16", product=None):
g = GOES()
if filename is None:
try:
if date is None:
raise ValueError
if product is None:
raise ValueError
except ValueError:
print("Please provide a date and product to be able to retrieve data from Amazon S3")
ds = g.open_amazon_file(date=date, satellite=satellite, product=product)
else:
ds = g.open_local(filename)
return ds
class GOES:
def __init__(self):
self.date = None
self.satellite = "16"
self.product = "ABI-L2-AODF"
self.baseurl = f"s3://noaa-goes{self.satellite}/"
self.url = f"{self.baseurl}"
self.filename = None
self.fs = None
def _update_baseurl(self):
self.baseurl = f"s3://noaa-goes{self.satellite}/"
def set_product(self, product=None):
try:
if product is None:
raise ValueError
else:
self.url = f"{self.baseurl}{product}/"
except ValueError:
print("kwarg product must have a value")
def get_products(self):
products = [value.split("/")[-1] for value in self.fs.ls(self.baseurl)[:-1]]
return products
def date_to_url(self):
date = pd.Timestamp(self.date)
date_url_bit = date.strftime("%Y/%j/%H/")
self.url = f"{self.url}{date_url_bit}"
def _get_files(self, url=None):
try:
files = self.fs.ls(url)
if len(files) < 1:
raise ValueError
else:
return files
except ValueError:
print("Files not available for product and date")
def _get_closest_date(self, files=[]):
file_dates = [pd.to_datetime(f.split("_")[-1][:-4], format="c%Y%j%H%M%S") for f in files]
date = pd.Timestamp(self.date)
nearest_date = min(file_dates, key=lambda x: abs(x - date))
nearest_date_str = nearest_date.strftime("c%Y%j%H%M%S")
found_file = [f for f in files if nearest_date_str in f][0]
return found_file
def _set_s3fs(self):
if has_s3fs:
self.fs = s3fs.S3FileSystem(anon=True)
else:
self.fs = None
def _product_exists(self, product):
try:
if has_s3fs:
products = self.get_products()
if product not in products:
raise ValueError
else:
return product
else:
raise ImportError
except ImportError:
print("Please install s3fs to retrieve product information from Amazon S3")
except ValueError:
print("Product: ", product, "not found")
print("Available products:")
for i in products:
print(" ", i)
def open_amazon_file(self, date=None, product=None, satellite="16"):
self.date = | pd.Timestamp(date) | pandas.Timestamp |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import warnings
from .condition_fun import *
def eva_dfkslift(df, groupnum=None):
if groupnum is None: groupnum=len(df.index)
# good bad func
def n0(x): return sum(x==0)
def n1(x): return sum(x==1)
df_kslift = df.sort_values('pred', ascending=False).reset_index(drop=True)\
.assign(group=lambda x: np.ceil((x.index+1)/(len(x.index)/groupnum)))\
.groupby('group')['label'].agg([n0,n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})\
.assign(
group=lambda x: (x.index+1)/len(x.index),
good_distri=lambda x: x.good/sum(x.good),
bad_distri=lambda x: x.bad/sum(x.bad),
badrate=lambda x: x.bad/(x.good+x.bad),
cumbadrate=lambda x: np.cumsum(x.bad)/np.cumsum(x.good+x.bad),
lift=lambda x: (np.cumsum(x.bad)/np.cumsum(x.good+x.bad))/(sum(x.bad)/sum(x.good+x.bad)),
cumgood=lambda x: np.cumsum(x.good)/sum(x.good),
cumbad=lambda x: np.cumsum(x.bad)/sum(x.bad)
).assign(ks=lambda x:abs(x.cumbad-x.cumgood))
# bind 0
df_kslift=pd.concat([
pd.DataFrame({'group':0, 'good':0, 'bad':0, 'good_distri':0, 'bad_distri':0, 'badrate':0, 'cumbadrate':np.nan, 'cumgood':0, 'cumbad':0, 'ks':0, 'lift':np.nan}, index=np.arange(1)),
df_kslift
], ignore_index=True)
# return
return df_kslift
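# Minimal usage sketch for eva_dfkslift: it expects a frame with a binary 'label'
# column (1 = bad) and a 'pred' score column; the toy scores below are made up.
def _example_dfkslift():
    toy = pd.DataFrame({'label': [1, 0, 1, 0, 0, 1, 0, 0],
                        'pred': [.9, .8, .7, .6, .5, .4, .3, .2]})
    tab = eva_dfkslift(toy, groupnum=4)
    return tab[['group', 'cumgood', 'cumbad', 'ks']]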
# plot ks
def eva_pks(dfkslift, title):
dfks = dfkslift.loc[lambda x: x.ks==max(x.ks)].sort_values('group').iloc[0]
###### plot ######
# fig, ax = plt.subplots()
# ks, cumbad, cumgood
plt.plot(dfkslift.group, dfkslift.ks, 'b-',
dfkslift.group, dfkslift.cumgood, 'k-',
dfkslift.group, dfkslift.cumbad, 'k-')
# ks vline
plt.plot([dfks['group'], dfks['group']], [0, dfks['ks']], 'r--')
# set xylabel
plt.gca().set(title=title+'K-S',
xlabel='% of population', ylabel='% of total Good/Bad',
xlim=[0,1], ylim=[0,1], aspect='equal')
# text
# plt.text(0.5,0.96,'K-S', fontsize=15,horizontalalignment='center')
plt.text(0.2,0.8,'Bad',horizontalalignment='center')
plt.text(0.8,0.55,'Good',horizontalalignment='center')
plt.text(dfks['group'], dfks['ks'], 'KS:'+ str(round(dfks['ks'],4)), horizontalalignment='center',color='b')
# plt.grid()
# plt.show()
# return fig
# plot lift
def eva_plift(dfkslift, title):
badrate_avg = sum(dfkslift.bad)/sum(dfkslift.good+dfkslift.bad)
###### plot ######
# fig, ax = plt.subplots()
# ks, cumbad, cumgood
plt.plot(dfkslift.group, dfkslift.cumbadrate, 'k-')
# ks vline
plt.plot([0, 1], [badrate_avg, badrate_avg], 'r--')
# set xylabel
plt.gca().set(title=title+'Lift',
xlabel='% of population', ylabel='% of Bad',
xlim=[0,1], ylim=[0,1], aspect='equal')
# text
# plt.text(0.5,0.96,'Lift', fontsize=15,horizontalalignment='center')
    plt.text(0.7,np.mean(dfkslift.cumbadrate),'cumulative badrate',horizontalalignment='center')
plt.text(0.7,badrate_avg,'average badrate',horizontalalignment='center')
# plt.grid()
# plt.show()
# return fig
def eva_dfrocpr(df):
def n0(x): return sum(x==0)
def n1(x): return sum(x==1)
dfrocpr = df.sort_values('pred')\
.groupby('pred')['label'].agg([n0,n1,len])\
.reset_index().rename(columns={'n0':'countN','n1':'countP','len':'countpred'})\
.assign(
FN = lambda x: np.cumsum(x.countP),
TN = lambda x: np.cumsum(x.countN)
).assign(
TP = lambda x: sum(x.countP) - x.FN,
FP = lambda x: sum(x.countN) - x.TN
).assign(
TPR = lambda x: x.TP/(x.TP+x.FN),
FPR = lambda x: x.FP/(x.TN+x.FP),
precision = lambda x: x.TP/(x.TP+x.FP),
recall = lambda x: x.TP/(x.TP+x.FN)
).assign(
F1 = lambda x: 2*x.precision*x.recall/(x.precision+x.recall)
)
return dfrocpr
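# Minimal usage sketch for eva_dfrocpr: it returns one row per unique score with the
# confusion-matrix counts and the derived TPR/FPR/precision/recall/F1; toy data only.
def _example_dfrocpr():
    toy = pd.DataFrame({'label': [1, 0, 1, 0, 0, 1],
                        'pred': [.9, .8, .7, .4, .3, .2]})
    roc = eva_dfrocpr(toy)
    return roc[['pred', 'TPR', 'FPR', 'precision', 'recall', 'F1']]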
# plot roc
def eva_proc(dfrocpr, title):
dfrocpr = pd.concat(
[dfrocpr[['FPR','TPR']], pd.DataFrame({'FPR':[0,1], 'TPR':[0,1]})],
ignore_index=True).sort_values(['FPR','TPR'])
auc = dfrocpr.sort_values(['FPR','TPR'])\
.assign(
TPR_lag=lambda x: x['TPR'].shift(1), FPR_lag=lambda x: x['FPR'].shift(1)
).assign(
auc=lambda x: (x.TPR+x.TPR_lag)*(x.FPR-x.FPR_lag)/2
)['auc'].sum()
###### plot ######
# fig, ax = plt.subplots()
# ks, cumbad, cumgood
plt.plot(dfrocpr.FPR, dfrocpr.TPR, 'k-')
# ks vline
x=np.array(np.arange(0,1.1,0.1))
plt.plot(x, x, 'r--')
# fill
plt.fill_between(dfrocpr.FPR, 0, dfrocpr.TPR, color='blue', alpha=0.1)
# set xylabel
plt.gca().set(title=title+'ROC',
xlabel='FPR', ylabel='TPR',
xlim=[0,1], ylim=[0,1], aspect='equal')
# text
# plt.text(0.5,0.96, 'ROC', fontsize=15, horizontalalignment='center')
plt.text(0.55,0.45, 'AUC:'+str(round(auc,4)), horizontalalignment='center', color='b')
# plt.grid()
# plt.show()
# return fig
# plot ppr
def eva_ppr(dfrocpr, title):
###### plot ######
# fig, ax = plt.subplots()
# ks, cumbad, cumgood
plt.plot(dfrocpr.recall, dfrocpr.precision, 'k-')
# ks vline
x=np.array(np.arange(0,1.1,0.1))
plt.plot(x, x, 'r--')
# set xylabel
plt.gca().set(title=title+'P-R',
xlabel='Recall', ylabel='Precision',
xlim=[0,1], ylim=[0,1], aspect='equal')
# text
# plt.text(0.5,0.96, 'P-R', fontsize=15, horizontalalignment='center')
# plt.grid()
# plt.show()
# return fig
# plot f1
def eva_pf1(dfrocpr, title):
dfrocpr=dfrocpr.assign(pop=lambda x: np.cumsum(x.countpred)/sum(x.countpred))
###### plot ######
# fig, ax = plt.subplots()
# ks, cumbad, cumgood
plt.plot(dfrocpr['pop'], dfrocpr['F1'], 'k-')
# ks vline
F1max_pop = dfrocpr.loc[dfrocpr['F1'].idxmax(),'pop']
F1max_F1 = dfrocpr.loc[dfrocpr['F1'].idxmax(),'F1']
plt.plot([F1max_pop,F1max_pop], [0,F1max_F1], 'r--')
# set xylabel
plt.gca().set(title=title+'F1',
xlabel='% of population', ylabel='F1',
xlim=[0,1], ylim=[0,1], aspect='equal')
# pred text
pred_0=dfrocpr.loc[dfrocpr['pred'].idxmin(),'pred']
pred_F1max=dfrocpr.loc[dfrocpr['F1'].idxmax(),'pred']
pred_1=dfrocpr.loc[dfrocpr['pred'].idxmax(),'pred']
if np.mean(dfrocpr.pred) < 0 or np.mean(dfrocpr.pred) > 1:
pred_0 = -pred_0
pred_F1max = -pred_F1max
pred_1 = -pred_1
plt.text(0, 0, 'pred \n'+str(round(pred_0,4)), horizontalalignment='left',color='b')
plt.text(F1max_pop, 0, 'pred \n'+str(round(pred_F1max,4)), horizontalalignment='center',color='b')
plt.text(1, 0, 'pred \n'+str(round(pred_1,4)), horizontalalignment='right',color='b')
# title F1
plt.text(F1max_pop, F1max_F1, 'F1 max: \n'+ str(round(F1max_F1,4)), horizontalalignment='center',color='b')
# plt.grid()
# plt.show()
# return fig
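# Minimal sketch of wiring the helpers together by hand: the eva_p* functions draw onto
# the current matplotlib axes, so a KS-plus-ROC panel can be assembled roughly like this
# (perf_eva below packages the same idea with more options).
def _example_plot_ks_roc(label, pred):
    df = pd.DataFrame({'label': label, 'pred': pred})
    plt.subplot(1, 2, 1)
    eva_pks(eva_dfkslift(df), 'toy ')
    plt.subplot(1, 2, 2)
    eva_proc(eva_dfrocpr(df), 'toy ')
    plt.show()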
def perf_eva(label, pred, title=None, groupnum=None, plot_type=["ks", "roc"], show_plot=True, positive="bad|1", seed=186):
'''
KS, ROC, Lift, PR
------
perf_eva provides performance evaluations, such as
    Kolmogorov-Smirnov (KS), ROC, lift and precision-recall curves,
based on provided label and predicted probability values.
Params
------
    label: Label values, such as 0s and 1s, where 0 represents good
    and 1 represents bad.
pred: Predicted probability or score.
title: Title of plot, default is "performance".
    groupnum: The group number when calculating KS. Default is None,
    which means the sample size is used.
    plot_type: Types of performance plot, such as "ks", "lift", "roc", "pr".
    Default is ["ks", "roc"].
    show_plot: Logical value, default is True. Whether to show the plot.
    positive: Value of positive class, default is "bad|1".
    seed: Integer, default is 186. The specified seed is used to randomly sort the data.
Returns
------
dict
ks, auc, gini values, and figure objects
Details
------
    Accuracy = (true positive + true negative) / total cases
    Error rate = (false positive + false negative) / total cases
    TPR, True Positive Rate (Recall or Sensitivity) = true positive / total actual positive
    PPV, Positive Predicted Value (Precision) = true positive / total predicted positive
    TNR, True Negative Rate (Specificity) = true negative / total actual negative
    NPV, Negative Predicted Value = true negative / total predicted negative
Examples
------
import scorecardpy
# load data
dat = sc.germancredit()
# filter variable via missing rate, iv, identical value rate
dt_sel = sc.var_filter(dat, "creditability")
# woe binning ------
bins = sc.woebin(dt_sel, "creditability")
dt_woe = sc.woebin_ply(dt_sel, bins)
y = dt_woe.loc[:,'creditability']
X = dt_woe.loc[:,dt_woe.columns != 'creditability']
# logistic regression ------
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(penalty='l1', C=0.9, solver='saga')
lr.fit(X, y)
# predicted proability
dt_pred = lr.predict_proba(X)[:,1]
# performace ------
# Example I # only ks & auc values
sc.perf_eva(y, dt_pred, show_plot=False)
# Example II # ks & roc plot
sc.perf_eva(y, dt_pred)
# Example III # ks, lift, roc & pr plot
sc.perf_eva(y, dt_pred, plot_type = ["ks","lift","roc","pr"])
'''
# inputs checking
if len(label) != len(pred):
        warnings.warn('Incorrect inputs; label and pred should be lists of the same length.')
# if pred is score
if np.mean(pred) < 0 or np.mean(pred) > 1:
        warnings.warn('Since the average of pred is not in [0,1], it is treated as a predicted score rather than a probability.')
pred = -pred
# random sort datatable
df = | pd.DataFrame({'label':label, 'pred':pred}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
description: cleaning tools for tidals (tidepool data analytics tools)
created: 2018-07
author: <NAME>
license: BSD-2-Clause
"""
import pandas as pd
import numpy as np
def remove_duplicates(df, criteriaDF):
nBefore = len(df)
df = df.loc[~(criteriaDF.duplicated())]
df = df.reset_index(drop=True)
nDuplicatesRemoved = nBefore - len(df)
return df, nDuplicatesRemoved
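# Minimal usage sketch for remove_duplicates: rows whose values in criteriaDF repeat an
# earlier row are dropped; here the criteria frame is simply a subset of the columns.
def _example_remove_duplicates():
    df = pd.DataFrame({'time': ['t1', 't1', 't2'], 'value': [5, 5, 6]})
    deduped, n_removed = remove_duplicates(df, df[['time', 'value']])
    return deduped, n_removed  # the second row is dropped, so n_removed == 1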
def round_time(
df,
timeIntervalMinutes=5,
timeField="time",
roundedTimeFieldName="roundedTime",
verbose=False,
):
# A general purpose round time function that rounds the
# "time" field to nearest <timeIntervalMinutes> minutes
# INPUTS:
# * a dataframe (df) that contains a time field
# * timeIntervalMinutes defaults to 5 minutes given that most cgms output every 5 minutes
# * timeField defaults to UTC time "time"
# * verbose specifies whether the "TIB" and "TIB_cumsum" columns are returned
df.sort_values(by=timeField, ascending=True, inplace=True)
df.reset_index(drop=True, inplace=True)
# calculate the time-in-between (TIB) consecutive records
t = | pd.to_datetime(df.time) | pandas.to_datetime |
"""
Utils for time series generation
--------------------------------
"""
import math
from typing import Union
import numpy as np
import pandas as pd
import holidays
from ..timeseries import TimeSeries
from ..logging import raise_if_not, get_logger
logger = get_logger(__name__)
def constant_timeseries(value: float = 1,
length: int = 10,
freq: str = 'D',
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a constant univariate TimeSeries with the given value, length, start date and frequency.
Parameters
----------
value
The constant value that the TimeSeries object will assume at every index.
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected;
see `docs <https://pandas.pydata.org/pandas-docs/stable/user_guide/TimeSeries.html#dateoffset-objects>`_.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A constant TimeSeries with value 'value'.
"""
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.full(length, value)
return TimeSeries.from_times_and_values(times, values, freq=freq)
def linear_timeseries(start_value: float = 0,
end_value: float = 1,
length: int = 10,
freq: str = 'D',
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a univariate TimeSeries with a starting value of `start_value` that increases linearly such that
it takes on the value `end_value` at the last entry of the TimeSeries. This means that
the difference between two adjacent entries will be equal to
(`end_value` - `start_value`) / (`length` - 1).
Parameters
----------
start_value
The value of the first entry in the TimeSeries.
end_value
The value of the last entry in the TimeSeries.
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A linear TimeSeries created as indicated above.
"""
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.linspace(start_value, end_value, length)
return TimeSeries.from_times_and_values(times, values, freq=freq)
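# Minimal usage sketch: with start_value=0, end_value=1 and length=5 the step between
# adjacent entries is (1 - 0) / (5 - 1) = 0.25, giving values 0, 0.25, 0.5, 0.75, 1.
def _example_linear_timeseries():
    return linear_timeseries(start_value=0, end_value=1, length=5)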
def sine_timeseries(value_frequency: float = 0.1,
value_amplitude: float = 1.,
value_phase: float = 0.,
value_y_offset: float = 0.,
length: int = 10,
freq: str = 'D',
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a univariate TimeSeries with a sinusoidal value progression with a given frequency, amplitude,
phase and y offset.
Parameters
----------
value_frequency
The number of periods that take place within one time unit given in `freq`.
value_amplitude
The maximum difference between any value of the returned TimeSeries and `y_offset`.
value_phase
The relative position within one period of the first value of the returned TimeSeries (in radians).
value_y_offset
The shift of the sine function along the y axis.
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A sinusoidal TimeSeries parametrized as indicated above.
"""
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.array(range(length), dtype=float)
f = np.vectorize(
lambda x: value_amplitude * math.sin(2 * math.pi * value_frequency * x + value_phase) + value_y_offset
)
values = f(values)
return TimeSeries.from_times_and_values(times, values, freq=freq)
def gaussian_timeseries(length: int = 10,
freq: str = 'D',
mean: Union[float, np.ndarray] = 0.,
std: Union[float, np.ndarray] = 1.,
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a gaussian univariate TimeSeries by sampling all the series values independently,
from a gaussian distribution with mean `mean` and standard deviation `std`.
Parameters
----------
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected.
mean
The mean of the gaussian distribution that is sampled at each step.
If a float value is given, the same mean is used at every step.
If a numpy.ndarray of floats with the same length as `length` is
given, a different mean is used at each time step.
std
The standard deviation of the gaussian distribution that is sampled at each step.
If a float value is given, the same standard deviation is used at every step.
If an array of dimension `(length, length)` is given, it will
be used as covariance matrix for a multivariate gaussian distribution.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A white noise TimeSeries created as indicated above.
"""
if (type(mean) == np.ndarray):
raise_if_not(mean.shape == (length,), 'If a vector of means is provided, '
'it requires the same length as the TimeSeries.', logger)
if (type(std) == np.ndarray):
raise_if_not(std.shape == (length, length), 'If a matrix of standard deviations is provided, '
'its shape has to match the length of the TimeSeries.', logger)
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.random.normal(mean, std, size=length)
return TimeSeries.from_times_and_values(times, values, freq=freq)
def random_walk_timeseries(length: int = 10,
freq: str = 'D',
mean: float = 0.,
std: float = 1.,
start_ts: pd.Timestamp = pd.Timestamp('2000-01-01')) -> TimeSeries:
"""
Creates a random walk univariate TimeSeries, where each step is obtained by sampling a gaussian distribution
with mean `mean` and standard deviation `std`.
Parameters
----------
length
The length of the returned TimeSeries.
freq
The time difference between two adjacent entries in the returned TimeSeries. A DateOffset alias is expected.
mean
The mean of the gaussian distribution that is sampled at each step.
std
The standard deviation of the gaussian distribution that is sampled at each step.
start_ts
The time index of the first entry in the returned TimeSeries.
Returns
-------
TimeSeries
A random walk TimeSeries created as indicated above.
"""
times = pd.date_range(periods=length, freq=freq, start=start_ts)
values = np.cumsum(np.random.normal(mean, std, size=length))
return TimeSeries.from_times_and_values(times, values, freq=freq)
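# Minimal usage sketch: the walk is a cumulative sum of i.i.d. gaussian steps, so after
# n steps its expected level is roughly n * mean (about 10 for the toy call below).
def _example_random_walk_timeseries():
    return random_walk_timeseries(length=100, mean=0.1, std=0.5)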
def holidays_timeseries(time_index,
country_code: str,
prov: str = None,
state: str = None) -> TimeSeries:
"""
Creates a binary univariate TimeSeries with index `time_index` that equals 1 at every index that lies within
(or equals) a selected country's holiday, and 0 otherwise.
Available countries can be found `here <https://github.com/dr-prodigy/python-holidays#available-countries>`_.
Parameters
----------
    time_index
        The time index over which to generate the holiday series
    country_code
        The country ISO code
prov
The province
state
The state
Returns
-------
TimeSeries
A new binary holiday TimeSeries instance.
"""
scope = range(time_index[0].year, (time_index[-1] + | pd.Timedelta(days=1) | pandas.Timedelta |