| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
import pandas as pd
import numpy as np
from application import model_builder
def test_cluster():
# Arrange
df = pd.DataFrame()
df["Some Feature"] = [30.0, 40.0, 50.0, 50, 49, 29]
df["Some Feature 2"] = [5, 6, 6, 6, 5.9, 4.9]
df["Some Feature 3"] = [100, 90, 90, 91, 90, 101]
df["Answer"] = [0, 1, 1, 1, 1, 0]
# Act - Data is arranged to have 2 extreme clusters to force predictable results
cluster_labels, cluster_count = list(model_builder.cluster(df, random_state=0))
# Assert
assert list(cluster_labels) == [0, 1, 1, 1, 1, 0]
assert cluster_count == 2
def test_determine_target_cluster_success():
# Arrange
success = | pd.Series([0, 1, 1, 1, 0, 1]) | pandas.Series |
from scipy.stats import entropy, skew, kurtosis, iqr
from scipy.signal import welch  # welch() is used below but was not explicitly imported
from tsfresh.feature_extraction.feature_calculators import *
import pandas as pd
import numpy as np
def fftfun(data):
features = np.zeros(21)
f, Pxx_den = welch(data, 50)
indices_of_max = Pxx_den.argsort()[-3:][::-1]
features[0:3] = (f[indices_of_max])
features[3:6] = (Pxx_den[indices_of_max])
Y = np.fft.rfft(data)
energy_feat = np.sum(np.square(np.abs(Y))) / len(data)
entropy_feat = entropy(np.abs(Y))
features[6] = energy_feat
features[7] = entropy_feat
total_fft_sum = np.sum(np.square(Pxx_den))
bin1 = np.sum(np.square(Pxx_den[:5])) / total_fft_sum
bin2 = np.sum(np.square(Pxx_den[5:10])) / total_fft_sum
bin3 = np.sum(np.square(Pxx_den[10:15])) / total_fft_sum
bin4 = np.sum(np.square(Pxx_den[15:20])) / total_fft_sum
bin5 = np.sum(np.square(Pxx_den[20:25])) / total_fft_sum
bin6 = np.sum(np.square(Pxx_den[25:30])) / total_fft_sum
bin7 = np.sum(np.square(Pxx_den[30:35])) / total_fft_sum
bin8 = np.sum(np.square(Pxx_den[35:40])) / total_fft_sum
bin9 = np.sum(np.square(Pxx_den[40:45])) / total_fft_sum
bin10 = np.sum(np.square(Pxx_den[45:])) / total_fft_sum
features[8:18] = [bin1, bin2, bin3, bin4, bin5, bin6, bin7, bin8, bin9, bin10]
skewness = skew(Pxx_den)
kurtos = kurtosis(Pxx_den)
interquart = iqr(data)
features[18:21] = [skewness, kurtos, interquart]
return pd.Series(features)
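# Descriptive summary of the 21 features returned by fftfun (added for clarity,
# derived from the assignments above):
#   [0:3]   frequencies of the three largest Welch PSD peaks
#   [3:6]   power of those three peaks
#   [6]     FFT energy of the signal, [7] spectral entropy of |rfft|
#   [8:18]  relative power in ten PSD bins of width 5 (the last bin covers index 45 onward)
#   [18:21] skewness and kurtosis of the PSD, and IQR of the raw signal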
def fun_fft_coefficient(data, param):
re = data.apply(lambda x: fft_coefficient(x, [{'coeff': 2, 'attr': param}]))
re = re.apply(lambda x: list(*zip(x))[0][1])
return re
def fun_time_reversal_asymmetry_statistic(data):
re = data.apply(lambda x: time_reversal_asymmetry_statistic(x, 200))
return re
def fun_cid_ce(data):
re = data.apply(lambda x: cid_ce(x, True))
return re
def fun_autocorrelation(data):
re = data.apply(lambda x: autocorrelation(x, 200))
return re
def fun_ratio_beyond_r_sigma(data):
re = data.apply(lambda x: ratio_beyond_r_sigma(x, 1))
return re
def fun_spkt_welch_density(data):
param = [{"coeff": 1}]
re = data.apply(lambda x: spkt_welch_density(x=x, param=param))
re = re.apply(lambda x: list(*zip(x))[0][1])
return re
def fun_index_mass_quantile(x):
re = x.apply(lambda x: index_mass_quantile(x, [{"q": 0.65}]))
re = re.apply(lambda x: list(*zip(x))[0][1])
return re
def fun(x, f):
re = x.apply(lambda x: fft_aggregated(x, [{"aggtype": f}]))
re = re.apply(lambda x: list(*zip(x))[0][1])
return re
def fun_variance(x):
re = x.apply(lambda x: fft_aggregated(x, [{"aggtype": "variance"}]))
re = re.apply(lambda x: list(*zip(x))[0][1])
return re
def feature_calculator(data):
result = []
res = data.apply(fftfun)
result.extend(res.values.flatten().tolist())
fft_coefficient_real = fun_fft_coefficient(data, "real")
result.extend(fft_coefficient_real.values.tolist())
fft_coefficient_imag = fun_fft_coefficient(data, "imag")
result.extend(fft_coefficient_imag.tolist())
fft_coefficient_abs = fun_fft_coefficient(data, "abs")
result.extend(fft_coefficient_abs.tolist())
fft_coefficient_angle = fun_fft_coefficient(data, "angle")
result.extend(fft_coefficient_angle.tolist())
time_reversal_asymmetry_statistic = fun_time_reversal_asymmetry_statistic(data)
result.extend(time_reversal_asymmetry_statistic.tolist())
cid_ce = fun_cid_ce(data)
result.extend(cid_ce.tolist())
autocorrelation = fun_autocorrelation(data)
result.extend(autocorrelation.tolist())
ratio_beyond_r_sigma = fun_ratio_beyond_r_sigma(data)
result.extend(ratio_beyond_r_sigma.tolist())
spkt_welch_density = fun_spkt_welch_density(data)
result.extend(spkt_welch_density.tolist())
mean_second_derivative_central1 = data.apply(mean_second_derivative_central)
result.extend(mean_second_derivative_central1.tolist())
index_mass_quantile = fun_index_mass_quantile(data)
result.extend(index_mass_quantile.tolist())
fft_aggregated_centroid = fun(data, "centroid")
result.extend(fft_aggregated_centroid.tolist())
# ------------------------------------------------------------
fft_aggregated_skew = fun(data, "skew")
result.extend(fft_aggregated_skew.tolist())
# -----------------------------------------------------------------------
fft_aggregated_kurtosis = fun(data, "kurtosis")
result.extend(fft_aggregated_kurtosis.tolist())
fft_aggregated_variance = fun_variance(data)
result.extend(fft_aggregated_variance.tolist())
# # ---------------------------------abs_energy------------------------------------------
abs_energy1 = data.apply(abs_energy)
result.extend(abs_energy1.tolist())
#
# # ---------------------------------------- numpy statistical functions -------------------------------------------------
#
median = data.median()
mean = data.mean()
max = data.max()
min = data.min()
var = data.var()
std = data.std()
result.extend(median.tolist())
result.extend(mean.tolist())
result.extend(max.tolist())
result.extend(min.tolist())
result.extend(var.tolist())
result.extend(std.tolist())
# # # ---------------------------------------------------------------------------------------------------
count_below_mean1 = data.apply(count_below_mean)
result.extend(count_below_mean1.tolist())
# #
# # # --------------------------------------------------------------------------------------------------------------
# # #
absolute_sum_of_changes1 = data.apply(absolute_sum_of_changes)
result.extend(absolute_sum_of_changes1.tolist())
# --------------------------------------------------------------------------------------------------------------
kurtosis1 = data.apply(kurtosis)
result.extend(kurtosis1.tolist())
# # #
# # # # --------------------------------------------------------------------------------------------------------------
# # #
mean_abs_change1 = data.apply(mean_abs_change)
result.extend(mean_abs_change1.tolist())
skewness1 = data.apply(skewness)
result.extend(skewness1.tolist())
# # #
# # # # -------------------------------------------------------------------------------
variance_larger_than_standard_deviation1 = data.apply(variance_larger_than_standard_deviation)
result.extend(variance_larger_than_standard_deviation1.tolist())
# # # #
# # # # -------------------------------------------------------------------------------
percentage_of_reoccurring_values_to_all_values1 = data.apply(percentage_of_reoccurring_values_to_all_values)
result.extend(percentage_of_reoccurring_values_to_all_values1.tolist())
# #
# # #-------------------------------------------------------------------------------
number_peaks1 = number_peaks(data, 3)
result.extend(number_peaks1.tolist())
# result = np.asarray(result).reshape(1,-1)
# print(result.shape)
return result
def rotate(data):
orientation = np.asarray([data['o_w'], data['o_x'], data['o_y'], data['o_z']])
orien = orientation.T
rn0 = 1 - 2 * (np.square(orien[:, 2]) + np.square(orien[:, 3]))
rn1 = 2 * (orien[:, 1] * orien[:, 2] - orien[:, 0] * orien[:, 3])
rn2 = 2 * (orien[:, 1] * orien[:, 3] + orien[:, 0] * orien[:, 2])
rn3 = 2 * (orien[:, 1] * orien[:, 2] + orien[:, 0] * orien[:, 3])
rn4 = 1 - 2 * (np.square(orien[:, 1]) + np.square(orien[:, 3]))
rn5 = 2 * (orien[:, 2] * orien[:, 3] - orien[:, 0] * orien[:, 1])
rn6 = 2 * (orien[:, 1] * orien[:, 3] - orien[:, 0] * orien[:, 2])
rn7 = 2 * (orien[:, 2] * orien[:, 3] + orien[:, 0] * orien[:, 1])
rn8 = 1 - 2 * (np.square(orien[:, 1]) + np.square(orien[:, 2]))
acc = np.asarray([data['acc_x'], data['acc_y'], data['acc_z']])
acc_x = pd.DataFrame(rn0 * acc[0] + rn1 * acc[1] + rn2 * acc[2])
acc_y = pd.DataFrame(rn3 * acc[0] + rn4 * acc[1] + rn5 * acc[2])
acc_z = pd.DataFrame(rn6 * acc[0] + rn7 * acc[1] + rn8 * acc[2])
acc_xyz = pd.DataFrame(np.sqrt(np.square(data['acc_x']) + np.square(data['acc_y']) + np.square(data['acc_z'])))
acc = pd.DataFrame(np.hstack((acc_xyz, acc_x, acc_y, acc_z)))
pitch = pd.DataFrame(np.arctan(rn7 / rn8))
roll = pd.DataFrame(np.arcsin(-rn6))
yaw = pd.DataFrame(np.arctan(rn3 / rn0))
orien = pd.DataFrame(orien)
ori = pd.concat((orien, pitch, roll, yaw), axis=1)
# Output columns: ['o_w', 'o_x', 'o_y', 'o_z', 'pitch', 'roll', 'yaw']
# -----------------------------------------------------------------------------------------------
# Take the square root of the sum of squares of m_x, m_y, m_z as a new column, and apply the same coordinate transform to the magnetometer readings
mag = np.asarray([data['mag_x'], data['mag_y'], data['mag_z']])
mag_x = pd.DataFrame(rn0 * mag[0] + rn1 * mag[1] + rn2 * mag[2])
mag_y = pd.DataFrame(rn3 * mag[0] + rn4 * mag[1] + rn5 * mag[2])
mag_z = pd.DataFrame(rn6 * mag[0] + rn7 * mag[1] + rn8 * mag[2])
mag = mag.T
ma = np.sqrt(np.square(mag[:, 0]) + np.square(mag[:, 1]) + np.square(mag[:, 2])).reshape(-1, 1)
magnetic = pd.DataFrame(np.hstack((ma, mag_x, mag_y, mag_z)))
# Output columns: ['ma', 'm_x', 'm_y', 'm_z']
# -----------------------------------------------------------------------------------------------
gyr_xyz = np.sqrt(np.square(data['gyr_x']) + np.square(data['gyr_y']) + np.square(data['gyr_z']))
gra_xyz = np.sqrt(np.square(data['gra_x']) + np.square(data['gra_y']) + np.square(data['gra_z']))
lacc_xyz = np.sqrt(np.square(data['lacc_x']) + np.square(data['lacc_y']) + np.square(data['lacc_z']))
remain = pd.DataFrame(np.asarray([gyr_xyz, data['gyr_x'], data['gyr_y'], data['gyr_z'],
gra_xyz, data['gra_x'], data['gra_y'], data['gra_z'],
lacc_xyz, data['lacc_x'], data['lacc_y'], data['lacc_z']
]).T)
fin = | pd.concat((acc, ori, magnetic, remain), axis=1) | pandas.concat |
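# A minimal, self-contained sanity check (added as an illustrative sketch, not
# part of the original file) for the quaternion-to-rotation-matrix construction
# used in rotate() above: with the identity quaternion (w=1, x=y=z=0) the
# rn0..rn8 matrix must leave the acceleration vector unchanged. Values are hypothetical.
import numpy as np
w, x, y, z = 1.0, 0.0, 0.0, 0.0  # identity orientation quaternion
R = np.array([
[1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],   # rn0, rn1, rn2
[2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],   # rn3, rn4, rn5
[2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],   # rn6, rn7, rn8
])
acc = np.array([0.1, 0.2, 9.8])  # raw accelerometer sample
assert np.allclose(R @ acc, acc)  # identity quaternion: rotated == raw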
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import v3io_frames.frames_pb2 as fpb
from conftest import here
from v3io_frames import pbutils
def test_encode_df():
labels = {
'int': 7,
'str': 'wassup?',
}
df = pd.read_csv('{}/weather.csv'.format(here))
df['STATION_CAT'] = df['STATION'].astype('category')
df['WDF2_F'] = df['WDF2'].astype(np.float)
msg = pbutils.df2msg(df, labels)
names = [col.name for col in msg.columns]
assert set(names) == set(df.columns), 'columns mismatch'
assert not msg.indices, 'has index'
assert pbutils.pb2py(msg.labels) == labels, 'labels mismatch'
# Now with index
index_name = 'DATE'
df = df.set_index(index_name)
msg = pbutils.df2msg(df, None)
names = [col.name for col in msg.columns]
assert set(names) == set(df.columns), 'columns mismatch'
assert msg.indices, 'no index'
assert msg.indices[0].name == index_name, 'bad index name'
def test_multi_index():
tuples = [
('bar', 'one'),
('bar', 'two'),
('baz', 'one'),
('baz', 'two'),
('foo', 'one'),
('foo', 'two'),
('qux', 'one'),
('qux', 'two')]
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
df = pd.DataFrame(index=index)
df['x'] = range(len(df))
data = pbutils.df2msg(df).SerializeToString()
msg = fpb.Frame.FromString(data)
for col in msg.indices:
values = col.strings
assert len(values) == len(df), 'bad index length'
def test_categorical():
s = | pd.Series(['a', 'b', 'c'] * 7, name='cat') | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# ## Aim: Collect and combine relevant mouse expression data from the Stemformatics data portal
#
# Link: https://www.stemformatics.org/workbench/download_multiple_datasets.
#
# Stemformatics is an established gene expression data portal containing over 420 public gene expression datasets derived from microarray, RNA sequencing and single cell profiling technologies. It includes curated ‘collections’ of data relevant to cell reprogramming, as well as hematopoiesis and leukaemia.
#
# ### Samples
#
# Set the search field to 'species' and use 'Mus musculus' as the search key.
#
# ### Processing steps
#
# - Sample selection
# - Combine selected datasets based on platforms
# - Combine all selected datasets
# In[1]:
import pandas as pd
import numpy as np
import atlas
import handler
import requests
# In[2]:
# inspect the samples metadata
samples = pd.read_csv('/Users/monica/Downloads/export_metadata_samples_v7.2.4.tsv', sep='\t', index_col=2)
samples.head()
# Many of the samples are not healthy blood cells (e.g. iPSCs, AML samples etc.). We will need to select for healthy blood cells from the metadata, and download only the selected samples.
# ### Sample selection
# In[3]:
def select_samples(samples):
'''This function takes the Stemformatics samples metadata and returns samples that are annotated to be blood cells.'''
pos_selected_id = []
neg_selected_id = []
patterns_pos = ['lymp', '[Hh]aem', '[Hh]em', 'HSC','[Mm]ono', '[Bb]-*\ *cell', '[Mm]yelo', 'killer',
'NK', '[Mm]eg', '[Bb]aso', '[Nn]eut', '[Ee]os', '[Pp]las', '[Ee]ryt', '\W[Tt]-*\ *cell', 'DC', '[Dd]endri',
'phage', 'macr']
patterns_neg = ['iPS', 'MSC', 'AML', 'reprogram', 'MAPC', 'KO', 'endothelial', 'LPS', 'mutant', 'Dusp', 'LCMV', 'LSK', 'Chaudhury', 'BLSP',
'Bruttger']
for col in samples.columns:
l = samples[samples[col].notna()]
for p in patterns_pos:
pos_selected_id += l[(l[col].astype(str).str.contains(p) == True)].index.tolist()
for n in patterns_neg:
neg_selected_id += l[(l[col].astype(str).str.contains(n) == True)].index.tolist()
selected = samples.loc[samples.index.isin(set(pos_selected_id))]
return selected.loc[~selected.index.isin(set(neg_selected_id))]
# In[4]:
selected_samples = select_samples(samples)
print(selected_samples.shape)
selected_samples.head() # 324 samples are selected
# ### Combine datasets based on platforms
# In[5]:
# add platform information to the samples metadata
datasets = | pd.read_csv('/Users/monica/Downloads/export_metadata_datasets_v7.2.4.tsv', sep='\t', index_col=0) | pandas.read_csv |
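# A hedged sketch of the "add platform information" step described above. The
# join key ('ds_id') and the 'platform' column name are assumptions about the
# Stemformatics metadata layout, not taken from the original notebook:
#
#     selected_samples = selected_samples.merge(
#         datasets[['platform']], left_on='ds_id', right_index=True, how='left')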
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
_get_dtype_from_object,
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_object_dtype,
is_integer_dtype,
)
from pandas.core.index import _ensure_index_from_sequences
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.util._validators import validate_bool_kwarg
import itertools
import functools
import numpy as np
import re
import sys
import warnings
from modin.error_message import ErrorMessage
from .utils import from_pandas, to_pandas, _inherit_docstrings
from .iterator import PartitionIterator
from .series import SeriesView
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(object):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, DataFrame):
self._query_compiler = data._query_compiler
return
# Check type of data and use appropriate constructor
if data is not None or query_compiler is None:
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __str__(self):
return repr(self)
def _build_repr_df(self, num_rows, num_cols):
# Add one here so that pandas automatically adds the dots
# It turns out to be faster to extract 2 extra rows and columns than to
# build the dots ourselves.
num_rows_for_head = num_rows // 2 + 1
num_cols_for_front = num_cols // 2 + 1
if len(self.index) <= num_rows:
head = self._query_compiler
tail = None
else:
head = self._query_compiler.head(num_rows_for_head)
tail = self._query_compiler.tail(num_rows_for_head)
if len(self.columns) <= num_cols:
head_front = head.to_pandas()
# Creating these empty to make the concat logic simpler
head_back = pandas.DataFrame()
tail_back = pandas.DataFrame()
if tail is not None:
tail_front = tail.to_pandas()
else:
tail_front = pandas.DataFrame()
else:
head_front = head.front(num_cols_for_front).to_pandas()
head_back = head.back(num_cols_for_front).to_pandas()
if tail is not None:
tail_front = tail.front(num_cols_for_front).to_pandas()
tail_back = tail.back(num_cols_for_front).to_pandas()
else:
tail_front = tail_back = pandas.DataFrame()
head_for_repr = pandas.concat([head_front, head_back], axis=1)
tail_for_repr = pandas.concat([tail_front, tail_back], axis=1)
return pandas.concat([head_for_repr, tail_for_repr])
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 30
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.index
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set for this DataFrame.
"""
self._query_compiler.index = new_index
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
new_columns: The new columns to set for this DataFrame.
"""
self._query_compiler.columns = new_columns
index = property(_get_index, _set_index)
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
dtypes = self.dtypes.copy()
ftypes = ["{0}:dense".format(str(dtype)) for dtype in dtypes.values]
result = pandas.Series(ftypes, index=self.columns)
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return to_pandas(self).values
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(query_compiler=self._query_compiler.copy())
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
**kwargs
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
idx_name = ""
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
if isinstance(by, pandas.Series):
by = by.values.tolist()
mismatch = (
len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
)
if all(obj in self for obj in by) and mismatch:
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
**kwargs
)
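# Usage sketch (added for illustration; modin mirrors the pandas groupby API,
# values below are hypothetical):
#
#     df = DataFrame({"team": ["a", "a", "b"], "pts": [1, 2, 3]})
#     df.groupby("team").sum()                    # pts aggregated per team
#     df.groupby(["team"], as_index=False).sum()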
def sum(
self,
axis=None,
skipna=True,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return DataFrame(query_compiler=self._query_compiler.isin(values=values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args, **kwargs))
T = property(transpose)
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
axis = [pandas.DataFrame()._get_axis_number(ax) for ax in axis]
result = self
for ax in axis:
result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
return self._create_dataframe_from_compiler(result._query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis)
if how is not None and how not in ["any", "all"]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
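# Usage sketch (added for illustration; pandas-compatible semantics, values
# hypothetical):
#
#     df = DataFrame({"a": [1, None], "b": [1, 2]})
#     df.dropna()                     # drop rows containing any NA
#     df.dropna(axis=1, how="any")    # drop column "a" instead
#     df.dropna(thresh=2)             # keep rows with at least 2 non-NA values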
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
other: What to add to this DataFrame.
axis: The axis to apply addition over. Only applicable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.add,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = self._query_compiler.add(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pandas.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop("is_transform", None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
kwargs.pop("_level", None)
if isinstance(arg, string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
return self._default_to_pandas(pandas.DataFrame.agg, arg, *args, **kwargs)
elif is_list_like(arg) or callable(arg):
return self.apply(arg, axis=_axis, args=args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, func, None)
if f is not None:
return self._default_to_pandas(pandas.DataFrame.agg, func, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(func))
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.align,
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def all(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def any(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pandas.Series, dict)):
if isinstance(other, dict):
other = pandas.Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = pandas.Index([other.name], name=self.index.name)
# Create a Modin DataFrame from this Series for ease of development
other = DataFrame(pandas.DataFrame(other).T, index=index)._query_compiler
elif isinstance(other, list):
if not isinstance(other[0], DataFrame):
other = pandas.DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = DataFrame(other.loc[:, self.columns])._query_compiler
else:
other = DataFrame(other)._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = self.index.append(other.index)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
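# Usage sketch (added for illustration): appending a dict or an unnamed Series
# requires ignore_index=True, matching the checks above (values hypothetical):
#
#     df = DataFrame({"a": [1, 2]})
#     df.append({"a": 3}, ignore_index=True)
#     df.append(pandas.Series({"a": 4}, name=2))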
def apply(
self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, string_types):
if axis == 1:
kwds["axis"] = axis
return getattr(self, func)(*args, **kwds)
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif is_list_like(func):
if axis == 1:
raise TypeError(
"(\"'list' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
elif not callable(func):
return
query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)
if isinstance(query_compiler, pandas.Series):
return query_compiler
return DataFrame(query_compiler=query_compiler)
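# Usage sketch (added for illustration; user-defined functions trigger the
# non_verified_udf warning above, values hypothetical):
#
#     df = DataFrame({"a": [1, 2], "b": [3, 4]})
#     df.apply(np.sum)                # column-wise sums
#     df.apply(np.sum, axis=1)        # row-wise sums
#     df.apply("sum")                 # string names dispatch to df.sum()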
def as_blocks(self, copy=True):
return self._default_to_pandas(pandas.DataFrame.as_blocks, copy=copy)
def as_matrix(self, columns=None):
"""Convert the frame to its Numpy-array representation.
Args:
columns: If None, return all columns, otherwise,
returns specified columns.
Returns:
values: ndarray
"""
# TODO this is very inefficient, also see __array__
return to_pandas(self).as_matrix(columns)
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
return self._default_to_pandas(
pandas.DataFrame.asfreq,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def asof(self, where, subset=None):
return self._default_to_pandas(pandas.DataFrame.asof, where, subset=subset)
def assign(self, **kwargs):
return self._default_to_pandas(pandas.DataFrame.assign, **kwargs)
def astype(self, dtype, copy=True, errors="raise", **kwargs):
col_dtypes = {}
if isinstance(dtype, dict):
if not set(dtype.keys()).issubset(set(self.columns)) and errors == "raise":
raise KeyError(
"Only a column name can be used for the key in"
"a dtype mappings argument."
)
col_dtypes = dtype
else:
for column in self.columns:
col_dtypes[column] = dtype
new_query_compiler = self._query_compiler.astype(col_dtypes, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, not copy)
def at_time(self, time, asof=False):
return self._default_to_pandas(pandas.DataFrame.at_time, time, asof=asof)
def between_time(self, start_time, end_time, include_start=True, include_end=True):
return self._default_to_pandas(
pandas.DataFrame.between_time,
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='bfill')"""
new_df = self.fillna(
method="bfill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError(
"""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all()."""
)
else:
return to_pandas(self).bool()
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs
)
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs):
# validate inputs
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
self._validate_dtypes(numeric_only=True)
if is_list_like(lower) or is_list_like(upper):
if axis is None:
raise ValueError("Must specify axis = 0 or 1")
self._validate_other(lower, axis)
self._validate_other(upper, axis)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = numpy_compat.function.validate_clip_with_axis(axis, args, kwargs)
# any np.nan bounds are treated as None
if lower is not None and np.any(np.isnan(lower)):
lower = None
if upper is not None and np.any(np.isnan(upper)):
upper = None
new_query_compiler = self._query_compiler.clip(
lower=lower, upper=upper, axis=axis, inplace=inplace, *args, **kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
return self.clip(lower=threshold, axis=axis, inplace=inplace)
def clip_upper(self, threshold, axis=None, inplace=False):
return self.clip(upper=threshold, axis=axis, inplace=inplace)
def combine(self, other, func, fill_value=None, overwrite=True):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.combine,
other,
func,
fill_value=fill_value,
overwrite=overwrite,
)
def combine_first(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.combine_first, other=other)
def compound(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.compound, axis=axis, skipna=skipna, level=level
)
def consolidate(self, inplace=False):
return self._default_to_pandas(pandas.DataFrame.consolidate, inplace=inplace)
def convert_objects(
self,
convert_dates=True,
convert_numeric=False,
convert_timedeltas=True,
copy=True,
):
return self._default_to_pandas(
pandas.DataFrame.convert_objects,
convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy,
)
def corr(self, method="pearson", min_periods=1):
return self._default_to_pandas(
pandas.DataFrame.corr, method=method, min_periods=min_periods
)
def corrwith(self, other, axis=0, drop=False):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop
)
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.count(
axis=axis, level=level, numeric_only=numeric_only
)
def cov(self, min_periods=None):
return self._default_to_pandas(pandas.DataFrame.cov, min_periods=min_periods)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummax(
axis=axis, skipna=skipna, **kwargs
)
)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummin(
axis=axis, skipna=skipna, **kwargs
)
)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumprod(
axis=axis, skipna=skipna, **kwargs
)
)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumsum(
axis=axis, skipna=skipna, **kwargs
)
)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
if include is not None:
if not is_list_like(include):
include = [include]
include = [np.dtype(i) for i in include]
if exclude is not None:
if not is_list_like(exclude):
exclude = [exclude]
exclude = [np.dtype(e) for e in exclude]
if percentiles is not None:
pandas.DataFrame()._check_percentile(percentiles)
return DataFrame(
query_compiler=self._query_compiler.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
def diff(self, periods=1, axis=0):
"""Finds the difference between elements on the axis requested
Args:
periods: Periods to shift for forming difference
axis: Take difference over rows or columns
Returns:
DataFrame with the diff applied
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.diff(periods=periods, axis=axis)
)
def div(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.div,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.div(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def divide(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for div.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self.div(other, axis, level, fill_value)
def dot(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.dot, other)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""Return new object with labels in requested axis removed.
Args:
labels: Index or column labels to drop.
axis: Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns: Alternative to specifying axis (labels, axis=1 is
equivalent to columns=labels).
level: For MultiIndex
inplace: If True, do operation inplace and return None.
errors: If 'ignore', suppress error and existing labels are
dropped.
Returns:
dropped : type of caller
"""
# TODO implement level
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.drop,
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis = pandas.DataFrame()._get_axis_name(axis)
axes = {axis: labels}
elif index is not None or columns is not None:
axes, _ = pandas.DataFrame()._construct_axes_from_arguments(
(index, columns), {}
)
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
# TODO Clean up this error checking
if "index" not in axes:
axes["index"] = None
elif axes["index"] is not None:
if not is_list_like(axes["index"]):
axes["index"] = [axes["index"]]
if errors == "raise":
non_existent = [obj for obj in axes["index"] if obj not in self.index]
if len(non_existent):
raise ValueError(
"labels {} not contained in axis".format(non_existent)
)
else:
axes["index"] = [obj for obj in axes["index"] if obj in self.index]
# If the length is zero, we will just do nothing
if not len(axes["index"]):
axes["index"] = None
if "columns" not in axes:
axes["columns"] = None
elif axes["columns"] is not None:
if not is_list_like(axes["columns"]):
axes["columns"] = [axes["columns"]]
if errors == "raise":
non_existent = [
obj for obj in axes["columns"] if obj not in self.columns
]
if len(non_existent):
raise ValueError(
"labels {} not contained in axis".format(non_existent)
)
else:
axes["columns"] = [
obj for obj in axes["columns"] if obj in self.columns
]
# If the length is zero, we will just do nothing
if not len(axes["columns"]):
axes["columns"] = None
new_query_compiler = self._query_compiler.drop(
index=axes["index"], columns=axes["columns"]
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
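# Usage sketch (added for illustration): labels+axis and index=/columns= are
# mutually exclusive ways to name what to drop (values hypothetical):
#
#     df = DataFrame({"a": [1, 2], "b": [3, 4]})
#     df.drop("a", axis=1)                    # same as df.drop(columns="a")
#     df.drop(index=0)                        # drop the first row
#     df.drop("c", axis=1, errors="ignore")   # missing labels are tolerated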
def drop_duplicates(self, subset=None, keep="first", inplace=False):
return self._default_to_pandas(
pandas.DataFrame.drop_duplicates, subset=subset, keep=keep, inplace=inplace
)
def duplicated(self, subset=None, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.duplicated, subset=subset, keep=keep
)
def eq(self, other, axis="columns", level=None):
"""Checks element-wise that this is equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the eq over.
level: The Multilevel index level to apply eq over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.eq, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.eq(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
if isinstance(other, pandas.DataFrame):
# Copy into a Modin DataFrame to simplify logic below
other = DataFrame(other)
if not self.index.equals(other.index) or not self.columns.equals(other.columns):
return False
return all(self.eq(other).all())
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
if isinstance(new_query_compiler, pandas.Series):
return new_query_compiler
else:
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
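# Usage sketch (added for illustration): only plain expressions are accepted;
# '@' local variables are rejected by _validate_eval_query above (values
# hypothetical):
#
#     df = DataFrame({"a": [1, 2], "b": [3, 4]})
#     df.eval("a + b")                # returns the element-wise sums
#     df.eval("c = a + b")            # returns a frame with the new column "c"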
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
freq=None,
adjust=True,
ignore_na=False,
axis=0,
):
return self._default_to_pandas(
pandas.DataFrame.ewm,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
freq=freq,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
return self._default_to_pandas(
pandas.DataFrame.expanding,
min_periods=min_periods,
freq=freq,
center=center,
axis=axis,
)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='ffill')
"""
new_df = self.fillna(
method="ffill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
**kwargs
):
"""Fill NA/NaN values using the specified method.
Args:
value: Value to use to fill holes. This value cannot be a list.
method: Method to use for filling holes in reindexed Series pad.
ffill: propagate last valid observation forward to next valid
backfill.
bfill: use NEXT valid observation to fill gap.
axis: 0 or 'index', 1 or 'columns'.
inplace: If True, fill in place. Note: this will modify any other
views on this object.
limit: If method is specified, this is the maximum number of
consecutive NaN values to forward/backward fill. In other
words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method
is not specified, this is the maximum number of entries along
the entire axis where NaNs will be filled. Must be greater
than 0 if not None.
downcast: A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an
appropriate equal type.
Returns:
filled: DataFrame
"""
# TODO implement value passed as DataFrame
if isinstance(value, pandas.DataFrame) or isinstance(value, pandas.Series):
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.fillna,
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)._query_compiler
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(value, (list, tuple)):
raise TypeError(
'"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__)
)
if value is None and method is None:
raise ValueError("must specify a fill method or value")
if value is not None and method is not None:
raise ValueError("cannot specify both a fill method and value")
if method is not None and method not in ["backfill", "bfill", "pad", "ffill"]:
expecting = "pad (ffill) or backfill (bfill)"
msg = "Invalid fill method. Expecting {expecting}. Got {method}".format(
expecting=expecting, method=method
)
raise ValueError(msg)
new_query_compiler = self._query_compiler.fillna(
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
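# Usage sketch (added for illustration; a list/tuple value raises TypeError as
# enforced above, values hypothetical):
#
#     df = DataFrame({"a": [1, None, 3]})
#     df.fillna(0)                    # fill with a scalar
#     df.fillna(method="ffill")       # propagate the last valid value forward
#     df.fillna({"a": -1})            # per-column fill values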
def filter(self, items=None, like=None, regex=None, axis=None):
"""Subset rows or columns based on their labels
Args:
items (list): list of labels to subset
like (string): retain labels where `arg in label == True`
regex (string): retain labels matching regex input
axis: axis to filter on
Returns:
A new DataFrame with the filter applied.
"""
nkw = com._count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if nkw == 0:
raise TypeError("Must pass either `items`, `like`, or `regex`")
if axis is None:
axis = "columns" # This is the default info axis for dataframes
axis = pandas.DataFrame()._get_axis_number(axis)
labels = self.columns if axis else self.index
if items is not None:
bool_arr = labels.isin(items)
elif like is not None:
def f(x):
return like in to_str(x)
bool_arr = labels.map(f).tolist()
else:
def f(x):
return matcher.search(to_str(x)) is not None
matcher = re.compile(regex)
bool_arr = labels.map(f).tolist()
if not axis:
return self[bool_arr]
return self[self.columns[bool_arr]]
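# Usage sketch (added for illustration): exactly one of items/like/regex may be
# given, as checked above (values hypothetical):
#
#     df = DataFrame({"one": [1], "two": [2], "three": [3]})
#     df.filter(items=["one"])        # keep listed columns
#     df.filter(like="t")             # columns whose name contains "t"
#     df.filter(regex="e$")           # columns whose name ends in "e"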
def first(self, offset):
return self._default_to_pandas(pandas.DataFrame.first, offset)
def first_valid_index(self):
"""Return index for first non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.first_valid_index()
def floordiv(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.floordiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.floordiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
@classmethod
def from_csv(
cls,
path,
header=0,
sep=", ",
index_col=0,
parse_dates=True,
encoding=None,
tupleize_cols=None,
infer_datetime_format=False,
):
from .io import read_csv
return read_csv(
path,
header=header,
sep=sep,
index_col=index_col,
parse_dates=parse_dates,
encoding=encoding,
tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format,
)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None):
ErrorMessage.default_to_pandas()
return from_pandas(pandas.DataFrame.from_dict(data, orient=orient, dtype=dtype))
@classmethod
def from_items(cls, items, columns=None, orient="columns"):
ErrorMessage.default_to_pandas()
return from_pandas(
pandas.DataFrame.from_items(items, columns=columns, orient=orient)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
):
ErrorMessage.default_to_pandas()
return from_pandas(
pandas.DataFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.ge, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.ge(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
result = self.dtypes.value_counts()
result.index = result.index.map(lambda x: str(x))
return result
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return self.ftypes.value_counts().sort_index()
def get_value(self, index, col, takeable=False):
return self._default_to_pandas(
pandas.DataFrame.get_value, index, col, takeable=takeable
)
def get_values(self):
return self._default_to_pandas(pandas.DataFrame.get_values)
def gt(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.gt, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.gt(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def head(self, n=5):
"""Get the first n rows of the DataFrame.
Args:
n (int): The number of rows to return.
Returns:
A new DataFrame with the first n rows of the DataFrame.
"""
if n >= len(self.index):
return self.copy()
return DataFrame(query_compiler=self._query_compiler.head(n))
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwargs
):
return self._default_to_pandas(
pandas.DataFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwargs
)
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
            axis (int): 0 or 'index' to find the index of the max for each
                column, 1 or 'columns' to find it for each row.
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
if not all(d != np.dtype("O") for d in self.dtypes):
raise TypeError("reduction operation 'argmax' not allowed for this dtype")
return self._query_compiler.idxmax(axis=axis, skipna=skipna)
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
            axis (int): 0 or 'index' to find the index of the min for each
                column, 1 or 'columns' to find it for each row.
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
if not all(d != np.dtype("O") for d in self.dtypes):
raise TypeError("reduction operation 'argmax' not allowed for this dtype")
return self._query_compiler.idxmin(axis=axis, skipna=skipna)
def infer_objects(self):
return self._default_to_pandas(pandas.DataFrame.infer_objects)
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
):
"""Print a concise summary of a DataFrame, which includes the index
dtype and column dtypes, non-null values and memory usage.
Args:
verbose (bool, optional): Whether to print the full summary. Defaults
to true
buf (writable buffer): Where to send output. Defaults to sys.stdout
max_cols (int, optional): When to switch from verbose to truncated
                output. By default, this is 100.
memory_usage (bool, str, optional): Specifies whether the total memory
usage of the DataFrame elements (including index) should be displayed.
True always show memory usage. False never shows memory usage. A value
of 'deep' is equivalent to "True with deep introspection". Memory usage
is shown in human-readable units (base-2 representation). Without deep
                introspection a memory estimation is made based on column dtype and
number of rows assuming values consume the same memory amount for
corresponding dtypes. With deep memory introspection, a real memory
usage calculation is performed at the cost of computational resources.
Defaults to True.
            null_counts (bool, optional): Whether to show the non-null counts. By
default, this is shown only when the frame is smaller than 100 columns
and 1690785 rows. A value of True always shows the counts and False
never shows the counts.
Returns:
Prints the summary of a DataFrame and returns None.
"""
# We will default to pandas because it will be faster than doing two passes
# over the data
buf = sys.stdout if not buf else buf
import io
with io.StringIO() as tmp_buf:
self._default_to_pandas(
pandas.DataFrame.info,
verbose=verbose,
buf=tmp_buf,
max_cols=max_cols,
memory_usage=memory_usage,
null_counts=null_counts,
)
result = tmp_buf.getvalue()
result = result.replace(
"pandas.core.frame.DataFrame", "modin.pandas.dataframe.DataFrame"
)
buf.write(result)
return None
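        # NOTE: everything below is an older, partition-aware implementation of
        # info(); it is unreachable because of the early return above.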
index = self.index
columns = self.columns
dtypes = self.dtypes
# Set up default values
verbose = True if verbose is None else verbose
buf = sys.stdout if not buf else buf
max_cols = 100 if not max_cols else max_cols
memory_usage = True if memory_usage is None else memory_usage
if not null_counts:
if len(columns) < 100 and len(index) < 1690785:
null_counts = True
else:
null_counts = False
# Determine if actually verbose
actually_verbose = True if verbose and max_cols > len(columns) else False
if type(memory_usage) == str and memory_usage == "deep":
memory_usage_deep = True
else:
memory_usage_deep = False
# Start putting together output
# Class denoted in info() output
class_string = "<class 'modin.pandas.dataframe.DataFrame'>\n"
# Create the Index info() string by parsing self.index
index_string = index.summary() + "\n"
if null_counts:
counts = self._query_compiler.count()
if memory_usage:
memory_usage_data = self._query_compiler.memory_usage(
deep=memory_usage_deep, index=True
)
if actually_verbose:
# Create string for verbose output
col_string = "Data columns (total {0} columns):\n".format(len(columns))
for col, dtype in zip(columns, dtypes):
col_string += "{0}\t".format(col)
if null_counts:
col_string += "{0} not-null ".format(counts[col])
col_string += "{0}\n".format(dtype)
else:
# Create string for not verbose output
col_string = "Columns: {0} entries, {1} to {2}\n".format(
len(columns), columns[0], columns[-1]
)
# A summary of the dtypes in the dataframe
dtypes_string = "dtypes: "
for dtype, count in dtypes.value_counts().iteritems():
dtypes_string += "{0}({1}),".format(dtype, count)
dtypes_string = dtypes_string[:-1] + "\n"
# Create memory usage string
memory_string = ""
if memory_usage:
if memory_usage_deep:
memory_string = "memory usage: {0} bytes".format(memory_usage_data)
else:
memory_string = "memory usage: {0}+ bytes".format(memory_usage_data)
# Combine all the components of the info() output
result = "".join(
[class_string, index_string, col_string, dtypes_string, memory_string]
)
# Write to specified output buffer
buf.write(result)
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
"""
if isinstance(value, (DataFrame, pandas.DataFrame)):
if len(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.iloc[:, 0]
if len(self.index) == 0:
try:
value = pandas.Series(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a DataFrame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
new_index = value.index.copy()
new_columns = self.columns.insert(loc, column)
new_query_compiler = DataFrame(
value, index=new_index, columns=new_columns
)._query_compiler
else:
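            # Scalar values are broadcast to the length of the index so that the
            # length check below applies to scalars and list-likes alike.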
if not is_list_like(value):
value = np.full(len(self.index), value)
if not isinstance(value, pandas.Series) and len(value) != len(self.index):
raise ValueError("Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._update_inplace(new_query_compiler=new_query_compiler)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
downcast=None,
**kwargs
):
return self._default_to_pandas(
pandas.DataFrame.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
downcast=downcast,
**kwargs
)
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
index_iter = iter(self.index)
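        # Each partition arrives as a single-row pandas DataFrame; the builder
        # restores the original column labels and attaches the next index label
        # before delegating to pandas.DataFrame.iterrows.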
def iterrow_builder(df):
df.columns = self.columns
df.index = [next(index_iter)]
return df.iterrows()
partition_iterator = PartitionIterator(self._query_compiler, 0, iterrow_builder)
for v in partition_iterator:
yield v
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
col_iter = iter(self.columns)
def items_builder(df):
df.columns = [next(col_iter)]
df.index = self.index
return df.items()
partition_iterator = PartitionIterator(self._query_compiler, 1, items_builder)
for v in partition_iterator:
yield v
def iteritems(self):
"""Iterator over (column name, Series) pairs.
Note:
Returns the same thing as .items()
Returns:
A generator that iterates over the columns of the frame.
"""
return self.items()
def itertuples(self, index=True, name="Pandas"):
"""Iterate over DataFrame rows as namedtuples.
Args:
index (boolean, default True): If True, return the index as the
first element of the tuple.
name (string, default "Pandas"): The name of the returned
namedtuples or None to return regular tuples.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A tuple representing row data. See args for varying tuples.
"""
index_iter = iter(self.index)
def itertuples_builder(df):
df.columns = self.columns
df.index = [next(index_iter)]
return df.itertuples(index=index, name=name)
partition_iterator = PartitionIterator(
self._query_compiler, 0, itertuples_builder
)
for v in partition_iterator:
yield v
def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
"""Join two or more DataFrames, or a DataFrame with a collection.
Args:
other: What to join this DataFrame with.
on: A column name to use from the left for the join.
how: What type of join to conduct.
lsuffix: The suffix to add to column names that match on left.
rsuffix: The suffix to add to column names that match on right.
sort: Whether or not to sort.
Returns:
The joined DataFrame.
"""
if on is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.join,
other,
on=on,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
if isinstance(other, pandas.Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
# Joining the empty DataFrames with either index or columns is
# fast. It gives us proper error checking for the edge cases that
# would otherwise require a lot more logic.
pandas.DataFrame(columns=self.columns).join(
pandas.DataFrame(columns=other.columns),
lsuffix=lsuffix,
rsuffix=rsuffix,
).columns
return DataFrame(
query_compiler=self._query_compiler.join(
other._query_compiler,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
else:
# This constraint carried over from Pandas.
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
# See note above about error checking with an empty join.
pandas.DataFrame(columns=self.columns).join(
[pandas.DataFrame(columns=obj.columns) for obj in other],
lsuffix=lsuffix,
rsuffix=rsuffix,
).columns
return DataFrame(
query_compiler=self._query_compiler.join(
[obj._query_compiler for obj in other],
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.kurt,
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs
)
def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.kurtosis,
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs
)
def last(self, offset):
return self._default_to_pandas(pandas.DataFrame.last, offset)
def last_valid_index(self):
"""Return index for last non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.last_valid_index()
def le(self, other, axis="columns", level=None):
"""Checks element-wise that this is less than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the le over.
level: The Multilevel index level to apply le over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.le, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.le(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def lookup(self, row_labels, col_labels):
return self._default_to_pandas(pandas.DataFrame.lookup, row_labels, col_labels)
def lt(self, other, axis="columns", level=None):
"""Checks element-wise that this is less than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the lt over.
level: The Multilevel index level to apply lt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.lt, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.lt(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def mad(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.mad, axis=axis, skipna=skipna, level=level
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
raise_on_error=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mask,
cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
errors=errors,
try_cast=try_cast,
raise_on_error=raise_on_error,
)
def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Perform max across the DataFrame.
Args:
axis (int): The axis to take the max on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The max of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_min_max(axis, numeric_only)
return self._query_compiler.max(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes mean across the DataFrame.
Args:
axis (int): The axis to take the mean on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The mean of the DataFrame. (Pandas series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.mean(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes median across the DataFrame.
Args:
axis (int): The axis to take the median on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The median of the DataFrame. (Pandas series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if numeric_only is not None and not numeric_only:
self._validate_dtypes(numeric_only=True)
return self._query_compiler.median(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
):
return self._default_to_pandas(
pandas.DataFrame.melt,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
def memory_usage(self, index=True, deep=False):
"""Returns the memory usage of each column in bytes
Args:
index (bool): Whether to include the memory usage of the DataFrame's
index in returned Series. Defaults to True
deep (bool): If True, introspect the data deeply by interrogating
objects dtypes for system-level memory consumption. Defaults to False
Returns:
A Series where the index are the column names and the values are
            the memory usage of each of the columns in bytes. If `index=True`,
then the first value of the Series will be 'Index' with its memory usage.
"""
result = self._query_compiler.memory_usage(index=index, deep=deep)
result.index = self.columns
if index:
index_value = self.index.memory_usage(deep=deep)
return pandas.Series(index_value, index=["Index"]).append(result)
return result
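    # Illustrative usage (hypothetical data):
    #   df.memory_usage()           # per-column usage in bytes, "Index" entry first
    #   df.memory_usage(deep=True)  # additionally introspect object columns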
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
"""Database style join, where common columns in "on" are merged.
Args:
right: The DataFrame to merge against.
how: What type of join to use.
on: The common column name(s) to join on. If None, and left_on and
right_on are also None, will default to all commonly named
columns.
left_on: The column(s) on the left to use for the join.
right_on: The column(s) on the right to use for the join.
left_index: Use the index from the left as the join keys.
right_index: Use the index from the right as the join keys.
sort: Sort the join keys lexicographically in the result.
suffixes: Add this suffix to the common names not in the "on".
copy: Does nothing in our implementation
indicator: Adds a column named _merge to the DataFrame with
metadata from the merge about each row.
validate: Checks if merge is a specific type.
Returns:
A merged Dataframe
"""
if not isinstance(right, DataFrame):
raise ValueError(
"can not merge DataFrame with instance of type "
"{}".format(type(right))
)
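        # Only index-to-index merges are handled natively (via join below);
        # any column-based merge falls back to pandas.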
if left_index is False or right_index is False:
if isinstance(right, DataFrame):
right = right._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.merge,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
if left_index and right_index:
return self.join(
right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort
)
def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Perform min across the DataFrame.
Args:
axis (int): The axis to take the min on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The min of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_min_max(axis, numeric_only)
return self._query_compiler.min(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def mod(self, other, axis="columns", level=None, fill_value=None):
"""Mods this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the mod against this.
axis: The axis to mod over.
level: The Multilevel index level to apply mod over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Mod applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mod,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.mod(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def mode(self, axis=0, numeric_only=False):
"""Perform mode across the DataFrame.
Args:
axis (int): The axis to take the mode on.
numeric_only (bool): if True, only apply to numeric columns.
Returns:
DataFrame: The mode of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.mode(
axis=axis, numeric_only=numeric_only
)
)
def mul(self, other, axis="columns", level=None, fill_value=None):
"""Multiplies this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mul,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.mul(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def multiply(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for mul.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
return self.mul(other, axis, level, fill_value)
def ne(self, other, axis="columns", level=None):
"""Checks element-wise that this is not equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the ne over.
level: The Multilevel index level to apply ne over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.ne, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.ne(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def nlargest(self, n, columns, keep="first"):
return self._default_to_pandas(pandas.DataFrame.nlargest, n, columns, keep=keep)
def notna(self):
"""Perform notna across the DataFrame.
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return DataFrame(query_compiler=self._query_compiler.notna())
def notnull(self):
"""Perform notnull across the DataFrame.
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return DataFrame(query_compiler=self._query_compiler.notnull())
def nsmallest(self, n, columns, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.nsmallest, n, columns, keep=keep
)
def nunique(self, axis=0, dropna=True):
"""Return Series with number of distinct
observations over requested axis.
Args:
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Returns:
nunique : Series
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.nunique(axis=axis, dropna=dropna)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.pct_change,
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
**kwargs
)
def pipe(self, func, *args, **kwargs):
"""Apply func(self, *args, **kwargs)
Args:
func: function to apply to the df.
args: positional arguments passed into ``func``.
kwargs: a dictionary of keyword arguments passed into ``func``.
Returns:
object: the return type of ``func``.
"""
return com._pipe(self, func, *args, **kwargs)
def pivot(self, index=None, columns=None, values=None):
return self._default_to_pandas(
pandas.DataFrame.pivot, index=index, columns=columns, values=values
)
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
):
return self._default_to_pandas(
pandas.DataFrame.pivot_table,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
)
@property
def plot(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=False,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwargs
):
return to_pandas(self).plot
def pop(self, item):
"""Pops an item from this DataFrame and returns it.
Args:
item (str): Column label to be popped
Returns:
A Series containing the popped values. Also modifies this
DataFrame.
"""
result = self[item]
del self[item]
return result
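    # Illustrative usage (hypothetical column name):
    #   s = df.pop("a")  # returns column "a" as a Series and removes it from df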
def pow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.pow,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.pow(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=1,
**kwargs
):
"""Return the product of the values for the requested axis
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
level : int or level name, default None
numeric_only : boolean, default None
min_count : int, default 1
Returns:
prod : Series or DataFrame (if level specified)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True)
return self._query_compiler.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def product(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=1,
**kwargs
):
"""Return the product of the values for the requested axis
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
level : int or level name, default None
numeric_only : boolean, default None
min_count : int, default 1
Returns:
product : Series or DataFrame (if level specified)
"""
return self.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""Return values at the given quantile over requested axis,
a la numpy.percentile.
Args:
q (float): 0 <= q <= 1, the quantile(s) to compute
axis (int): 0 or 'index' for row-wise,
1 or 'columns' for column-wise
interpolation: {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specifies which interpolation method to use
Returns:
quantiles : Series or DataFrame
If q is an array, a DataFrame will be returned where the
index is q, the columns are the columns of self, and the
values are the quantiles.
If q is a float, a Series will be returned where the
index is the columns of self and the values
are the quantiles.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
def check_dtype(t):
return is_numeric_dtype(t) or is_datetime_or_timedelta_dtype(t)
if not numeric_only:
# If not numeric_only and columns, then check all columns are either
# numeric, timestamp, or timedelta
if not axis and not all(check_dtype(t) for t in self.dtypes):
raise TypeError("can't multiply sequence by non-int of type 'float'")
# If over rows, then make sure that all dtypes are equal for not
# numeric_only
elif axis:
for i in range(1, len(self.dtypes)):
pre_dtype = self.dtypes[i - 1]
curr_dtype = self.dtypes[i]
if not is_dtype_equal(pre_dtype, curr_dtype):
raise TypeError(
"Cannot compare type '{0}' with type '{1}'".format(
pre_dtype, curr_dtype
)
)
else:
# Normally pandas returns this near the end of the quantile, but we
# can't afford the overhead of running the entire operation before
# we error.
if not any(is_numeric_dtype(t) for t in self.dtypes):
raise ValueError("need at least one array to concatenate")
# check that all qs are between 0 and 1
pandas.DataFrame()._check_percentile(q)
axis = pandas.DataFrame()._get_axis_number(axis)
if isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list)):
return DataFrame(
query_compiler=self._query_compiler.quantile_for_list_of_values(
q=q,
axis=axis,
numeric_only=numeric_only,
interpolation=interpolation,
)
)
else:
return self._query_compiler.quantile_for_single_value(
q=q, axis=axis, numeric_only=numeric_only, interpolation=interpolation
)
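    # Illustrative usage (hypothetical data):
    #   df.quantile(0.5)           # Series: the median of each numeric column
    #   df.quantile([0.25, 0.75])  # DataFrame indexed by the requested quantiles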
def query(self, expr, inplace=False, **kwargs):
"""Queries the Dataframe with a boolean expression
Returns:
A new DataFrame if inplace=False
"""
ErrorMessage.non_verified_udf()
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.query(expr, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def radd(self, other, axis="columns", level=None, fill_value=None):
return self.add(other, axis, level, fill_value)
def rank(
self,
axis=0,
method="average",
numeric_only=None,
na_option="keep",
ascending=True,
pct=False,
):
"""
Compute numerical data ranks (1 through n) along axis.
Equal values are assigned a rank that is the [method] of
the ranks of those values.
Args:
axis (int): 0 or 'index' for row-wise,
1 or 'columns' for column-wise
method: {'average', 'min', 'max', 'first', 'dense'}
                Specifies which method to use for equal values
            numeric_only (boolean)
                Include only float, int, boolean data.
            na_option: {'keep', 'top', 'bottom'}
                Specifies how to handle NA values
            ascending (boolean):
                Decides ranking order
pct (boolean):
Computes percentage ranking of data
Returns:
A new DataFrame
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.rank(
axis=axis,
method=method,
numeric_only=numeric_only,
na_option=na_option,
ascending=ascending,
pct=pct,
)
)
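    # Illustrative usage (hypothetical data):
    #   DataFrame({"a": [3, 1, 2]}).rank()  # column "a" becomes 3.0, 1.0, 2.0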
def rdiv(self, other, axis="columns", level=None, fill_value=None):
"""Div this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the div against this.
axis: The axis to div over.
level: The Multilevel index level to apply div over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the rdiv applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rdiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rdiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def reindex(
self,
labels=None,
index=None,
columns=None,
axis=None,
method=None,
copy=True,
level=None,
fill_value=np.nan,
limit=None,
tolerance=None,
):
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.reindex,
labels=labels,
index=index,
columns=columns,
axis=axis,
method=method,
copy=copy,
level=level,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
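        # `labels` is an alias for `index` when axis == 0 and for `columns` otherwise.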
if axis == 0 and labels is not None:
index = labels
elif labels is not None:
columns = labels
if index is not None:
new_query_compiler = self._query_compiler.reindex(
0,
index,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
else:
new_query_compiler = self._query_compiler
if columns is not None:
final_query_compiler = new_query_compiler.reindex(
1,
columns,
method=method,
fill_value=fill_value,
limit=limit,
tolerance=tolerance,
)
else:
final_query_compiler = new_query_compiler
return self._create_dataframe_from_compiler(final_query_compiler, not copy)
def reindex_axis(
self,
labels,
axis=0,
method=None,
level=None,
copy=True,
limit=None,
fill_value=np.nan,
):
return self._default_to_pandas(
pandas.DataFrame.reindex_axis,
labels,
axis=axis,
method=method,
level=level,
copy=copy,
limit=limit,
fill_value=fill_value,
)
def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.reindex_like,
other,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
def rename(
self,
mapper=None,
index=None,
columns=None,
axis=None,
copy=True,
inplace=False,
level=None,
):
"""Alters axes labels.
Args:
mapper, index, columns: Transformations to apply to the axis's
values.
axis: Axis to target with mapper.
copy: Also copy underlying data.
inplace: Whether to return a new DataFrame.
level: Only rename a specific level of a MultiIndex.
Returns:
If inplace is False, a new DataFrame with the updated axes.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# We have to do this with the args because of how rename handles
# kwargs. It doesn't ignore None values passed in, so we have to filter
# them ourselves.
args = locals()
kwargs = {k: v for k, v in args.items() if v is not None and k != "self"}
# inplace should always be true because this is just a copy, and we
# will use the results after.
kwargs["inplace"] = True
df_to_rename = pandas.DataFrame(index=self.index, columns=self.columns)
df_to_rename.rename(**kwargs)
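        # The empty frame above carries only the axes; letting pandas rename it
        # handles mapper/level resolution, and the resulting labels are copied
        # back onto the (possibly copied) Modin frame below.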
if inplace:
obj = self
else:
obj = self.copy()
obj.index = df_to_rename.index
obj.columns = df_to_rename.columns
if not inplace:
return obj
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
axes_is_columns = axis == 1 or axis == "columns"
renamed = self if inplace else self.copy()
if axes_is_columns:
renamed.columns.name = mapper
else:
renamed.index.name = mapper
if not inplace:
return renamed
def _set_axis_name(self, name, axis=0, inplace=False):
"""Alter the name or names of the axis.
Args:
name: Name for the Index, or list of names for the MultiIndex
axis: 0 or 'index' for the index; 1 or 'columns' for the columns
inplace: Whether to modify `self` directly or return a copy
Returns:
Type of caller or None if inplace=True.
"""
axes_is_columns = axis == 1 or axis == "columns"
renamed = self if inplace else self.copy()
        if axes_is_columns:
            renamed.columns = renamed.columns.set_names(name)
        else:
            renamed.index = renamed.index.set_names(name)
if not inplace:
return renamed
def reorder_levels(self, order, axis=0):
return self._default_to_pandas(
pandas.DataFrame.reorder_levels, order, axis=axis
)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return self._default_to_pandas(
pandas.DataFrame.replace,
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def resample(
self,
rule,
how=None,
axis=0,
fill_method=None,
closed=None,
label=None,
convention="start",
kind=None,
loffset=None,
limit=None,
base=0,
on=None,
level=None,
):
return self._default_to_pandas(
pandas.DataFrame.resample,
rule,
how=how,
axis=axis,
fill_method=fill_method,
closed=closed,
label=label,
convention=convention,
kind=kind,
loffset=loffset,
limit=limit,
base=base,
on=on,
level=level,
)
def reset_index(
self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
):
"""Reset this index to default and create column from current index.
Args:
level: Only remove the given levels from the index. Removes all
levels by default
drop: Do not try to insert index into DataFrame columns. This
resets the index to the default integer index.
inplace: Modify the DataFrame in place (do not create a new object)
col_level : If the columns have multiple levels, determines which
level the labels are inserted into. By default it is inserted
into the first level.
col_fill: If the columns have multiple levels, determines how the
other levels are named. If None then the index name is
repeated.
Returns:
A new DataFrame if inplace is False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# TODO Implement level
if level is not None:
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.reset_index,
level=level,
drop=drop,
inplace=inplace,
col_level=col_level,
col_fill=col_fill,
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
# Error checking for matching Pandas. Pandas does not allow you to
# insert a dropped index into a DataFrame if these columns already
# exist.
if (
not drop
and not isinstance(self.index, pandas.MultiIndex)
and all(n in self.columns for n in ["level_0", "index"])
):
raise ValueError("cannot insert level_0, already exists")
new_query_compiler = self._query_compiler.reset_index(drop=drop, level=level)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def rfloordiv(self, other, axis="columns", level=None, fill_value=None):
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rfloordiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rfloordiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rmod(self, other, axis="columns", level=None, fill_value=None):
"""Mod this DataFrame against another DataFrame/Series/scalar.
Args:
            other: The object to use to apply the mod against this.
            axis: The axis to mod over.
            level: The Multilevel index level to apply mod over.
            fill_value: The value to fill NaNs with.
        Returns:
            A new DataFrame with the rmod applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rmod,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.rmod(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rmul(self, other, axis="columns", level=None, fill_value=None):
return self.mul(other, axis, level, fill_value)
def rolling(
self,
window,
min_periods=None,
freq=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
return self._default_to_pandas(
pandas.DataFrame.rolling,
window,
min_periods=min_periods,
freq=freq,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
def round(self, decimals=0, *args, **kwargs):
"""Round each element in the DataFrame.
Args:
decimals: The number of decimals to round to.
Returns:
A new DataFrame.
"""
return DataFrame(
query_compiler=self._query_compiler.round(decimals=decimals, **kwargs)
)
def rpow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rpow,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
# Check to make sure integers are not raised to negative integer powers
if (
is_integer_dtype(type(other))
and other < 0
and all(is_integer_dtype(t) for t in self.dtypes)
):
raise ValueError("Integers to negative integer powers are not allowed.")
new_query_compiler = self._query_compiler.rpow(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rsub(self, other, axis="columns", level=None, fill_value=None):
"""Subtract a DataFrame/Series/scalar from this DataFrame.
Args:
other: The object to use to apply the subtraction to this.
axis: The axis to apply the subtraction over.
level: Mutlilevel index level to subtract over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the subtraciont applied.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.rsub,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_time_only=True)
new_query_compiler = self._query_compiler.rsub(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def rtruediv(self, other, axis="columns", level=None, fill_value=None):
return self.truediv(other, axis, level, fill_value)
def sample(
self,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
):
"""Returns a random sample of items from an axis of object.
Args:
n: Number of items from axis to return. Cannot be used with frac.
Default = 1 if frac = None.
frac: Fraction of axis items to return. Cannot be used with n.
replace: Sample with or without replacement. Default = False.
weights: Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index.
Index values in weights not found in sampled object will be
ignored and index values in sampled object not in weights will
be assigned weights of zero. If called on a DataFrame, will
accept the name of a column when axis = 0. Unless weights are
a Series, weights must be same length as axis being sampled.
If weights do not sum to 1, they will be normalized to sum
to 1. Missing values in the weights column will be treated as
zero. inf and -inf values not allowed.
random_state: Seed for the random number generator (if int), or
numpy RandomState object.
axis: Axis to sample. Accepts axis number or name.
Returns:
A new Dataframe
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
axis_labels = self.columns
axis_length = len(axis_labels)
else:
# Getting rows requires indices instead of labels. RangeIndex provides this.
axis_labels = pandas.RangeIndex(len(self.index))
axis_length = len(axis_labels)
if weights is not None:
# Index of the weights Series should correspond to the index of the
# Dataframe in order to sample
if isinstance(weights, pandas.Series):
weights = weights.reindex(self.axes[axis])
# If weights arg is a string, the weights used for sampling will
# the be values in the column corresponding to that string
if isinstance(weights, string_types):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a valid column")
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
weights = pandas.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# weights cannot be NaN when sampling, so we must set all nan
# values to 0
weights = weights.fillna(0)
# If passed in weights are not equal to 1, renormalize them
# otherwise numpy sampling function will error
weights_sum = weights.sum()
if weights_sum != 1:
if weights_sum != 0:
weights = weights / weights_sum
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
if n is None and frac is None:
# default to n = 1 if n and frac are both None (in accordance with
# Pandas specification)
n = 1
elif n is not None and frac is None and n % 1 != 0:
# n must be an integer
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
# compute the number of samples based on frac
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
# Pandas specification does not allow both n and frac to be passed
# in
raise ValueError("Please enter a value for `frac` OR `n`, not both")
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
if n == 0:
# An Empty DataFrame is returned if the number of samples is 0.
# The Empty Dataframe should have either columns or index specified
# depending on which axis is passed in.
return DataFrame(
columns=[] if axis == 1 else self.columns,
index=self.index if axis == 1 else [],
)
if random_state is not None:
# Get a random number generator depending on the type of
# random_state that is passed in
if isinstance(random_state, int):
random_num_gen = np.random.RandomState(random_state)
            elif isinstance(random_state, np.random.RandomState):
random_num_gen = random_state
else:
# random_state must be an int or a numpy RandomState object
raise ValueError(
"Please enter an `int` OR a "
"np.random.RandomState for random_state"
)
# choose random numbers and then get corresponding labels from
# chosen axis
            # Forward the (possibly None) weights so weighted sampling also works
            # when a random_state is supplied.
            sample_indices = random_num_gen.choice(
                np.arange(0, axis_length), size=n, replace=replace, p=weights
            )
samples = axis_labels[sample_indices]
else:
# randomly select labels from chosen axis
samples = np.random.choice(
a=axis_labels, size=n, replace=replace, p=weights
)
if axis:
query_compiler = self._query_compiler.getitem_column_array(samples)
return DataFrame(query_compiler=query_compiler)
else:
query_compiler = self._query_compiler.getitem_row_array(samples)
return DataFrame(query_compiler=query_compiler)
def select(self, crit, axis=0):
return self._default_to_pandas(pandas.DataFrame.select, crit, axis=axis)
def select_dtypes(self, include=None, exclude=None):
# Validates arguments for whether both include and exclude are None or
# if they are disjoint. Also invalidates string dtypes.
pandas.DataFrame().select_dtypes(include, exclude)
if include and not is_list_like(include):
include = [include]
elif not include:
include = []
if exclude and not is_list_like(exclude):
exclude = [exclude]
elif not exclude:
exclude = []
sel = tuple(map(set, (include, exclude)))
include, exclude = map(lambda x: set(map(_get_dtype_from_object, x)), sel)
include_these = pandas.Series(not bool(include), index=self.columns)
exclude_these = pandas.Series(not bool(exclude), index=self.columns)
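        # With no include filter every column starts as included; with no exclude
        # filter no column starts as excluded. The loop below refines both masks
        # per column dtype.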
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(
is_dtype_instance_mapper, self.dtypes.iteritems()
):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
indicate = [
i for i in range(len(dtype_indexer.values)) if not dtype_indexer.values[i]
]
return self.drop(columns=self.columns[indicate], inplace=False)
def sem(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
return self._default_to_pandas(
pandas.DataFrame.sem,
axis=axis,
skipna=skipna,
level=level,
ddof=ddof,
numeric_only=numeric_only,
**kwargs
)
def set_axis(self, labels, axis=0, inplace=None):
"""Assign desired index to given axis.
Args:
labels (pandas.Index or list-like): The Index to assign.
axis (string or int): The axis to reassign.
inplace (bool): Whether to make these modifications inplace.
Returns:
If inplace is False, returns a new DataFrame, otherwise None.
"""
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and "labels" as second, is still supported '
"but will be deprecated in a future version of pandas.",
FutureWarning,
stacklevel=2,
)
labels, axis = axis, labels
if inplace is None:
warnings.warn(
"set_axis currently defaults to operating inplace.\nThis "
"will change in a future version of pandas, use "
"inplace=True to avoid this warning.",
FutureWarning,
stacklevel=2,
)
inplace = True
if inplace:
setattr(self, pandas.DataFrame()._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def set_index(
self, keys, drop=True, append=False, inplace=False, verify_integrity=False
):
"""Set the DataFrame index using one or more existing columns.
Args:
keys: column label or list of column labels / arrays.
drop (boolean): Delete columns to be used as the new index.
append (boolean): Whether to append columns to existing index.
inplace (boolean): Modify the DataFrame in place.
verify_integrity (boolean): Check the new index for duplicates.
Otherwise defer the check until necessary. Setting to False
will improve the performance of this method
Returns:
If inplace is set to false returns a new DataFrame, otherwise None.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, pandas.MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, pandas.MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, pandas.Series):
level = col._values
names.append(col.name)
elif isinstance(col, pandas.Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, pandas.Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = | _ensure_index_from_sequences(arrays, names) | pandas.core.index._ensure_index_from_sequences |
#!/usr/bin/env python
# coding: utf-8
# In[27]:
import pandas as pd
xl = "C:\\Users\\zachi\\Desktop\\final_project_22\\data_nov_21.xlsx"
#def clean_data (xl,month,platform):
def clean_data(xl, first_day, last_day, platform):
df= | pd.read_excel(xl) | pandas.read_excel |
import os
import numpy as np
from openpyxl import load_workbook
import pandas as pd
import pytest
from geochem_dataset.excel import Dataset
from geochem_dataset.excel.dataclasses import Document
from geochem_dataset.excel.exceptions import IntegrityError
from helpers.utils import xlref, xlrowref, xlcolref
TEST_FILE_NAME = 'DOCUMENT.xlsx'
TEST_SHEET_NAME = 'DOCUMENT'
TEST_COLUMNS = ('RECOMMENDED_CITATION',)
TEST_DATA = [
('A test citation',)
]
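# Each tuple in TEST_DATA is unpacked positionally into the Document dataclass below.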
ERROR_MESSAGES = {
'missing_worksheet': 'Worksheet {worksheet} is missing from workbook {workbook}',
'missing_columns': 'Worksheet {workbook}::{worksheet} is missing columns: {column_names}',
'extra_columns': 'Worksheet {workbook}::{worksheet} has extra columns: {column_names}',
'too_few_rows': 'Worksheet {workbook}::{worksheet} has too few rows (min is {min_rows} and max is {max_rows})',
'too_many_rows': 'Worksheet {workbook}::{worksheet} has too many rows (min is {min_rows} and max is {max_rows})',
}
class TestDocuments:
def test_documents(self, dataset_path):
# Build expected
expected_documents = [Document(*args) for args in TEST_DATA]
# Assert
with Dataset(dataset_path) as dataset:
documents = list(dataset.documents)
assert documents == expected_documents
def test_documents_with_empty_file(self, dataset_path):
# Modify documents file
document_path = dataset_path / TEST_FILE_NAME
os.truncate(document_path, 0)
# Assert
with pytest.raises(ValueError) as excinfo:
with Dataset(dataset_path) as dataset:
pass
def test_documents_with_missing_sheet(self, dataset_path):
# Modify
documents_path = dataset_path / TEST_FILE_NAME
wb = load_workbook(documents_path)
ws = wb[TEST_SHEET_NAME]
ws.title = "Skittles"
wb.save(documents_path)
# Expected
expected_error_msg_kwargs = {
'workbook': TEST_FILE_NAME,
'worksheet': TEST_SHEET_NAME,
}
# Assert
with pytest.raises(IntegrityError) as excinfo:
with Dataset(dataset_path) as dataset:
pass
assert excinfo.value.args[0] == ERROR_MESSAGES['missing_worksheet'].format(**expected_error_msg_kwargs)
def test_documents_with_missing_columns(self, dataset_path):
document_path = dataset_path / TEST_FILE_NAME
with pd.ExcelWriter(document_path) as writer:
df = pd.DataFrame()
df.to_excel(writer, sheet_name=TEST_SHEET_NAME, index=False)
# Expected
expected_error_msg_kwargs = {
'workbook': TEST_FILE_NAME,
'worksheet': TEST_SHEET_NAME,
'column_names': ', '.join(sorted(TEST_COLUMNS)),
}
# Assert
with pytest.raises(IntegrityError) as excinfo:
with Dataset(dataset_path) as dataset:
pass
assert excinfo.value.args[0] == ERROR_MESSAGES['missing_columns'].format(**expected_error_msg_kwargs)
def test_documents_with_extra_columns(self, dataset_path):
document_path = dataset_path / TEST_FILE_NAME
df = pd.read_excel(document_path, sheet_name=TEST_SHEET_NAME)
with pd.ExcelWriter(document_path) as writer:
df['DOG'] = ['Yoru']
df['CAT'] = ['Skittles']
df.to_excel(writer, sheet_name=TEST_SHEET_NAME, index=False)
# Expected
expected_error_msg_kwargs = {
'workbook': TEST_FILE_NAME,
'worksheet': TEST_SHEET_NAME,
'column_names': 'CAT, DOG',
}
# Assert
with pytest.raises(IntegrityError) as excinfo:
with Dataset(dataset_path) as dataset:
pass
assert excinfo.value.args[0] == ERROR_MESSAGES['extra_columns'].format(**expected_error_msg_kwargs)
def test_documents_with_extra_columns_ok(self, dataset_path):
# Modify file
document_path = dataset_path / TEST_FILE_NAME
df = pd.read_excel(document_path, sheet_name=TEST_SHEET_NAME)
with pd.ExcelWriter(document_path) as writer:
df['CAT'] = ['Skittles']
df.to_excel(writer, sheet_name=TEST_SHEET_NAME, index=False)
# Build expected
NEW_TEST_DATA = TEST_DATA.copy()
NEW_TEST_DATA[0] = NEW_TEST_DATA[0] + (frozenset((('cat', 'Skittles'),)),)
expected_documents = [Document(*args) for args in NEW_TEST_DATA]
# Assert
with Dataset(dataset_path, extra_columns_ok=True) as dataset:
documents = list(dataset.documents)
assert documents == expected_documents
def test_documents_with_no_data(self, dataset_path):
# Modify samples file
document_path = dataset_path / TEST_FILE_NAME
df = pd.read_excel(document_path, sheet_name=TEST_SHEET_NAME)
with pd.ExcelWriter(document_path) as writer:
df = df[0:0]
df.to_excel(writer, sheet_name=TEST_SHEET_NAME, index=False)
# Expected
expected_error_msg_kwargs = {
'workbook': TEST_FILE_NAME,
'worksheet': TEST_SHEET_NAME,
'min_rows': 1,
'max_rows': 1,
}
# Assert
with pytest.raises(IntegrityError) as excinfo:
with Dataset(dataset_path) as dataset:
pass
assert excinfo.value.args[0] == ERROR_MESSAGES['too_few_rows'].format(**expected_error_msg_kwargs)
def test_documents_with_too_much_data(self, dataset_path):
# Modify samples file
document_path = dataset_path / TEST_FILE_NAME
df = pd.read_excel(document_path, sheet_name=TEST_SHEET_NAME)
with | pd.ExcelWriter(document_path) | pandas.ExcelWriter |
from sklearn import tree
from sklearn import preprocessing
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_absolute_error, f1_score
import pandas as pd
from pandas.api.types import (
is_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_string_dtype,
is_datetime64_any_dtype,
is_timedelta64_dtype,
)
# With a value of 4, a pattern can only be detected when the same observation occurs at
# least 4 times; raising the limit also raises the minimum number of observations.
# This matters because below this limit sklearn throws an error, which we catch and
# report as a score of 0.
CV_ITERATIONS = 4
RANDOM_SEED = 587136
# if a numeric column has less than 15 unique values, it is inferred as categoric
# thus, the ppscore will use a classification
# this has important implications on the ppscore
# eg if you have 4 equal categories encoded 0, 1, 2, 3 and treat it as a regression
# then the baseline is 1 (median) which is okayish and a predictor will have a harder time
# to beat the baseline, thus the ppscore will be considerably lower
# if the column is encoded as category, then the baseline will be to always predict 0
# this baseline will be way easier to beat and thus result in a higher ppscore
NUMERIC_AS_CATEGORIC_BREAKPOINT = 15
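# Illustrative note (added for clarity; the numbers are a made-up example): with 4 equally
# frequent categories encoded 0-3,
#   y = pd.Series([0, 1, 2, 3] * 25)
#   y.median()                   # 1.5 -> baseline prediction if treated as a regression
#   y.value_counts().index[0]    # most frequent class -> baseline if treated as classification
# The median baseline is already fairly strong, so the ppscore would come out lower;
# treating such low-cardinality columns as categoric keeps the baseline easy to beat.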
def _calculate_model_cv_score_(df, target, feature, metric, model, **kwargs):
"Calculates the mean model score based on cross-validation"
# Sources about the used methods:
# https://scikit-learn.org/stable/modules/tree.html
# https://scikit-learn.org/stable/modules/cross_validation.html
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html
# shuffle the rows - this is important for crossvalidation
# because the crossvalidation just takes the first n lines
# if there is a strong pattern in the rows eg 0,0,0,0,1,1,1,1
# then this will lead to problems because the first cv sees mostly 0 and the later 1
# this approach might be wrong for timeseries because it might leak information
df = df.sample(frac=1, random_state=RANDOM_SEED, replace=False)
# preprocess target
if df[target].dtype == object:
le = preprocessing.LabelEncoder()
df[target] = le.fit_transform(df[target])
target_series = df[target]
else:
target_series = df[target]
# preprocess feature
if df[feature].dtype == object:
one_hot_encoder = preprocessing.OneHotEncoder()
sparse_matrix = one_hot_encoder.fit_transform(df[feature].values.reshape(-1, 1))
feature_df = sparse_matrix
else:
# reshaping needed because there is only 1 feature
feature_df = df[feature].values.reshape(-1, 1)
# Crossvalidation is stratifiedKFold for classification, KFold for regression
# CV on one core (n_job=1; default) has shown to be fastest
scores = cross_val_score(
model, feature_df, target_series, cv=CV_ITERATIONS, scoring=metric
)
return scores.mean()
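# Example call (illustrative sketch; the column names "x" and "y" are hypothetical, the
# metric and model come from the TASKS dict defined further down in this module):
#   score = _calculate_model_cv_score_(
#       df, target="y", feature="x",
#       metric=TASKS["classification"]["metric_key"],   # "f1_weighted"
#       model=TASKS["classification"]["model"],         # tree.DecisionTreeClassifier()
#   )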
def _normalized_mae_score(model_mae, naive_mae):
"Normalizes the model MAE score, given the baseline score"
# # Value range of MAE is [0, infinity), 0 is best
# 10, 5 >> 0 because worse than naive
# 10, 20 >> 0.5
# 5, 20 >> 0.75 = 1 - (mae/base_mae)
if model_mae > naive_mae:
return 0
else:
return 1 - (model_mae / naive_mae)
def _mae_normalizer(df, y, model_score):
"In case of MAE, calculates the baseline score for y and derives the PPS."
df["naive"] = df[y].median()
baseline_score = mean_absolute_error(df[y], df["naive"]) # true, pred
ppscore = _normalized_mae_score(abs(model_score), baseline_score)
return ppscore, baseline_score
def _normalized_f1_score(model_f1, baseline_f1):
"Normalizes the model F1 score, given the baseline score"
# # F1 ranges from 0 to 1
# # 1 is best
# 0.5, 0.7 = 0 because worse than naive
# 0.75, 0.5 > 0.5
#
if model_f1 < baseline_f1:
return 0
else:
scale_range = 1.0 - baseline_f1 # eg 0.3
f1_diff = model_f1 - baseline_f1 # eg 0.1
return f1_diff / scale_range # 0.1/0.3 = 0.33
def _f1_normalizer(df, y, model_score):
"In case of F1, calculates the baseline score for y and derives the PPS."
df["naive"] = df[y].value_counts().index[0]
baseline_score = f1_score(df[y], df["naive"], average="weighted")
ppscore = _normalized_f1_score(model_score, baseline_score)
return ppscore, baseline_score
TASKS = {
"regression": {
"metric_name": "mean absolute error",
"metric_key": "neg_mean_absolute_error",
"model": tree.DecisionTreeRegressor(),
"score_normalizer": _mae_normalizer,
},
"classification": {
"metric_name": "weighted F1",
"metric_key": "f1_weighted",
"model": tree.DecisionTreeClassifier(),
"score_normalizer": _f1_normalizer,
},
"predict_itself": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"predict_constant": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
"predict_id": {
"metric_name": None,
"metric_key": None,
"model": None,
"score_normalizer": None,
},
}
def _infer_task(df, x, y):
"Returns str with the name of the inferred task based on the columns x and y"
if x == y:
return "predict_itself"
category_count = df[y].value_counts().count()
if category_count == 1:
return "predict_constant"
if category_count == 2:
return "classification"
if category_count == len(df[y]) and (
is_string_dtype(df[y]) or is_categorical_dtype(df[y])
):
return "predict_id"
if category_count <= NUMERIC_AS_CATEGORIC_BREAKPOINT and is_numeric_dtype(df[y]):
return "classification"
if is_bool_dtype(df[y]) or is_string_dtype(df[y]) or is_categorical_dtype(df[y]):
return "classification"
if | is_datetime64_any_dtype(df[y]) | pandas.api.types.is_datetime64_any_dtype |
import pandas as pd
from hdbscan import HDBSCAN
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
def prep_for_class(txt_col, f, r):
"""
Prepare train/test data from the clustered (non-noise) points for training a classifier.
:param txt_col: text column, one document per row
:param f: dummy-encoded cluster labels, including a -1 column that marks noise
:param r: feature matrix aligned with txt_col
:return: X_train, X_test, y_train, y_test
"""
ans_ = pd.concat([pd.DataFrame(txt_col), f], axis=1)
ans_ = ans_[ans_[-1] == 0]
ans_ = ans_.drop(-1, axis=1)
ans_ = ans_.melt(id_vars='text', var_name='cluster3', value_name='value')
ans_ = ans_[ans_['value'] == 1]
ans_ = ans_.drop('value', axis=1)
ans_svm = ans_[ans_['cluster3'] > -1]
for_x = pd.concat([pd.DataFrame(txt_col), pd.DataFrame(r)], axis=1)
authorssvm = for_x.merge(ans_svm, left_on='text', right_on='text', how='right')
y = list(authorssvm['cluster3'])
X = authorssvm.drop(['cluster3', 'text'], axis=1)
X = X.fillna(0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
return X_train, X_test, y_train, y_test
def cluster_hdbscan(svmx, hdb, txt_col, f, r):
"""
Re-assign every point (including noise) to a cluster using a classifier trained on the clustered points.
:param svmx: if True use an SVM classifier, otherwise a random forest
:param hdb: fitted HDBSCAN model, passed through unchanged in the result
:param txt_col: text column, one document per row
:param f: dummy-encoded cluster labels, including a -1 column that marks noise
:param r: feature matrix aligned with txt_col
:return: [dummy-encoded predictions joined with the text, predicted labels, hdb]
"""
X_train, X_test, y_train, y_test = prep_for_class(txt_col, f, r)
if svmx:
clf = svm.SVC(C=1000, kernel='rbf', gamma=0.7, random_state=12)
else:
clf = RandomForestClassifier(min_samples_split=2, max_features=3, max_depth=10000, random_state=12)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
nsvm = clf.predict(r)
f = pd.get_dummies(nsvm)
with_catsvm = pd.Series([str(i) for i in list(zip(txt_col, nsvm))], name='text')
answerssvm = pd.concat([with_catsvm, f], axis=1)
answers = [answerssvm, nsvm, hdb]
return answers
def return_hdbscansvm(df, txt_col, rf=False, clust_size=15, samp_size=5, svmx=False, clust_metric='braycurtis'):
"""
Complete pipeline: cluster with HDBSCAN and optionally re-assign noise points with a classifier.
:param df: feature data to cluster
:param txt_col: text column, one document per row
:param rf: if True, classify noise points with a random forest
:param clust_size: minimum cluster size passed to HDBSCAN
:param samp_size: minimum sample size passed to HDBSCAN
:param svmx: if True, classify noise points with an SVM
:param clust_metric: distance metric used by HDBSCAN
:return:
"""
if rf or svmx:
cluster = True
else:
cluster = False
super_flat = | pd.DataFrame(df) | pandas.DataFrame |
import pandas as pd
import numpy as np
from suzieq.utils import SchemaForTable, humanize_timestamp, Schema
from suzieq.engines.base_engine import SqEngineObj
from suzieq.sqobjects import get_sqobject
from suzieq.db import get_sqdb_engine
from suzieq.exceptions import DBReadError, UserQueryError
import dateparser
from datetime import datetime
from pandas.core.groupby import DataFrameGroupBy
class SqPandasEngine(SqEngineObj):
def __init__(self, baseobj):
self.ctxt = baseobj.ctxt
self.iobj = baseobj
self.summary_row_order = []
self._summarize_on_add_field = []
self._summarize_on_add_with_query = []
self._summarize_on_add_list_or_count = []
self._summarize_on_add_stat = []
self._summarize_on_perdevice_stat = []
self._dbeng = get_sqdb_engine(baseobj.ctxt.cfg, baseobj.table, '',
None)
@property
def all_schemas(self) -> Schema:
return self.ctxt.schemas
@property
def schema(self) -> SchemaForTable:
return self.iobj.schema
@property
def cfg(self):
return self.iobj._cfg
@property
def table(self):
return self.iobj._table
def _get_ipvers(self, value: str) -> int:
"""Return the IP version in use"""
if ':' in value:
ipvers = 6
elif '.' in value:
ipvers = 4
else:
ipvers = ''
return ipvers
def _handle_user_query_str(self, df: pd.DataFrame,
query_str: str) -> pd.DataFrame:
"""Handle user query, trapping errors and returning exception
Args:
df (pd.DataFrame): The dataframe to run the query on
query_str (str): pandas query string
Raises:
UserQueryError: Exception if pandas query aborts with errmsg
Returns:
pd.DataFrame: dataframe post query
"""
if query_str:
if query_str.startswith('"') and query_str.endswith('"'):
query_str = query_str[1:-1]
try:
df = df.query(query_str).reset_index(drop=True)
except Exception as ex:
raise UserQueryError(ex)
return df
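# Example (illustrative; the values are hypothetical): a user query string such as
#   'hostname == "leaf01" and namespace == "datacenter"'
# is passed straight to DataFrame.query(); any parsing failure is re-raised as
# UserQueryError.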
def get_valid_df(self, table: str, **kwargs) -> pd.DataFrame:
"""The heart of the engine: retrieving the data from the backing store
Args:
table (str): Name of the table to retrieve the data for
Returns:
pd.DataFrame: The data as a pandas dataframe
"""
if not self.ctxt.engine:
print("Specify an analysis engine using set engine command")
return pd.DataFrame(columns=["namespace", "hostname"])
# Thanks to things like OSPF, we cannot use self.schema here
sch = SchemaForTable(table, self.all_schemas)
phy_table = sch.get_phy_table_for_table()
columns = kwargs.pop('columns', ['default'])
addnl_fields = kwargs.pop('addnl_fields', [])
view = kwargs.pop('view', self.iobj.view)
active_only = kwargs.pop('active_only', True)
hostname = kwargs.get('hostname', [])
fields = sch.get_display_fields(columns)
key_fields = sch.key_fields()
drop_cols = []
if columns == ['*']:
drop_cols.append('sqvers')
aug_fields = sch.get_augmented_fields()
if 'timestamp' not in fields:
fields.append('timestamp')
if 'active' not in fields+addnl_fields:
addnl_fields.append('active')
drop_cols.append('active')
# Order matters. Don't put this before the missing key fields insert
for f in aug_fields:
dep_fields = sch.get_parent_fields(f)
addnl_fields += dep_fields
for fld in key_fields:
if fld not in fields+addnl_fields:
addnl_fields.insert(0, fld)
drop_cols.append(fld)
for f in addnl_fields:
if f not in fields:
# timestamp is always the last field
fields.insert(-1, f)
if self.iobj.start_time:
try:
start_time = int(dateparser.parse(
self.iobj.start_time.replace('last night', 'yesterday'))
.timestamp()*1000)
except Exception as e:
print(f"ERROR: invalid time {self.iobj.start_time}: {e}")
return | pd.DataFrame() | pandas.DataFrame |
import copy
import random
import numpy as np
import pandas as pd
import pytest
from scipy import sparse
import sklearn.datasets
import sklearn.model_selection
from autosklearn.data.feature_validator import FeatureValidator
# Fixtures to be used in this class. By default all elements have 100 datapoints
@pytest.fixture
def input_data_featuretest(request):
if request.param == 'numpy_categoricalonly_nonan':
return np.random.randint(10, size=(100, 10))
elif request.param == 'numpy_numericalonly_nonan':
return np.random.uniform(10, size=(100, 10))
elif request.param == 'numpy_mixed_nonan':
return np.column_stack([
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 3)),
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 1)),
])
elif request.param == 'numpy_string_nonan':
return np.array([
['a', 'b', 'c', 'a', 'b', 'c'],
['a', 'b', 'd', 'r', 'b', 'c'],
])
elif request.param == 'numpy_categoricalonly_nan':
array = np.random.randint(10, size=(100, 10)).astype('float')
array[50, 0:5] = np.nan
return array
elif request.param == 'numpy_numericalonly_nan':
array = np.random.uniform(10, size=(100, 10)).astype('float')
array[50, 0:5] = np.nan
# Somehow array is changed to dtype object after np.nan
return array.astype('float')
elif request.param == 'numpy_mixed_nan':
array = np.column_stack([
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 3)),
np.random.uniform(10, size=(100, 3)),
np.random.randint(10, size=(100, 1)),
])
array[50, 0:5] = np.nan
return array
elif request.param == 'numpy_string_nan':
return np.array([
['a', 'b', 'c', 'a', 'b', 'c'],
[np.nan, 'b', 'd', 'r', 'b', 'c'],
])
elif request.param == 'pandas_categoricalonly_nonan':
return pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='category')
elif request.param == 'pandas_numericalonly_nonan':
return pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='float')
elif request.param == 'pandas_mixed_nonan':
frame = pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='category')
frame['B'] = pd.to_numeric(frame['B'])
return frame
elif request.param == 'pandas_categoricalonly_nan':
return pd.DataFrame([
{'A': 1, 'B': 2, 'C': np.nan},
{'A': 3, 'C': np.nan},
], dtype='category')
elif request.param == 'pandas_numericalonly_nan':
return pd.DataFrame([
{'A': 1, 'B': 2, 'C': np.nan},
{'A': 3, 'C': np.nan},
], dtype='float')
elif request.param == 'pandas_mixed_nan':
frame = pd.DataFrame([
{'A': 1, 'B': 2, 'C': 8},
{'A': 3, 'B': 4},
], dtype='category')
frame['B'] = pd.to_numeric(frame['B'])
return frame
elif request.param == 'pandas_string_nonan':
return pd.DataFrame([
{'A': 1, 'B': 2},
{'A': 3, 'B': 4},
], dtype='string')
elif request.param == 'list_categoricalonly_nonan':
return [
['a', 'b', 'c', 'd'],
['e', 'f', 'c', 'd'],
]
elif request.param == 'list_numericalonly_nonan':
return [
[1, 2, 3, 4],
[5, 6, 7, 8]
]
elif request.param == 'list_mixed_nonan':
return [
['a', 2, 3, 4],
['b', 6, 7, 8]
]
elif request.param == 'list_categoricalonly_nan':
return [
['a', 'b', 'c', np.nan],
['e', 'f', 'c', 'd'],
]
elif request.param == 'list_numericalonly_nan':
return [
[1, 2, 3, np.nan],
[5, 6, 7, 8]
]
elif request.param == 'list_mixed_nan':
return [
['a', np.nan, 3, 4],
['b', 6, 7, 8]
]
elif 'sparse' in request.param:
# We expect the names to be of the type sparse_csc_nonan
sparse_, type_, nan_ = request.param.split('_')
if 'nonan' in nan_:
data = np.ones(3)
else:
data = np.array([1, 2, np.nan])
# Then the type of sparse
row_ind = np.array([0, 1, 2])
col_ind = np.array([1, 2, 1])
if 'csc' in type_:
return sparse.csc_matrix((data, (row_ind, col_ind)))
elif 'csr' in type_:
return sparse.csr_matrix((data, (row_ind, col_ind)))
elif 'coo' in type_:
return sparse.coo_matrix((data, (row_ind, col_ind)))
elif 'bsr' in type_:
return sparse.bsr_matrix((data, (row_ind, col_ind)))
elif 'lil' in type_:
return sparse.lil_matrix((data))
elif 'dok' in type_:
return sparse.dok_matrix(np.vstack((data, data, data)))
elif 'dia' in type_:
return sparse.dia_matrix(np.vstack((data, data, data)))
else:
ValueError("Unsupported indirect fixture {}".format(request.param))
elif 'openml' in request.param:
_, openml_id = request.param.split('_')
X, y = sklearn.datasets.fetch_openml(data_id=int(openml_id),
return_X_y=True, as_frame=True)
return X
else:
ValueError("Unsupported indirect fixture {}".format(request.param))
# Actual checks for the features
@pytest.mark.parametrize(
'input_data_featuretest',
(
'numpy_categoricalonly_nonan',
'numpy_numericalonly_nonan',
'numpy_mixed_nonan',
'numpy_categoricalonly_nan',
'numpy_numericalonly_nan',
'numpy_mixed_nan',
'pandas_categoricalonly_nonan',
'pandas_numericalonly_nonan',
'pandas_mixed_nonan',
'pandas_numericalonly_nan',
'list_numericalonly_nonan',
'list_numericalonly_nan',
'sparse_bsr_nonan',
'sparse_bsr_nan',
'sparse_coo_nonan',
'sparse_coo_nan',
'sparse_csc_nonan',
'sparse_csc_nan',
'sparse_csr_nonan',
'sparse_csr_nan',
'sparse_dia_nonan',
'sparse_dia_nan',
'sparse_dok_nonan',
'sparse_dok_nan',
'sparse_lil_nonan',
'sparse_lil_nan',
'openml_40981', # Australian
),
indirect=True
)
def test_featurevalidator_supported_types(input_data_featuretest):
validator = FeatureValidator()
validator.fit(input_data_featuretest, input_data_featuretest)
transformed_X = validator.transform(input_data_featuretest)
if sparse.issparse(input_data_featuretest):
assert sparse.issparse(transformed_X)
else:
assert isinstance(transformed_X, np.ndarray)
assert np.shape(input_data_featuretest) == np.shape(transformed_X)
assert np.issubdtype(transformed_X.dtype, np.number)
assert validator._is_fitted
@pytest.mark.parametrize(
'input_data_featuretest',
(
'list_categoricalonly_nonan',
'list_categoricalonly_nan',
'list_mixed_nonan',
'list_mixed_nan',
),
indirect=True
)
def test_featurevalidator_unsupported_list(input_data_featuretest):
validator = FeatureValidator()
with pytest.raises(ValueError, match=r".*has invalid type object. Cast it to a valid dtype.*"):
validator.fit(input_data_featuretest)
@pytest.mark.parametrize(
'input_data_featuretest',
(
'numpy_string_nonan',
'numpy_string_nan',
),
indirect=True
)
def test_featurevalidator_unsupported_numpy(input_data_featuretest):
validator = FeatureValidator()
with pytest.raises(ValueError, match=r".*When providing a numpy array.*not supported."):
validator.fit(input_data_featuretest)
@pytest.mark.parametrize(
'input_data_featuretest',
(
'pandas_categoricalonly_nan',
'pandas_mixed_nan',
'openml_179', # adult workclass has NaN in columns
),
indirect=True
)
def test_featurevalidator_unsupported_pandas(input_data_featuretest):
validator = FeatureValidator()
with pytest.raises(ValueError, match=r"Categorical features in a dataframe.*missing/NaN"):
validator.fit(input_data_featuretest)
@pytest.mark.parametrize(
'input_data_featuretest',
(
'numpy_categoricalonly_nonan',
'numpy_mixed_nonan',
'numpy_categoricalonly_nan',
'numpy_mixed_nan',
'pandas_categoricalonly_nonan',
'pandas_mixed_nonan',
'list_numericalonly_nonan',
'list_numericalonly_nan',
'sparse_bsr_nonan',
'sparse_bsr_nan',
'sparse_coo_nonan',
'sparse_coo_nan',
'sparse_csc_nonan',
'sparse_csc_nan',
'sparse_csr_nonan',
'sparse_csr_nan',
'sparse_dia_nonan',
'sparse_dia_nan',
'sparse_dok_nonan',
'sparse_dok_nan',
'sparse_lil_nonan',
),
indirect=True
)
def test_featurevalidator_fitontypeA_transformtypeB(input_data_featuretest):
"""
Check that we can fit on one input type (e.g. numpy) and still transform
another type (e.g. pandas).
This is only problematic in the case where we create an encoder.
"""
validator = FeatureValidator()
validator.fit(input_data_featuretest, input_data_featuretest)
if isinstance(input_data_featuretest, pd.DataFrame):
complementary_type = input_data_featuretest.to_numpy()
elif isinstance(input_data_featuretest, np.ndarray):
complementary_type = pd.DataFrame(input_data_featuretest)
elif isinstance(input_data_featuretest, list):
complementary_type = | pd.DataFrame(input_data_featuretest) | pandas.DataFrame |
import unittest
from unittest.mock import patch, PropertyMock
import time
import mt5_correlation.correlation as correlation
import pandas as pd
from datetime import datetime, timedelta
from test_mt5 import Symbol
import random
import os
class TestCorrelation(unittest.TestCase):
# Mock symbols. 4 Symbols, 3 visible.
mock_symbols = [Symbol(name='SYMBOL1', visible=True),
Symbol(name='SYMBOL2', visible=True),
Symbol(name='SYMBOL3', visible=False),
Symbol(name='SYMBOL4', visible=True),
Symbol(name='SYMBOL5', visible=True)]
# Start and end date for price data and mock prices: base; correlated; and uncorrelated.
start_date = None
end_date = None
price_columns = None
mock_base_prices = None
mock_correlated_prices = None
mock_uncorrelated_prices = None
def setUp(self):
"""
Creates some price data for use in tests
:return:
"""
# Start and end date for price data and mock price dataframes. One for: base; correlated; uncorrelated and
# different dates.
self.start_date = datetime(2021, 1, 1, 1, 5, 0)
self.end_date = datetime(2021, 1, 1, 11, 30, 0)
self.price_columns = ['time', 'close']
self.mock_base_prices = pd.DataFrame(columns=self.price_columns)
self.mock_correlated_prices = pd.DataFrame(columns=self.price_columns)
self.mock_uncorrelated_prices = pd.DataFrame(columns=self.price_columns)
self.mock_correlated_different_dates = pd.DataFrame(columns=self.price_columns)
self.mock_inverse_correlated_prices = pd.DataFrame(columns=self.price_columns)
# Build the price data for the test. One price every 5 minutes for 500 rows. Base will use min for price,
# correlated will use min + 5 and uncorrelated will use random
for date in (self.start_date + timedelta(minutes=m) for m in range(0, 500*5, 5)):
self.mock_base_prices = self.mock_base_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, date.minute]]))
self.mock_correlated_prices = \
self.mock_correlated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, date.minute + 5]]))
self.mock_uncorrelated_prices = \
self.mock_uncorrelated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, random.randint(0, 1000000)]]))
self.mock_correlated_different_dates = \
self.mock_correlated_different_dates.append(pd.DataFrame(columns=self.price_columns,
data=[[date + timedelta(minutes=100),
date.minute + 5]]))
self.mock_inverse_correlated_prices = \
self.mock_inverse_correlated_prices.append(pd.DataFrame(columns=self.price_columns,
data=[[date, (date.minute + 5) * -1]]))
@patch('mt5_correlation.mt5.MetaTrader5')
def test_calculate(self, mock):
"""
Test the calculate method. Uses mock for MT5 symbols and prices.
:param mock:
:return:
"""
# Mock symbol return values
mock.symbols_get.return_value = self.mock_symbols
# Correlation class
cor = correlation.Correlation(monitoring_threshold=1, monitor_inverse=True)
# Calculate for price data. We should have 100% matching dates in sets. Get prices should be called 4 times.
# We don't have a SYMBOL3 as this is set as not visible. Correlations should be as follows:
# SYMBOL1:SYMBOL2 should be fully correlated (1)
# SYMBOL1:SYMBOL4 should be uncorrelated (0)
# SYMBOL1:SYMBOL5 should be negatively correlated
# SYMBOL2:SYMBOL5 should be negatively correlated
# We will not use p_value as the last set uses random numbers so p value will not be useful.
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_uncorrelated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# Test the output. We should have 6 rows. S1:S2 c=1, S1:S4 c<1, S1:S5 c=-1, S2:S5 c=-1. We are not checking
# S2:S4 or S4:S5
self.assertEqual(len(cor.coefficient_data.index), 6, "There should be six correlations rows calculated.")
self.assertEqual(cor.get_base_coefficient('SYMBOL1', 'SYMBOL2'), 1,
"The correlation for SYMBOL1:SYMBOL2 should be 1.")
self.assertTrue(cor.get_base_coefficient('SYMBOL1', 'SYMBOL4') < 1,
"The correlation for SYMBOL1:SYMBOL4 should be <1.")
self.assertEqual(cor.get_base_coefficient('SYMBOL1', 'SYMBOL5'), -1,
"The correlation for SYMBOL1:SYMBOL5 should be -1.")
self.assertEqual(cor.get_base_coefficient('SYMBOL2', 'SYMBOL5'), -1,
"The correlation for SYMBOL2:SYMBOL5 should be -1.")
# Monitoring threshold is 1 and we are monitoring inverse. Get filtered correlations. There should be 3 (S1:S2,
# S1:S5 and S2:S5)
self.assertEqual(len(cor.filtered_coefficient_data.index), 3,
"There should be 3 rows in filtered coefficient data when we are monitoring inverse "
"correlations.")
# Now aren't monitoring inverse correlations. There should only be one correlation when filtered
cor.monitor_inverse = False
self.assertEqual(len(cor.filtered_coefficient_data.index), 1,
"There should be only 1 rows in filtered coefficient data when we are not monitoring inverse "
"correlations.")
# Now we're going to recalculate, but this time SYMBOL1:SYMBOL2 will have non-overlapping dates and coefficient
# should be None. There shouldn't be a row. We should have correlations for S1:S4, S1:S5 and S4:S5
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_different_dates,
self.mock_correlated_prices, self.mock_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
self.assertEqual(len(cor.coefficient_data.index), 3, "There should be three correlations rows calculated.")
self.assertEqual(cor.coefficient_data.iloc[0, 2], 1, "The correlation for SYMBOL1:SYMBOL4 should be 1.")
self.assertEqual(cor.coefficient_data.iloc[1, 2], 1, "The correlation for SYMBOL1:SYMBOL5 should be 1.")
self.assertEqual(cor.coefficient_data.iloc[2, 2], 1, "The correlation for SYMBOL4:SYMBOL5 should be 1.")
# Get the price data used to calculate the coefficients for symbol 1. It should match mock_base_prices.
price_data = cor.get_price_data('SYMBOL1')
self.assertTrue(price_data.equals(self.mock_base_prices), "Price data returned post calculation should match "
"mock price data.")
def test_calculate_coefficient(self):
"""
Tests the coefficient calculation.
:return:
"""
# Correlation class
cor = correlation.Correlation()
# Test 2 correlated sets
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_correlated_prices)
self.assertEqual(coefficient, 1, "Coefficient should be 1.")
# Test 2 uncorrelated sets. Set p value to 1 to force correlation to be returned.
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_uncorrelated_prices, max_p_value=1)
self.assertTrue(coefficient < 1, "Coefficient should be < 1.")
# Test 2 sets where prices don't overlap
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_correlated_different_dates)
self.assertTrue(coefficient < 1, "Coefficient should be < 1.")
# Test 2 inversely correlated sets
coefficient = cor.calculate_coefficient(self.mock_base_prices, self.mock_inverse_correlated_prices)
self.assertEqual(coefficient, -1, "Coefficient should be -1.")
@patch('mt5_correlation.mt5.MetaTrader5')
def test_get_ticks(self, mock):
"""
Test that caching works. For the purpose of this test, we can use price data rather than tick data.
Mock 2 different sets of prices. Get three times. Base, One within cache threshold and one outside. Set 1
should match set 2 but differ from set 3.
:param mock:
:return:
"""
# Correlation class to test
cor = correlation.Correlation()
# Mock the tick data to contain 2 different sets. Then get twice. They should match as the data was cached.
mock.copy_ticks_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices]
# We need to start and stop the monitor as this will set the cache time
cor.start_monitor(interval=10, calculation_params={'from': 10, 'min_prices': 0, 'max_set_size_diff_pct': 0,
'overlap_pct': 0, 'max_p_value': 1}, cache_time=3)
cor.stop_monitor()
# Get the ticks within cache time and check that they match
base_ticks = cor.get_ticks('SYMBOL1', None, None)
cached_ticks = cor.get_ticks('SYMBOL1', None, None)
self.assertTrue(base_ticks.equals(cached_ticks),
"Both sets of tick data should match as set 2 came from cache.")
# Wait 3 seconds
time.sleep(3)
# Retrieve again. This one should be different as the cache has expired.
non_cached_ticks = cor.get_ticks('SYMBOL1', None, None)
self.assertTrue(not base_ticks.equals(non_cached_ticks),
"Both sets of tick data should differ as cached data had expired.")
@patch('mt5_correlation.mt5.MetaTrader5')
def test_start_monitor(self, mock):
"""
Test that starting the monitor and running for 2 seconds produces two sets of coefficient history when using an
interval of 1 second.
:param mock:
:return:
"""
# Mock symbol return values
mock.symbols_get.return_value = self.mock_symbols
# Create correlation class. We will set a divergence threshold so that we can test status.
cor = correlation.Correlation(divergence_threshold=0.8, monitor_inverse=True)
# Calculate for price data. We should have 100% matching dates in sets. Get prices should be called 4 times.
# We don't have a SYMBOL3 as it is set as not visible. All pairs should be correlated for the purpose of this
# test.
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_correlated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# We will build some tick data for each symbol and patch it in. Tick data will be from 10 seconds ago to now.
# We only need to patch in one set of tick data for each symbol as it will be cached.
columns = ['time', 'ask']
starttime = datetime.now() - timedelta(seconds=10)
tick_data_s1 = pd.DataFrame(columns=columns)
tick_data_s2 = pd.DataFrame(columns=columns)
tick_data_s4 = pd.DataFrame(columns=columns)
tick_data_s5 = pd.DataFrame(columns=columns)
now = datetime.now()
price_base = 1
while starttime < now:
tick_data_s1 = tick_data_s1.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.5]]))
tick_data_s2 = tick_data_s2.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.1]]))
tick_data_s4 = tick_data_s4.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * 0.25]]))
tick_data_s5 = tick_data_s5.append(pd.DataFrame(columns=columns, data=[[starttime, price_base * -0.25]]))
starttime = starttime + timedelta(milliseconds=10*random.randint(0, 100))
price_base += 1
# Patch it in
mock.copy_ticks_range.side_effect = [tick_data_s1, tick_data_s2, tick_data_s4, tick_data_s5]
# Start the monitor. Run every second. Use ~10 and ~5 seconds of data. We're not testing the overlap and price
# data quality metrics here as that is set elsewhere so these can be set to not take effect. Set cache level
# high and don't use autosave. Timer runs in a separate thread so test can continue after it has started.
cor.start_monitor(interval=1, calculation_params=[{'from': 0.66, 'min_prices': 0,
'max_set_size_diff_pct': 0, 'overlap_pct': 0,
'max_p_value': 1},
{'from': 0.33, 'min_prices': 0,
'max_set_size_diff_pct': 0, 'overlap_pct': 0,
'max_p_value': 1}], cache_time=100, autosave=False)
# Wait 2 seconds so timer runs twice
time.sleep(2)
# Stop the monitor
cor.stop_monitor()
# We should have 2 coefficients calculated for each symbol pair (6), for each date_from value (2),
# for each run (2) so 24 in total.
self.assertEqual(len(cor.coefficient_history.index), 24)
# We should have 2 coefficients calculated for a single symbol pair and timeframe
self.assertEqual(len(cor.get_coefficient_history({'Symbol 1': 'SYMBOL1', 'Symbol 2': 'SYMBOL2',
'Timeframe': 0.66})),
2, "We should have 2 history records for SYMBOL1:SYMBOL2 using the 0.66 min timeframe.")
# The status should be DIVERGED for SYMBOL1:SYMBOL2 and CORRELATED for SYMBOL1:SYMBOL4 and SYMBOL2:SYMBOL4.
self.assertTrue(cor.get_last_status('SYMBOL1', 'SYMBOL2') == correlation.STATUS_DIVERGED)
self.assertTrue(cor.get_last_status('SYMBOL1', 'SYMBOL4') == correlation.STATUS_CORRELATED)
self.assertTrue(cor.get_last_status('SYMBOL2', 'SYMBOL4') == correlation.STATUS_CORRELATED)
# We are monitoring inverse correlations, status for SYMBOL1:SYMBOL5 should be DIVERGED
self.assertTrue(cor.get_last_status('SYMBOL2', 'SYMBOL5') == correlation.STATUS_DIVERGED)
@patch('mt5_correlation.mt5.MetaTrader5')
def test_load_and_save(self, mock):
"""Calculate and run monitor for a few seconds. Store the data. Save it, load it then compare against stored
data."""
# Correlation class
cor = correlation.Correlation()
# Patch symbol and price data, then calculate
mock.symbols_get.return_value = self.mock_symbols
mock.copy_rates_range.side_effect = [self.mock_base_prices, self.mock_correlated_prices,
self.mock_correlated_prices, self.mock_inverse_correlated_prices]
cor.calculate(date_from=self.start_date, date_to=self.end_date, timeframe=5, min_prices=100,
max_set_size_diff_pct=100, overlap_pct=100, max_p_value=1)
# Patch the tick data
columns = ['time', 'ask']
starttime = datetime.now() - timedelta(seconds=10)
tick_data_s1 = pd.DataFrame(columns=columns)
tick_data_s3 = pd.DataFrame(columns=columns)
tick_data_s4 = | pd.DataFrame(columns=columns) | pandas.DataFrame |
from bs4 import BeautifulSoup
import requests
import requests.exceptions
import urllib
import urllib.parse
from collections import deque
import re
import sys
import pandas as pd
df_urls = pd.DataFrame()
df_urls = | pd.read_table('processing_A.csv', sep=',') | pandas.read_table |
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = | Series(arr, index=self.string_index, name="a") | pandas.Series |
"""
permutation feature importance
"""
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
with open("../pfi_mel40_spikegram40_5.pkl", "rb") as f:
data = pickle.load(f)
max_cer, cers = data[0], data[1:]
df = | pd.DataFrame(columns=['feature', 'pfi']) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
from covsirphy.util.error import SubsetNotFoundError
from covsirphy.cleaning.cbase import CleaningBase
class OxCGRTData(CleaningBase):
"""
Data cleaning of OxCGRT dataset.
Args:
filename (str or None): CSV filename of the dataset
data (pandas.DataFrame or None):
Index
reset index
Columns
- Date: Observation date
- ISO3: ISO 3166-1 alpha-3, like JPN
- Country: country/region name
- variables defined by @variables
citation (str or None): citation or None (empty)
variables (list[str] or None): variables to parse or None (use default variables listed as follows)
- School_closing
- Workplace_closing
- Cancel_events
- Gatherings_restrictions
- Transport_closing
- Stay_home_restrictions
- Internal_movement_restrictions
- International_movement_restrictions
- Information_campaigns
- Testing_policy
- Contact_tracing
- Stringency_index
Note:
Either @filename (high priority) or @data must be specified.
Note:
The default policy indices (Overall etc.) are from README.md and documentation/index_methodology.md in
https://github.com/OxCGRT/covid-policy-tracker/
"""
OXCGRT_VARS = [
"School_closing",
"Workplace_closing",
"Cancel_events",
"Gatherings_restrictions",
"Transport_closing",
"Stay_home_restrictions",
"Internal_movement_restrictions",
"International_movement_restrictions",
"Information_campaigns",
"Testing_policy",
"Contact_tracing",
"Stringency_index"
]
# Indicators except for Stringency index
OXCGRT_VARS_INDICATORS = [v for v in OXCGRT_VARS if v != "Stringency_index"]
def __init__(self, filename=None, data=None, citation=None, variables=None):
self._variables = variables or self.OXCGRT_VARS[:]
super().__init__(filename=filename, data=data, citation=citation, variables=self._variables)
def _cleaning(self):
"""
Perform data cleaning of the raw data.
Returns:
pandas.DataFrame
Index
reset index
Columns
- Date (pandas.Timestamp): Observation date
- ISO3 (str): ISO 3166-1 alpha-3, like JPN
- Country (pandas.Category): country/region name
- Province (pandas.Category): province/prefecture/state name
- variables defined by OxCGRTData(variables)
"""
df = self._raw.copy()
# Prepare data for Greenland
grl_df = df.loc[df[self.COUNTRY] == "Denmark"].copy()
grl_df.loc[:, [self.ISO3, self.COUNTRY]] = ["GRL", "Greenland"]
df = pd.concat([df, grl_df], sort=True, ignore_index=True)
# Confirm the expected columns are in raw data
self._ensure_dataframe(df, name="the raw data", columns=self._raw_cols)
# Read date records
df[self.DATE] = | pd.to_datetime(df[self.DATE]) | pandas.to_datetime |
'''
text-mining: count the frequency of word occurrences
'''
import os
from os import system, name
import csv
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
import pandas as pd
from datetime import datetime
# NLTK
class Main():
# open log to write report process
def __init__(self, log):
super(Main, self)
self.log = open(log+'.txt', "w")
self.log_output = open('output_'+log+'.txt', "w")
def logSukses(self, textInfo, textContent):
self.log.write("SUCCESS: \t {textInfo} \"{textContent}\"\n".format(textInfo=textInfo, textContent=textContent))
self.log.flush()
print("SUCCESS: \t {textInfo} \"{textContent}\"\n".format(textInfo=textInfo, textContent=textContent))
def logError(self, errorId, textInfo, textContent):
self.log.write("ERROR {errorId}: \t {textInfo} \"{textContent}\"\n".format(errorId=errorId, textInfo=textInfo, textContent=textContent))
self.log.flush()
print("ERROR {errorId}: \t {textInfo} \"{textContent}\"\n".format(errorId=errorId, textInfo=textInfo, textContent=textContent))
# tokenize text with NLTK
def tokenize(self, text):
try:
self.text = text
self.token = word_tokenize(self.text)
self.logSukses('tokenize', self.text)
except Exception as identifier:
self.logError('101', 'tokenize', self.text)
# count frequency of words
def freqdist(self):
try:
self.fdist = FreqDist(self.token)
self.logSukses('freqdist', self.text)
return self.fdist
except Exception as identifier:
self.logError('102', 'freqdist', self.text)
# print frequency (key and value), and write txt
def freqword(self, dictWords):
try:
self.dictWords = dictWords
self.logSukses('writing frequency of words', '')
self.log_output.seek(0)
for i in self.dictWords:
# print(i, self.dictWords[i])
self.log_output.write(i + " " + str(self.dictWords[i]) + "\n")
self.log_output.truncate()
self.log_output.flush()
except Exception as identifier:
self.logError('103', 'writing frequency of words failed', '')
def closeLog(self):
self.log.close()
self.log_output.close()
# Dataframe (read .xlsx)
class readData():
def __init__(self):
super(readData, self)
# open file .xlsx
def open(self, data_file):
self.data_file = pd.read_excel(data_file)
def count(self):
self.count_index = len(self.data_file.index)
return self.count_index
# return value of column
def column(self, column_name):
# read dataframe with specific column_name
data = | pd.DataFrame(self.data_file, columns=[column_name]) | pandas.DataFrame |
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import statistics as st
import csv
from corems.mass_spectra.calc.GC_Calc import GC_Calculations
from corems.mass_spectrum.factory.MassSpectrumClasses import MassSpecBase
from corems.mass_spectrum.factory.MassSpectrumClasses import MassSpecCentroidLowRes
from corems.chroma_peak.factory.ChromaPeakClasses import GCPeak
from corems.mass_spectra.calc import SignalProcessing as sp
class LossFinderTargeted(GC_Calculations):
def __init__(self, ref_file = 'Unknown', noise_cutoff = 'Unknown', tolerance = "Unknown"):
self.tolerance = float()
self.noise_cutoff = float()
self.offset_hits = {}
self.mz_filtered = {}
self.abund_filtered = {}
self.mz_count = float()
#self._mz_exp = MassSpecBase._mspeaks
#self._abundance = MassSpecBase._abundance
def ms_info_get(self, mass_spectra_obj):
mz_dict = {}
abund = {}
self.mz_count = sum([len(mz) for mz in mass_spectra_obj.values()])
for scan_number, ms_obj in mass_spectra_obj.items():
mz_dict.update({scan_number:ms_obj.mz_exp})
abund.update({scan_number:ms_obj.abundance})
return mz_dict, abund
def loss_ref_get(self, file_location, tolerance):
offset_ref = {}
range_ref = {}
with open(file_location) as ref:
ref_reader = csv.reader(ref, delimiter=',')
next(ref_reader)
for name, mass_offset in ref_reader:
offset_ref.setdefault(name,float(mass_offset))
for key, val in offset_ref.items():
range_ref.update({key:(val-tolerance, val+tolerance)})
return range_ref
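# Illustrative reference-file layout assumed by loss_ref_get (the header row is skipped;
# the names and offsets below are made-up examples):
#   name,mass_offset
#   water_loss,18.0106
#   methyl_loss,15.0235
# With tolerance=0.01 this would produce e.g. {"water_loss": (18.0006, 18.0206), ...}.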
def threshold_filter(self, mz_dict, Intensity, noise_cutoff):
for scan_number, info in Intensity.items():
cutoff = st.mean(Intensity[scan_number])*noise_cutoff
noise = set([peak for peak in Intensity[scan_number] if peak <=cutoff])
mz_dict[scan_number] = [mz for mz, peak in zip(mz_dict[scan_number], Intensity[scan_number]) if peak not in noise ]
Intensity[scan_number] = [peak for peak in Intensity[scan_number] if peak >= cutoff]
return mz_dict , Intensity
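# Note (added): the cutoff is noise_cutoff times the mean intensity of each scan, so with
# noise_cutoff=3 any peak at or below three times the scan's mean abundance is treated as
# noise and its m/z value is dropped along with it.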
def mz_pair_checker(self, chem, lower, upper, mz1, spectrum, Intensity, scan_number):
for mz2 in spectrum:
if mz1 > mz2 and lower <= abs(mz1-mz2) <= upper:
if chem not in self.offset_hits.keys():
self.offset_hits.update( {chem:[((mz2, mz1, Intensity[spectrum.index(mz2)] , Intensity[spectrum.index(mz1)], chem, scan_number ))]} )
else:
self.offset_hits[chem].append( ((mz2,mz1, Intensity[spectrum.index(mz2)] , Intensity[spectrum.index(mz1)], chem, scan_number )) )
def findpeakoffset(self, range_ref, mz_filtered, abund_filtered):
count = 0
for chem, ref_pair in range_ref.items():
for scan_number in mz_filtered.keys():
while count < len(mz_filtered[scan_number])-1:
#while count < 2:
self.mz_pair_checker(chem, ref_pair[0], ref_pair[1], mz_filtered[scan_number][count], mz_filtered[scan_number], abund_filtered[scan_number], scan_number)
count+=1
#print(self.offset_hits)
count=0
return self.offset_hits
def LF_out(self, LF_dict, mz_count):
data = pd.DataFrame()
for chem, stats in LF_dict.items():
data = data.append( | pd.DataFrame(stats) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from finquant.moving_average import compute_ma, sma, ema, sma_std, ema_std
from finquant.moving_average import plot_bollinger_band
def test_sma():
orig = np.array(
[
[np.nan, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5],
[np.nan, 0.5, 2.5, 6.5, 12.5, 20.5, 30.5, 42.5, 56.5, 72.5],
]
)
dforig = | pd.DataFrame({"0": orig[0], "1": orig[1]}) | pandas.DataFrame |
"""
Display information about ctapipe output files (DL1 or DL2)
"""
from pathlib import Path
import tables
import yaml
from astropy.table import Table
from ctapipe.tools.utils import get_parser
def unflatten(dictionary, separator=" "):
""" turn flattened dict keys into nested """
hierarch_dict = dict()
for key, value in dictionary.items():
parts = key.split(separator)
tmp_dict = hierarch_dict
for part in parts[:-1]:
if part not in tmp_dict:
tmp_dict[part] = dict()
tmp_dict = tmp_dict[part]
tmp_dict[parts[-1]] = value
return hierarch_dict
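# Example (illustrative; the keys are made up): with the default separator,
#   unflatten({"a b c": 1, "a b d": 2})  ->  {"a": {"b": {"c": 1, "d": 2}}}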
def fileinfo(args):
"""
Display information about ctapipe output files (DL1 or DL2 in HDF5 format).
Optionally create an index table from all headers
"""
info_total = {} # accumulated info for table output
for filename in args.files:
info = {}
# prevent failure if a non-file is given (e.g. a directory)
if Path(filename).is_file() is False:
info[filename] = "not a file"
elif tables.is_hdf5_file(filename) is not True:
info[filename] = "unknown file type"
else:
try:
with tables.open_file(filename, mode="r") as infile:
# pylint: disable=W0212,E1101
attrs = {
name: str(infile.root._v_attrs[name])
for name in infile.root._v_attrs._f_list()
}
if args.flat:
info[filename] = attrs
else:
info[filename] = unflatten(attrs)
if args.output_table:
info_total[filename] = attrs
except tables.exceptions.HDF5ExtError as err:
info[filename] = f"ERROR {err}"
print(yaml.dump(info, indent=4))
if args.output_table:
# use pandas' ability to convert a dict of flat values to a table
import pandas as pd # pylint: disable=C0415
dataframe = | pd.DataFrame(info_total) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy import stats
from sklearn.linear_model import Ridge, RidgeCV
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error, make_scorer
# In[2]:
def calculate_pearson(df):
correlations = {}
numerical_features = df.select_dtypes(exclude = ["object"]).columns
numerical_features = numerical_features.drop("cod_municipio")
for i in numerical_features:
corr = stats.pearsonr(df[i], df['ideb'])[0]
correlations[i] = corr
df_corr = pd.DataFrame(list(correlations.items()), columns=['feature', 'correlation_with_ideb'])
df_corr = df_corr.dropna()
return df_corr
# In[3]:
def calculate_categorical_correlation(df):
categorical_features = df.select_dtypes(include = ["object"]).columns
return categorical_features
# # Pull the CSV data from each group member
# ### Alexandre's data
# In[4]:
path = '../../data/'
# In[5]:
# Initial data
alexandre_inicio_2015 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2015_ai.csv')
alexandre_inicio_2017 = pd.read_csv(path + 'bases_ale/anos_iniciais/ideb_municipios_2017_ai.csv')
# Final data
alexandre_final_2015 = pd.read_csv(path + 'bases_ale/anos_finais/ideb_municipios_2015_af.csv')
alexandre_final_2017 = pd.read_csv(path + 'bases_ale/anos_finais/ideb_municipios_2017_af.csv')
# ### Lidia's data
# In[6]:
# Initial data
lidia_inicio_2007 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2007_ai.csv')
lidia_inicio_2009 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2009_ai.csv')
lidia_inicio_2011 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2011_ai.csv')
lidia_inicio_2013 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2013_ai.csv')
lidia_inicio_2015 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2015_ai.csv')
lidia_inicio_2017 = pd.read_csv(path + 'bases_lidia/anos_iniciais/ideb_escola_2017_ai.csv')
# Final data
lidia_final_2007 = pd.read_csv(path + 'bases_lidia/anos_finais/ideb_escola_2007_af.csv')
lidia_final_2009 = | pd.read_csv(path + 'bases_lidia/anos_finais/ideb_escola_2009_af.csv') | pandas.read_csv |
from copy import deepcopy
import os
import re
import math
from sktime.forecasting.model_selection import (
ExpandingWindowSplitter,
SlidingWindowSplitter,
)
from pycaret.internal.pycaret_experiment.utils import highlight_setup, MLUsecase
from pycaret.internal.pycaret_experiment.supervised_experiment import (
_SupervisedExperiment,
)
from pycaret.internal.pipeline import (
estimator_pipeline,
get_pipeline_fit_kwargs,
)
from pycaret.internal.utils import (
color_df,
SeasonalPeriod,
TSModelTypes,
get_function_params,
)
import pycaret.internal.patches.sklearn
import pycaret.internal.patches.yellowbrick
from pycaret.internal.logging import get_logger
from pycaret.internal.Display import Display
from pycaret.internal.distributions import *
from pycaret.internal.validation import *
from pycaret.internal.tunable import TunableMixin
import pycaret.containers.metrics.time_series
import pycaret.containers.models.time_series
import pycaret.internal.preprocess
import pycaret.internal.persistence
import pandas as pd # type: ignore
from pandas.io.formats.style import Styler
import numpy as np # type: ignore
import datetime
import time
import gc
from sklearn.base import clone # type: ignore
from typing import List, Tuple, Any, Union, Optional, Dict, Generator
import warnings
from IPython.utils import io
import traceback
import plotly.express as px # type: ignore
import plotly.graph_objects as go # type: ignore
import logging
from sklearn.base import clone # type: ignore
from sklearn.model_selection._validation import _aggregate_score_dicts # type: ignore
from sklearn.model_selection import check_cv, ParameterGrid, ParameterSampler # type: ignore
from sklearn.model_selection._search import _check_param_grid # type: ignore
from sklearn.metrics._scorer import get_scorer, _PredictScorer # type: ignore
from collections import defaultdict
from functools import partial
from scipy.stats import rankdata # type: ignore
from joblib import Parallel, delayed # type: ignore
from sktime.forecasting.base import ForecastingHorizon
from sktime.utils.validation.forecasting import check_y_X # type: ignore
from sktime.forecasting.model_selection import SlidingWindowSplitter # type: ignore
from pycaret.internal.tests.time_series import test_
from pycaret.internal.plots.time_series import plot_
warnings.filterwarnings("ignore")
LOGGER = get_logger()
# def _get_cv_n_folds(y, cv) -> int:
# """
# Get the number of folds for time series
# cv must be of type SlidingWindowSplitter or ExpandingWindowSplitter
# TODO: Fix this inside sktime and replace this with sktime method [1]
# Ref:
# [1] https://github.com/alan-turing-institute/sktime/issues/632
# """
# n_folds = int((len(y) - cv.initial_window) / cv.step_length)
# return n_folds
def get_folds(cv, y) -> Generator[Tuple[pd.Series, pd.Series], None, None]:
"""
Returns the train and test indices for the time series data
"""
# https://github.com/alan-turing-institute/sktime/blob/main/examples/window_splitters.ipynb
for train_indices, test_indices in cv.split(y):
# print(f"Train Indices: {train_indices}, Test Indices: {test_indices}")
yield train_indices, test_indices
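# Illustrative sketch (assumed splitter configuration, not taken from this module):
#   cv = SlidingWindowSplitter(window_length=24, step_length=6, fh=np.arange(1, 7))
#   for train_idx, test_idx in get_folds(cv, y):
#       ...  # first fold trains on the first 24 observations and tests on the next 6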
def cross_validate_ts(
forecaster,
y: pd.Series,
X: Optional[Union[pd.Series, pd.DataFrame]],
cv,
scoring: Dict[str, Union[str, _PredictScorer]],
fit_params,
n_jobs,
return_train_score,
error_score=0,
verbose: int = 0,
**additional_scorer_kwargs,
) -> Dict[str, np.array]:
"""Performs Cross Validation on time series data
Parallelization is based on `sklearn` cross_validate function [1]
Ref:
[1] https://github.com/scikit-learn/scikit-learn/blob/0.24.1/sklearn/model_selection/_validation.py#L246
Parameters
----------
forecaster : [type]
Time Series Forecaster that is compatible with sktime
y : pd.Series
The variable of interest for forecasting
X : Optional[Union[pd.Series, pd.DataFrame]]
Exogenous Variables
cv : sktime-compatible splitter
Cross-validation splitter, e.g. ExpandingWindowSplitter or SlidingWindowSplitter
scoring : Dict[str, Union[str, _PredictScorer]]
Scoring Dictionary. Values can be valid strings that can be converted to
callable metrics or the callable metrics directly
fit_params : [type]
Fit parameters to be used when training
n_jobs : [type]
Number of cores to use to parallelize. Refer to sklearn for details
return_train_score : [type]
Should the training scores be returned. Unused for now.
error_score : int, optional
Unused for now, by default 0
verbose : int
Sets the verbosity level. Unused for now
additional_scorer_kwargs: Dict[str, Any]
Additional scorer kwargs such as {`sp`:12} required by metrics like MASE
Returns
-------
Tuple[Dict[str, np.array], tuple]
Aggregated test scores per metric, plus the forecaster cutoff used for each fold
Raises
------
Exception
If fitting or scoring raises any exception
"""
try:
# # For Debug
# n_jobs = 1
scoring = _get_metrics_dict_ts(scoring)
parallel = Parallel(n_jobs=n_jobs)
out = parallel(
delayed(_fit_and_score)(
forecaster=clone(forecaster),
y=y,
X=X,
scoring=scoring,
train=train,
test=test,
parameters=None,
fit_params=fit_params,
return_train_score=return_train_score,
error_score=error_score,
**additional_scorer_kwargs,
)
for train, test in get_folds(cv, y)
)
# raise key exceptions
except Exception:
raise
# Similar to parts of _format_results in BaseGridSearch
(test_scores_dict, fit_time, score_time, cutoffs) = zip(*out)
test_scores = _aggregate_score_dicts(test_scores_dict)
return test_scores, cutoffs
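# Minimal usage sketch for `cross_validate_ts` (the forecaster, splitter and series are
# assumptions for illustration only). `scoring` maps metric names to strings or
# `_PredictScorer` objects; the returned `test_scores` maps each metric name to one value per fold.
# >>> from sktime.forecasting.naive import NaiveForecaster
# >>> y = pd.Series(np.random.randn(120))
# >>> cv = SlidingWindowSplitter(fh=np.arange(1, 13), window_length=36, step_length=12)
# >>> scores, cutoffs = cross_validate_ts(
# ...     forecaster=NaiveForecaster(), y=y, X=None, cv=cv,
# ...     scoring={"mae": "neg_mean_absolute_error"},
# ...     fit_params={"fh": cv.fh}, n_jobs=1, return_train_score=False,
# ... )
# >>> scores["mae"]  # one score per fold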
def _get_metrics_dict_ts(
metrics_dict: Dict[str, Union[str, _PredictScorer]]
) -> Dict[str, _PredictScorer]:
"""Returns a metrics dictionary in which all values are callables
of type _PredictScorer
Parameters
----------
metrics_dict : A metrics dictionary in which some values can be strings.
If the value is a string, the corresponding callable metric is returned
e.g. Dictionary Value of 'neg_mean_absolute_error' will return
make_scorer(mean_absolute_error, greater_is_better=False)
"""
return_metrics_dict = {}
for k, v in metrics_dict.items():
if isinstance(v, str):
return_metrics_dict[k] = get_scorer(v)
else:
return_metrics_dict[k] = v
return return_metrics_dict
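# Example of the string-to-scorer conversion done above (illustrative only): string values
# are resolved via sklearn's `get_scorer`, callables are passed through unchanged.
# >>> _get_metrics_dict_ts({"mae": "neg_mean_absolute_error"})
# {'mae': make_scorer(mean_absolute_error, greater_is_better=False)}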
def _fit_and_score(
forecaster,
y: pd.Series,
X: Optional[Union[pd.Series, pd.DataFrame]],
scoring: Dict[str, Union[str, _PredictScorer]],
train,
test,
parameters,
fit_params,
return_train_score,
error_score=0,
**additional_scorer_kwargs,
):
"""Fits the forecaster on a single train split and scores on the test split
Similar to _fit_and_score from `sklearn` [1] (and to some extent `sktime` [2]).
Difference is that [1] operates on a single fold only, whereas [2] operates on all cv folds.
Ref:
[1] https://github.com/scikit-learn/scikit-learn/blob/0.24.1/sklearn/model_selection/_validation.py#L449
[2] https://github.com/alan-turing-institute/sktime/blob/v0.5.3/sktime/forecasting/model_selection/_tune.py#L95
Parameters
----------
forecaster : [type]
Time Series Forecaster that is compatible with sktime
y : pd.Series
The variable of interest for forecasting
X : Optional[Union[pd.Series, pd.DataFrame]]
Exogenous Variables
scoring : Dict[str, Union[str, _PredictScorer]]
Scoring Dictionary. Values can be valid strings that can be converted to
callable metrics or the callable metrics directly
train : [type]
Indices of training samples.
test : [type]
Indices of test samples.
parameters : [type]
Parameter to set for the forecaster
fit_params : [type]
Fit parameters to be used when training
return_train_score : [type]
Should the training scores be returned. Unused for now.
error_score : int, optional
Unused for now, by default 0
**additional_scorer_kwargs: Dict[str, Any]
Additional scorer kwargs such as {`sp`:12} required by metrics like MASE
Raises
------
ValueError
When test indices do not match predicted indices. This is only for
for internal checks and should not be raised when used by external users
"""
if parameters is not None:
forecaster.set_params(**parameters)
y_train, y_test = y[train], y[test]
X_train = None if X is None else X[train]
X_test = None if X is None else X[test]
#### Fit the forecaster ----
start = time.time()
try:
forecaster.fit(y_train, X_train, **fit_params)
except Exception as error:
logging.error(f"Fit failed on {forecaster}")
logging.error(error)
if error_score == "raise":
raise
fit_time = time.time() - start
#### Determine Cutoff ----
# NOTE: Cutoff is available irrespective of whether fit passed or failed
cutoff = forecaster.cutoff
#### Score the model ----
lower = pd.Series([], dtype="float64")
upper = pd.Series([], dtype="float64")
if forecaster.is_fitted:
y_pred, lower, upper = get_predictions_with_intervals(
forecaster=forecaster, X_test=X_test
)
if (y_test.index.values != y_pred.index.values).any():
print(
f"\t y_train: {y_train.index.values},"
f"\n\t y_test: {y_test.index.values}"
)
print(f"\t y_pred: {y_pred.index.values}")
raise ValueError(
"y_test indices do not match y_pred_indices or split/prediction "
"length does not match forecast horizon."
)
start = time.time()
fold_scores = {}
scoring = _get_metrics_dict_ts(scoring)
# SP should be passed from outside in additional_scorer_kwargs already
additional_scorer_kwargs = update_additional_scorer_kwargs(
initial_kwargs=additional_scorer_kwargs,
y_train=y_train,
lower=lower,
upper=upper,
)
for scorer_name, scorer in scoring.items():
if forecaster.is_fitted:
# get all kwargs in additional_scorer_kwargs
# that correspond to parameters in function signature
kwargs = {
**{
k: v
for k, v in additional_scorer_kwargs.items()
if k in get_function_params(scorer._score_func)
},
**scorer._kwargs,
}
metric = scorer._score_func(y_true=y_test, y_pred=y_pred, **kwargs)
else:
metric = None
fold_scores[scorer_name] = metric
score_time = time.time() - start
return fold_scores, fit_time, score_time, cutoff
class BaseGridSearch:
"""
Parallelization is based predominantly on [1]. Also similar to [2]
Ref:
[1] https://github.com/scikit-learn/scikit-learn/blob/0.24.1/sklearn/model_selection/_search.py#L795
[2] https://github.com/scikit-optimize/scikit-optimize/blob/v0.8.1/skopt/searchcv.py#L410
"""
def __init__(
self,
forecaster,
cv,
n_jobs=None,
pre_dispatch=None,
refit: bool = False,
refit_metric: str = "smape",
scoring=None,
verbose=0,
error_score=None,
return_train_score=None,
):
self.forecaster = forecaster
self.cv = cv
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self.refit = refit
self.refit_metric = refit_metric
self.scoring = scoring
self.verbose = verbose
self.error_score = error_score
self.return_train_score = return_train_score
self.best_params_ = {}
self.cv_results_ = {}
def fit(
self,
y: pd.Series,
X: Optional[pd.DataFrame] = None,
additional_scorer_kwargs: Optional[Dict[str, Any]] = None,
**fit_params,
):
"""[summary]
Parameters
----------
y : pd.Series
Target
X : Optional[pd.DataFrame], optional
Exogenous Variables, by default None
additional_scorer_kwargs: Dict[str, Any]
Additional scorer kwargs such as {`sp`:12} required by metrics like MASE
**fit_params: Dict[str, Any]
Additional params to pass to fit
Returns
-------
BaseGridSearch
The fitted search object (self)
Raises
------
ValueError
If the refit metric is not among the provided scorers, or if no fits were performed
"""
if additional_scorer_kwargs is None:
additional_scorer_kwargs = {}
y, X = check_y_X(y, X)
# validate cross-validator
cv = check_cv(self.cv)
base_forecaster = clone(self.forecaster)
# This checker is sktime specific and only supports 1 metric
# Removing for now since we can have multiple metrics
# TODO: Add back later if it supports multiple metrics
# scoring = check_scoring(self.scoring)
# Multiple metrics supported
scorers = self.scoring # Dict[str, Union[str, scorer]] Not metrics container
scorers = _get_metrics_dict_ts(scorers)
refit_metric = self.refit_metric
if refit_metric not in list(scorers.keys()):
raise ValueError(
f"Refit Metric: '{refit_metric}' is not available. ",
f"Available Values are: {list(scorers.keys())}",
)
results = {}
all_candidate_params = []
all_out = []
def evaluate_candidates(candidate_params):
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
n_splits = cv.get_n_splits(y)
if self.verbose > 0:
print( # noqa
f"Fitting {n_splits} folds for each of {n_candidates} "
f"candidates, totalling {n_candidates * n_splits} fits"
)
parallel = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=self.pre_dispatch
)
out = parallel(
delayed(_fit_and_score)(
forecaster=clone(base_forecaster),
y=y,
X=X,
scoring=scorers,
train=train,
test=test,
parameters=parameters,
fit_params=fit_params,
return_train_score=self.return_train_score,
error_score=self.error_score,
**additional_scorer_kwargs,
)
for parameters in candidate_params
for train, test in get_folds(cv, y)
)
if len(out) < 1:
raise ValueError(
"No fits were performed. "
"Was the CV iterator empty? "
"Were there no candidates?"
)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
nonlocal results
results = self._format_results(
all_candidate_params, scorers, all_out, n_splits
)
return results
self._run_search(evaluate_candidates)
self.best_index_ = results["rank_test_%s" % refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][self.best_index_]
self.best_params_ = results["params"][self.best_index_]
self.best_forecaster_ = clone(base_forecaster).set_params(**self.best_params_)
if self.refit:
refit_start_time = time.time()
self.best_forecaster_.fit(y, X, **fit_params)
self.refit_time_ = time.time() - refit_start_time
# Store the scorers (dict of metric name -> scorer)
self.scorer_ = scorers
self.cv_results_ = results
self.n_splits_ = cv.get_n_splits(y)
self._is_fitted = True
return self
@staticmethod
def _format_results(candidate_params, scorers, out, n_splits):
"""From sklearn and sktime"""
n_candidates = len(candidate_params)
(test_scores_dict, fit_time, score_time, cutoffs) = zip(*out)
test_scores_dict = _aggregate_score_dicts(test_scores_dict)
results = {}
# From sklearn (with the addition of greater_is_better from sktime)
# INFO: For some reason, sklearn func does not work with sktime metrics
# without passing greater_is_better (as done in sktime) and processing
# it as such.
def _store(
key_name,
array,
weights=None,
splits=False,
rank=False,
greater_is_better=False,
):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by splits, then by parameters
# We want `array` to have `n_candidates` rows and `n_splits` cols.
array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)
if splits:
for split_idx in range(n_splits):
# Uses closure to alter the results
results["split%d_%s" % (split_idx, key_name)] = array[:, split_idx]
array_means = np.average(array, axis=1, weights=weights)
results["mean_%s" % key_name] = array_means
if key_name.startswith(("train_", "test_")) and np.any(
~np.isfinite(array_means)
):
warnings.warn(
f"One or more of the {key_name.split('_')[0]} scores "
f"are non-finite: {array_means}",
category=UserWarning,
)
# Weighted std is not directly available in numpy
array_stds = np.sqrt(
np.average(
(array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights
)
)
results["std_%s" % key_name] = array_stds
if rank:
# This section is taken from sktime
array_means = -array_means if greater_is_better else array_means
results["rank_%s" % key_name] = np.asarray(
rankdata(array_means, method="min"), dtype=np.int32
)
_store("fit_time", fit_time)
_store("score_time", score_time)
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(
partial(
np.ma.MaskedArray, np.empty(n_candidates,), mask=True, dtype=object,
)
)
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurrence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key "params"
results["params"] = candidate_params
for scorer_name, scorer in scorers.items():
# Computed the (weighted) mean and std for test scores alone
_store(
"test_%s" % scorer_name,
test_scores_dict[scorer_name],
splits=True,
rank=True,
weights=None,
greater_is_better=True if scorer._sign == 1 else False,
)
return results
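# Sketch of the keys produced by `_format_results` for a single scorer named "smape"
# (assuming, for illustration, two candidates and three splits):
#   mean_fit_time, std_fit_time, mean_score_time, std_score_time,
#   split0_test_smape, split1_test_smape, split2_test_smape,
#   mean_test_smape, std_test_smape, rank_test_smape,
#   param_<name> (one masked array per hyperparameter), params (list of candidate param dicts)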
class ForecastingGridSearchCV(BaseGridSearch):
def __init__(
self,
forecaster,
cv,
param_grid,
scoring=None,
n_jobs=None,
refit=True,
refit_metric: str = "smape",
verbose=0,
pre_dispatch="2*n_jobs",
error_score=np.nan,
return_train_score=False,
):
super(ForecastingGridSearchCV, self).__init__(
forecaster=forecaster,
cv=cv,
n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
refit=refit,
refit_metric=refit_metric,
scoring=scoring,
verbose=verbose,
error_score=error_score,
return_train_score=return_train_score,
)
self.param_grid = param_grid
_check_param_grid(param_grid)
def _run_search(self, evaluate_candidates):
"""Search all candidates in param_grid"""
evaluate_candidates(ParameterGrid(self.param_grid))
class ForecastingRandomizedSearchCV(BaseGridSearch):
def __init__(
self,
forecaster,
cv,
param_distributions,
n_iter=10,
scoring=None,
n_jobs=None,
refit=True,
refit_metric: str = "smape",
verbose=0,
random_state=None,
pre_dispatch="2*n_jobs",
error_score=np.nan,
return_train_score=False,
):
super(ForecastingRandomizedSearchCV, self).__init__(
forecaster=forecaster,
cv=cv,
n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
refit=refit,
refit_metric=refit_metric,
scoring=scoring,
verbose=verbose,
error_score=error_score,
return_train_score=return_train_score,
)
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def _run_search(self, evaluate_candidates):
"""Search n_iter candidates from param_distributions"""
return evaluate_candidates(
ParameterSampler(
self.param_distributions, self.n_iter, random_state=self.random_state
)
)
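# Minimal usage sketch for the search classes above (the forecaster, grid, splitter and
# series are assumptions for illustration; `refit_metric` must be a key of `scoring`):
# >>> from sktime.forecasting.naive import NaiveForecaster
# >>> y = pd.Series(np.random.randn(120))
# >>> cv = SlidingWindowSplitter(fh=np.arange(1, 13), window_length=36, step_length=12)
# >>> searcher = ForecastingGridSearchCV(
# ...     forecaster=NaiveForecaster(),
# ...     cv=cv,
# ...     param_grid={"strategy": ["last", "mean"]},
# ...     scoring={"mae": "neg_mean_absolute_error"},
# ...     refit_metric="mae",
# ...     refit=True,
# ... )
# >>> searcher.fit(y=y, additional_scorer_kwargs={"sp": 12}, fh=cv.fh)
# >>> searcher.best_params_, searcher.best_score_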
class TimeSeriesExperiment(_SupervisedExperiment):
def __init__(self) -> None:
super().__init__()
self._ml_usecase = MLUsecase.TIME_SERIES
self.exp_name_log = "ts-default-name"
# Values in variable_keys are accessible in globals
self.variable_keys = self.variable_keys.difference(
{
"target_param",
"iterative_imputation_iters_param",
"imputation_regressor",
"imputation_classifier",
"fold_shuffle_param",
"stratify_param",
"fold_groups_param",
}
)
self.variable_keys = self.variable_keys.union(
{
"fh",
"seasonal_period",
"seasonality_present",
"sp_to_use",
"strictly_positive",
"enforce_pi",
}
)
self._available_plots = {
"ts": "Time Series Plot",
"train_test_split": "Train Test Split",
"cv": "Cross Validation",
"acf": "Auto Correlation (ACF)",
"pacf": "Partial Auto Correlation (PACF)",
"decomp_classical": "Decomposition Classical",
"decomp_stl": "Decomposition STL",
"diagnostics": "Diagnostics Plot",
"forecast": "Out-of-Sample Forecast Plot",
"insample": "In-Sample Forecast Plot",
"residuals": "Residuals Plot",
}
self._available_plots_data_keys = [
"ts",
"train_test_split",
"cv",
"acf",
"pacf",
"decomp_classical",
"decomp_stl",
"diagnostics",
]
self._available_plots_estimator_keys = [
"ts",
"train_test_split",
"cv",
"acf",
"pacf",
"decomp_classical",
"decomp_stl",
"diagnostics",
"forecast",
"insample",
"residuals",
]
def _get_setup_display(self, **kwargs) -> Styler:
# define highlight function for function grid to display
functions = pd.DataFrame(
[
["session_id", self.seed],
# ["Target", self.target_param],
["Original Data", self.data_before_preprocess.shape],
["Missing Values", kwargs["missing_flag"]],
]
+ (
[
["Transformed Train Set", self.y_train.shape],
["Transformed Test Set", self.y_test.shape],
["Fold Generator", type(self.fold_generator).__name__],
["Fold Number", self.fold_param],
["Enforce Prediction Interval", self.enforce_pi],
["Seasonal Period Tested", self.seasonal_period],
["Seasonality Detected", self.seasonality_present],
["Seasonality Used in Models", self.sp_to_use],
["Target Strictly Positive", self.strictly_positive],
["Target White Noise", self.white_noise],
["Recommended d", self.lowercase_d],
["Recommended Seasonal D", self.uppercase_d],
["CPU Jobs", self.n_jobs_param],
["Use GPU", self.gpu_param],
["Log Experiment", self.logging_param],
["Experiment Name", self.exp_name_log],
["USI", self.USI],
]
)
+ (
[["Imputation Type", kwargs["imputation_type"]],]
if self.preprocess
else []
),
# + (
# [
# ["Transform Target", self.transform_target_param],
# ["Transform Target Method", self.transform_target_method_param],
# ]
# ),
columns=["Description", "Value"],
)
return functions.style.apply(highlight_setup)
def _get_models(self, raise_errors: bool = True) -> Tuple[dict, dict]:
all_models = {
k: v
for k, v in pycaret.containers.models.time_series.get_all_model_containers(
self.variables, raise_errors=raise_errors
).items()
if not v.is_special
}
all_models_internal = pycaret.containers.models.time_series.get_all_model_containers(
self.variables, raise_errors=raise_errors
)
return all_models, all_models_internal
def _get_metrics(self, raise_errors: bool = True) -> dict:
"""Gets the metrics for the Time Series Module
Parameters
----------
raise_errors : bool, optional
Whether to raise errors encountered while fetching the metric containers, by default True
Returns
-------
dict
Dictionary of all metric containers available for the Time Series module
"""
return pycaret.containers.metrics.time_series.get_all_metric_containers(
self.variables, raise_errors=raise_errors
)
def _get_default_plots_to_log(self) -> List[str]:
return ["forecast", "residuals", "diagnostics"]
def check_fh(self, fh: Union[List[int], int, np.array]) -> np.array:
"""
Checks fh for validity and converts fh into an appropriate forecasting
horizon compatible with sktime (if necessary)
Parameters
----------
fh : Union[List[int], int, np.array]
Forecasting Horizon
Returns
-------
np.array
Forecast Horizon (possibly updated to be compatible with sktime)
Raises
------
ValueError
(1) When forecast horizon is an integer < 1
(2) When forecast horizon is not the correct type
"""
if isinstance(fh, int):
if fh >= 1:
fh = np.arange(1, fh + 1)
else:
raise ValueError(
f"If Forecast Horizon `fh` is an integer, it must be >= 1. You provided fh = '{fh}'!"
)
elif isinstance(fh, List):
fh = np.array(fh)
elif isinstance(fh, np.ndarray):
# Good to go
pass
else:
raise ValueError(
f"Horizon `fh` must be a of type int, list, or numpy array, got object of {type(fh)} type!"
)
return fh
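# Illustrative behaviour of `check_fh` (outputs follow directly from the checks above;
# `exp` is assumed to be a TimeSeriesExperiment instance):
# >>> exp.check_fh(3)          # -> np.array([1, 2, 3])
# >>> exp.check_fh([1, 2, 6])  # -> np.array([1, 2, 6])
# >>> exp.check_fh(0)          # raises ValueError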
def setup(
self,
data: Union[pd.Series, pd.DataFrame],
preprocess: bool = True,
imputation_type: str = "simple",
# transform_target: bool = False,
# transform_target_method: str = "box-cox",
fold_strategy: Union[str, Any] = "expanding",
fold: int = 3,
fh: Union[List[int], int, np.array] = 1,
seasonal_period: Optional[Union[int, str]] = None,
enforce_pi: bool = False,
n_jobs: Optional[int] = -1,
use_gpu: bool = False,
custom_pipeline: Union[
Any, Tuple[str, Any], List[Any], List[Tuple[str, Any]]
] = None,
html: bool = True,
session_id: Optional[int] = None,
system_log: Union[bool, logging.Logger] = True,
log_experiment: bool = False,
experiment_name: Optional[str] = None,
log_plots: Union[bool, list] = False,
log_profile: bool = False,
log_data: bool = False,
verbose: bool = True,
profile: bool = False,
profile_kwargs: Dict[str, Any] = None,
):
"""
This function initializes the training environment and creates the transformation
pipeline. Setup function must be called before executing any other function. It takes
one mandatory parameters: ``data``. All the other parameters are optional.
Example
-------
>>> from pycaret.datasets import get_data
>>> airline = get_data('airline')
>>> from pycaret.time_series import *
>>> exp_name = setup(data = airline, fh = 12)
data : pandas.Series or pandas.DataFrame
Shape (n_samples, 1) when pandas.DataFrame, otherwise (n_samples,).
preprocess: bool, default = True
Parameter not in use for now. Behavior may change in future.
imputation_type: str, default = 'simple'
Parameter not in use for now. Behavior may change in future.
fold_strategy: str or sklearn CV generator object, default = 'expanding'
Choice of cross validation strategy. Possible values are:
* 'expanding'
* 'rolling' (same as/aliased to 'expanding')
* 'sliding'
You can also pass an sktime compatible cross validation object such
as ``SlidingWindowSplitter`` or ``ExpandingWindowSplitter``. In this case,
the `fold` and `fh` parameters will be ignored and these values will
be extracted from the ``fold_strategy`` object directly.
fold: int, default = 3
Number of folds to be used in cross validation. Must be at least 2. This is
a global setting that can be over-written at function level by using ``fold``
parameter. Ignored when ``fold_strategy`` is a custom object.
fh: int or list or np.array, default = 1
The forecast horizon to be used for forecasting. Default is set to ``1`` i.e.
forecast one point ahead. When an integer is passed it means N consecutive points
in the future without any gap. If you want to forecast values with gaps, you
must pass an array, e.g. np.arange(13, 25) will skip the first 12 future
points and forecast from the 13th point till the 24th point ahead (note that
in numpy's arange the start is inclusive and the stop is exclusive).
seasonal_period: int or str, default = None
Seasonal period in timeseries data. If not provided the frequency of the data
index is mapped to a seasonal period as follows:
* 'S': 60
* 'T': 60
* 'H': 24
* 'D': 7
* 'W': 52
* 'M': 12
* 'Q': 4
* 'A': 1
* 'Y': 1
Alternatively you can provide a custom `seasonal_period` by passing
it as an integer or a string corresponding to the keys above (e.g.
'W' for weekly data, 'M' for monthly data, etc.).
enforce_pi: bool, default = False
When set to True, only models that support prediction intervals are
loaded in the environment.
n_jobs: int, default = -1
The number of jobs to run in parallel (for functions that supports parallel
processing) -1 means using all processors. To run all functions on single
processor set n_jobs to None.
use_gpu: bool or str, default = False
Parameter not in use for now. Behavior may change in future.
custom_pipeline: (str, transformer) or list of (str, transformer), default = None
Parameter not in use for now. Behavior may change in future.
html: bool, default = True
When set to False, prevents runtime display of monitor. This must be set to False
when the environment does not support IPython. For example, command line terminal,
Databricks Notebook, Spyder and other similar IDEs.
session_id: int, default = None
Controls the randomness of experiment. It is equivalent to 'random_state' in
scikit-learn. When None, a pseudo random number is generated. This can be used
for later reproducibility of the entire experiment.
system_log: bool or logging.Logger, default = True
Whether to save the system logging file (as logs.log). If the input already is a
logger object, that one is used instead.
log_experiment: bool, default = False
When set to True, all metrics and parameters are logged on the ``MLflow`` server.
experiment_name: str, default = None
Name of the experiment for logging. Ignored when ``log_experiment`` is not True.
log_plots: bool or list, default = False
When set to True, certain plots are logged automatically in the ``MLFlow`` server.
To change the type of plots to be logged, pass a list containing plot IDs. Refer
to documentation of ``plot_model``. Ignored when ``log_experiment`` is not True.
log_profile: bool, default = False
When set to True, data profile is logged on the ``MLflow`` server as a html file.
Ignored when ``log_experiment`` is not True.
log_data: bool, default = False
When set to True, dataset is logged on the ``MLflow`` server as a csv file.
Ignored when ``log_experiment`` is not True.
verbose: bool, default = True
When set to False, Information grid is not printed.
profile: bool, default = False
When set to True, an interactive EDA report is displayed.
profile_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the ProfileReport method used
to create the EDA report. Ignored if ``profile`` is False.
Returns:
Global variables that can be changed using the ``set_config`` function.
"""
from sktime.utils.seasonality import (
autocorrelation_seasonality_test,
) # only needed in setup
## Make a local copy so as not to perform an inplace operation on the
## original dataset
data_ = data.copy()
if isinstance(data_, pd.Series) and data_.name is None:
data_.name = "Time Series"
# Forecast Horizon Checks
if fh is None and isinstance(fold_strategy, str):
raise ValueError(
f"The forecast horizon `fh` must be provided when fold_strategy is of type 'string'"
)
# Check Fold Strategy
if not isinstance(fold_strategy, str):
self.logger.info(
f"fh parameter {fh} will be ignored since fold_strategy has been provided. "
f"fh from fold_strategy will be used instead."
)
fh = fold_strategy.fh
self.logger.info(
f"fold parameter {fold} will be ignored since fold_strategy has been provided. "
f"fold based on fold_strategy will be used instead."
)
# fold value will be reset after the data is split in the parent class setup
fh = self.check_fh(fh)
self.fh = fh
# Check Index
allowed_freq_index_types = (pd.PeriodIndex, pd.DatetimeIndex)
if (
not isinstance(data_.index, allowed_freq_index_types)
and seasonal_period is None
):
# https://stackoverflow.com/questions/3590165/join-a-list-of-items-with-different-types-as-string-in-python
raise ValueError(
f"The index of your 'data' is of type '{type(data_.index)}'. "
"If the 'data' index is not of one of the following types: "
f"{', '.join(str(type) for type in allowed_freq_index_types)}, "
"then 'seasonal_period' must be provided. Refer to docstring for options."
)
if isinstance(data_.index, pd.DatetimeIndex):
data_.index = data_.index.to_period()
if seasonal_period is None:
index_freq = data_.index.freqstr
self.seasonal_period = get_sp_from_str(str_freq=index_freq)
else:
if not isinstance(seasonal_period, (int, str)):
raise ValueError(
f"seasonal_period parameter must be an int or str, got {type(seasonal_period)}"
)
if isinstance(seasonal_period, str):
self.seasonal_period = get_sp_from_str(str_freq=seasonal_period)
else:
self.seasonal_period = seasonal_period
if isinstance(data_, (pd.Series, pd.DataFrame)):
if isinstance(data_, pd.DataFrame):
if data_.shape[1] != 1:
raise ValueError(
f"data must be a pandas Series or DataFrame with one column, got {data_.shape[1]} columns!"
)
data_ = data_.copy()
else:
data_ = pd.DataFrame(data_)  # Force conversion to DataFrame
else:
raise ValueError(
f"data must be a pandas Series or DataFrame, got object of {type(data_)} type!"
)
data_.columns = [str(x) for x in data_.columns]
target_name = data_.columns[0]
if not np.issubdtype(data_[target_name].dtype, np.number):
raise TypeError(
f"Data must be of 'numpy.number' subtype, got {data_[target_name].dtype}!"
)
if len(data_.index) != len(set(data_.index)):
raise ValueError("Index may not have duplicate values!")
# check valid seasonal parameter
self.seasonality_present = autocorrelation_seasonality_test(
data_[target_name], self.seasonal_period
)
# What seasonal period should be used for modeling?
self.sp_to_use = self.seasonal_period if self.seasonality_present else 1
# Should multiplicative components be allowed in models that support it
self.strictly_positive = np.all(data_[target_name] > 0)
self.enforce_pi = enforce_pi
return super().setup(
data=data_,
target=data_.columns[0],
test_data=None,
preprocess=preprocess,
imputation_type=imputation_type,
categorical_features=None,
ordinal_features=None,
high_cardinality_features=None,
numeric_features=None,
date_features=None,
ignore_features=None,
normalize=False,
transformation=False,
handle_unknown_categorical=False,
pca=False,
ignore_low_variance=False,
combine_rare_levels=False,
bin_numeric_features=None,
remove_outliers=False,
remove_multicollinearity=False,
remove_perfect_collinearity=False,
create_clusters=False,
polynomial_features=False,
trigonometry_features=False,
group_features=None,
feature_selection=False,
feature_interaction=False,
transform_target=False,
data_split_shuffle=False,
data_split_stratify=False,
fold_strategy=fold_strategy,
fold=fold,
fh=fh,
seasonal_period=seasonal_period,
fold_shuffle=False,
n_jobs=n_jobs,
use_gpu=use_gpu,
custom_pipeline=custom_pipeline,
html=html,
session_id=session_id,
system_log=system_log,
log_experiment=log_experiment,
experiment_name=experiment_name,
log_plots=log_plots,
log_profile=log_profile,
log_data=log_data,
silent=True,
verbose=verbose,
profile=profile,
profile_kwargs=profile_kwargs,
)
def compare_models(
self,
include: Optional[List[Union[str, Any]]] = None,
exclude: Optional[List[str]] = None,
fold: Optional[Union[int, Any]] = None,
round: int = 4,
cross_validation: bool = True,
sort: str = "smape",
n_select: int = 1,
budget_time: Optional[float] = None,
turbo: bool = True,
errors: str = "ignore",
fit_kwargs: Optional[dict] = None,
verbose: bool = True,
):
"""
This function trains and evaluates performance of all estimators available in the
model library using cross validation. The output of this function is a score grid
with average cross validated scores. Metrics evaluated during CV can be accessed
using the ``get_metrics`` function. Custom metrics can be added or removed using
``add_metric`` and ``remove_metric`` function.
Example
--------
>>> from pycaret.datasets import get_data
>>> airline = get_data('airline')
>>> from pycaret.time_series import *
>>> exp_name = setup(data = airline, fh = 12)
>>> best_model = compare_models()
include: list of str or sktime compatible object, default = None
To train and evaluate select models, list containing model ID or scikit-learn
compatible object can be passed in include param. To see a list of all models
available in the model library use the ``models`` function.
exclude: list of str, default = None
To omit certain models from training and evaluation, pass a list containing
model id in the exclude parameter. To see a list of all models available
in the model library use the ``models`` function.
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
round: int, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
cross_validation: bool, default = True
When set to False, metrics are evaluated on holdout set. ``fold`` param
is ignored when cross_validation is set to False.
sort: str, default = 'SMAPE'
The sort order of the score grid. It also accepts custom metrics that are
added through the ``add_metric`` function.
n_select: int, default = 1
Number of top_n models to return. For example, to select top 3 models use
n_select = 3.
budget_time: int or float, default = None
If not None, will terminate execution of the function after budget_time
minutes have passed and return results up to that point.
turbo: bool, default = True
When set to True, it excludes estimators with longer training times. To
see which algorithms are excluded use the ``models`` function.
errors: str, default = 'ignore'
When set to 'ignore', will skip the model with exceptions and continue.
If 'raise', will break the function when exceptions are raised.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
verbose: bool, default = True
Score grid is not printed when verbose is set to False.
Returns:
Trained model or list of trained models, depending on the ``n_select`` param.
Warnings
--------
- Changing turbo parameter to False may result in very high training times.
- No models are logged in ``MLflow`` when ``cross_validation`` parameter is False.
"""
return super().compare_models(
include=include,
exclude=exclude,
fold=fold,
round=round,
cross_validation=cross_validation,
sort=sort,
n_select=n_select,
budget_time=budget_time,
turbo=turbo,
errors=errors,
fit_kwargs=fit_kwargs,
verbose=verbose,
)
def create_model(
self,
estimator: Union[str, Any],
fold: Optional[Union[int, Any]] = None,
round: int = 4,
cross_validation: bool = True,
fit_kwargs: Optional[dict] = None,
verbose: bool = True,
**kwargs,
):
"""
This function trains and evaluates the performance of a given estimator
using cross validation. The output of this function is a score grid with
CV scores by fold. Metrics evaluated during CV can be accessed using the
``get_metrics`` function. Custom metrics can be added or removed using
``add_metric`` and ``remove_metric`` function. All the available models
can be accessed using the ``models`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> airline = get_data('airline')
>>> from pycaret.time_series import *
>>> exp_name = setup(data = airline, fh = 12)
>>> naive = create_model('naive')
estimator: str or sktime compatible object
ID of an estimator available in model library or pass an untrained
model object consistent with scikit-learn API. Estimators available
in the model library (ID - Name):
* 'naive' - Naive Forecaster
* 'grand_means' - Grand Means Forecaster
* 'snaive' - Seasonal Naive Forecaster (disabled when seasonal_period = 1)
* 'polytrend' - Polynomial Trend Forecaster
* 'arima' - ARIMA family of models (ARIMA, SARIMA, SARIMAX)
* 'auto_arima' - Auto ARIMA
* 'exp_smooth' - Exponential Smoothing
* 'ets' - ETS
* 'theta' - Theta Forecaster
* 'tbats' - TBATS
* 'bats' - BATS
* 'prophet' - Prophet Forecaster
* 'lr_cds_dt' - Linear w/ Cond. Deseasonalize & Detrending
* 'en_cds_dt' - Elastic Net w/ Cond. Deseasonalize & Detrending
* 'ridge_cds_dt' - Ridge w/ Cond. Deseasonalize & Detrending
* 'lasso_cds_dt' - Lasso w/ Cond. Deseasonalize & Detrending
* 'lar_cds_dt' - Least Angular Regressor w/ Cond. Deseasonalize & Detrending
* 'llar_cds_dt' - Lasso Least Angular Regressor w/ Cond. Deseasonalize & Detrending
* 'br_cds_dt' - Bayesian Ridge w/ Cond. Deseasonalize & Detrending
* 'huber_cds_dt' - Huber w/ Cond. Deseasonalize & Detrending
* 'par_cds_dt' - Passive Aggressive w/ Cond. Deseasonalize & Detrending
* 'omp_cds_dt' - Orthogonal Matching Pursuit w/ Cond. Deseasonalize & Detrending
* 'knn_cds_dt' - K Neighbors w/ Cond. Deseasonalize & Detrending
* 'dt_cds_dt' - Decision Tree w/ Cond. Deseasonalize & Detrending
* 'rf_cds_dt' - Random Forest w/ Cond. Deseasonalize & Detrending
* 'et_cds_dt' - Extra Trees w/ Cond. Deseasonalize & Detrending
* 'gbr_cds_dt' - Gradient Boosting w/ Cond. Deseasonalize & Detrending
* 'ada_cds_dt' - AdaBoost w/ Cond. Deseasonalize & Detrending
* 'lightgbm_cds_dt' - Light Gradient Boosting w/ Cond. Deseasonalize & Detrending
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
round: int, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
cross_validation: bool, default = True
When set to False, metrics are evaluated on holdout set. ``fold`` param
is ignored when cross_validation is set to False.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
verbose: bool, default = True
Score grid is not printed when verbose is set to False.
**kwargs:
Additional keyword arguments to pass to the estimator.
Returns:
Trained Model
Warnings
--------
- Models are not logged on the ``MLFlow`` server when ``cross_validation`` param
is set to False.
"""
return super().create_model(
estimator=estimator,
fold=fold,
round=round,
cross_validation=cross_validation,
fit_kwargs=fit_kwargs,
verbose=verbose,
**kwargs,
)
@staticmethod
def update_fit_kwargs_with_fh_from_cv(fit_kwargs: Optional[Dict], cv) -> Dict:
"""Updated the fit_ kwargs to include the fh parameter from cv
Parameters
----------
fit_kwargs : Optional[Dict]
Original fit kwargs
cv : [type]
cross validation object
Returns
-------
Dict[Any]
Updated fit kwargs
"""
fh_param = {"fh": cv.fh}
if fit_kwargs is None:
fit_kwargs = fh_param
else:
fit_kwargs.update(fh_param)
return fit_kwargs
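# Illustrative behaviour of `update_fit_kwargs_with_fh_from_cv` (cv is assumed to be an
# sktime splitter carrying an `fh` attribute):
# >>> cv = SlidingWindowSplitter(fh=np.arange(1, 13), window_length=36)
# >>> TimeSeriesExperiment.update_fit_kwargs_with_fh_from_cv(None, cv)
# {'fh': ...}
# >>> TimeSeriesExperiment.update_fit_kwargs_with_fh_from_cv({"X": None}, cv)
# {'X': None, 'fh': ...}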
def _create_model_without_cv(
self, model, data_X, data_y, fit_kwargs, predict, system, display: Display
):
# with estimator_pipeline(self._internal_pipeline, model) as pipeline_with_model:
self.logger.info(
"Support for Exogenous variables not yet supported. Switching X, y order"
)
data_X, data_y = data_y, data_X
fit_kwargs = get_pipeline_fit_kwargs(model, fit_kwargs)
self.logger.info("Cross validation set to False")
self.logger.info("Fitting Model")
model_fit_start = time.time()
with io.capture_output():
model.fit(data_X, data_y, **fit_kwargs)
model_fit_end = time.time()
model_fit_time = np.array(model_fit_end - model_fit_start).round(2)
display.move_progress()
if predict:
self.predict_model(model, verbose=False)
model_results = self.pull(pop=True).drop("Model", axis=1)
self.display_container.append(model_results)
display.display(
model_results, clear=system, override=False if not system else None,
)
self.logger.info(f"display_container: {len(self.display_container)}")
return model, model_fit_time
def _create_model_with_cv(
self,
model,
data_X,
data_y,
fit_kwargs,
round,
cv,
groups, # TODO: See if we can remove groups
metrics,
refit,
system,
display,
):
"""
MONITOR UPDATE STARTS
"""
# display.update_monitor(
# 1, f"Fitting {_get_cv_n_folds(data_y, cv)} Folds",
# )
display.update_monitor(
1, f"Fitting {cv.get_n_splits(data_y)} Folds",
)
display.display_monitor()
"""
MONITOR UPDATE ENDS
"""
metrics_dict = {k: v.scorer for k, v in metrics.items()}
self.logger.info("Starting cross validation")
n_jobs = self._gpu_n_jobs_param
# fit_kwargs = get_pipeline_fit_kwargs(pipeline_with_model, fit_kwargs)
self.logger.info(f"Cross validating with {cv}, n_jobs={n_jobs}")
# Cross Validate time series
# fh_param = {"fh": cv.fh}
# if fit_kwargs is None:
# fit_kwargs = fh_param
# else:
# fit_kwargs.update(fh_param)
fit_kwargs = self.update_fit_kwargs_with_fh_from_cv(
fit_kwargs=fit_kwargs, cv=cv
)
model_fit_start = time.time()
additional_scorer_kwargs = self.get_additional_scorer_kwargs()
scores, cutoffs = cross_validate_ts(
# Commented out since supervised_experiment also does not clone
# when doing cross_validate
# forecaster=clone(model),
forecaster=model,
y=data_y,
X=data_X,
scoring=metrics_dict,
cv=cv,
n_jobs=n_jobs,
verbose=0,
fit_params=fit_kwargs,
return_train_score=False,
error_score=0,
**additional_scorer_kwargs,
)
model_fit_end = time.time()
model_fit_time = np.array(model_fit_end - model_fit_start).round(2)
# `scores` has metric names in lowercase, `score_dict` uses the display names
score_dict = {v.display_name: scores[f"{k}"] for k, v in metrics.items()}
self.logger.info("Calculating mean and std")
try:
avgs_dict = {k: [np.mean(v), np.std(v)] for k, v in score_dict.items()}
except TypeError:
# When there is an error in model creation, score_dict values are None.
# e.g.
# {
# 'MAE': [None, None, None],
# 'RMSE': [None, None, None],
# 'MAPE': [None, None, None],
# 'SMAPE': [None, None, None],
# 'R2': [None, None, None]
# }
# Hence, mean and sd can not be computed
# TypeError: unsupported operand type(s) for +: 'NoneType' and 'NoneType'
avgs_dict = {k: [np.nan, np.nan] for k, v in score_dict.items()}
display.move_progress()
self.logger.info("Creating metrics dataframe")
model_results = pd.DataFrame(score_dict)
model_results.insert(0, "cutoff", cutoffs)
model_avgs = pd.DataFrame(avgs_dict, index=["Mean", "SD"],)
model_avgs.insert(0, "cutoff", np.nan)
model_results = model_results.append(model_avgs)
# Round the results
model_results = model_results.round(round)
# yellow the mean (converts model_results from dataframe to dataframe styler)
model_results = color_df(model_results, "yellow", ["Mean"], axis=1)
model_results = model_results.set_precision(round)
if refit:
# refitting the model on complete X_train, y_train
display.update_monitor(1, "Finalizing Model")
display.display_monitor()
model_fit_start = time.time()
self.logger.info("Finalizing model")
with io.capture_output():
model.fit(y=data_y, X=data_X, **fit_kwargs)
model_fit_end = time.time()
model_fit_time = np.array(model_fit_end - model_fit_start).round(2)
else:
# Set fh explicitly since we are not fitting explicitly
# This is needed so that the model can be used later to predict, etc.
model._set_fh(fit_kwargs.get("fh"))
# model_fit_time /= _get_cv_n_folds(data_y, cv)
model_fit_time /= cv.get_n_splits(data_y)
# return model, model_fit_time, model_results, avgs_dict
return model, model_fit_time, model_results, avgs_dict
def tune_model(
self,
estimator,
fold: Optional[Union[int, Any]] = None,
round: int = 4,
n_iter: int = 10,
custom_grid: Optional[Union[Dict[str, list], Any]] = None,
optimize: str = "SMAPE",
custom_scorer=None,
search_algorithm: Optional[str] = None,
choose_better: bool = True,
fit_kwargs: Optional[dict] = None,
return_tuner: bool = False,
verbose: bool = True,
tuner_verbose: Union[int, bool] = True,
display: Optional[Display] = None,
**kwargs,
):
"""
This function tunes the hyperparameters of a given estimator. The output of
this function is a score grid with CV scores by fold of the best selected
model based on ``optimize`` parameter. Metrics evaluated during CV can be
accessed using the ``get_metrics`` function. Custom metrics can be added
or removed using ``add_metric`` and ``remove_metric`` function.
Example
-------
>>> from pycaret.datasets import get_data
>>> airline = get_data('airline')
>>> from pycaret.time_series import *
>>> exp_name = setup(data = airline, fh = 12)
>>> dt = create_model('dt_cds_dt')
>>> tuned_dt = tune_model(dt)
estimator: sktime compatible object
Trained model object
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
round: int, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
n_iter: int, default = 10
Number of iterations in the grid search. Increasing 'n_iter' may improve
model performance but also increases the training time.
custom_grid: dictionary, default = None
To define custom search space for hyperparameters, pass a dictionary with
parameter name and values to be iterated. Custom grids must be in a format
supported by the defined ``search_library``.
optimize: str, default = 'SMAPE'
Metric name to be evaluated for hyperparameter tuning. It also accepts custom
metrics that are added through the ``add_metric`` function.
custom_scorer: object, default = None
custom scoring strategy can be passed to tune hyperparameters of the model.
It must be created using ``sklearn.make_scorer``. It is equivalent of adding
custom metric using the ``add_metric`` function and passing the name of the
custom metric in the ``optimize`` parameter.
Will be deprecated in future.
search_algorithm: str, default = None (defaults to 'random')
Use 'random' for a randomized search and 'grid' for a complete grid search.
choose_better: bool, default = True
When set to True, the returned object is always better performing. The
metric used for comparison is defined by the ``optimize`` parameter.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the tuner.
return_tuner: bool, default = False
When set to True, will return a tuple of (model, tuner_object).
verbose: bool, default = True
Score grid is not printed when verbose is set to False.
tuner_verbose: bool or int, default = True
If True or above 0, will print messages from the tuner. Higher values
print more messages. Ignored when ``verbose`` param is False.
**kwargs:
Additional keyword arguments to pass to the optimizer.
Returns:
Trained Model and Optional Tuner Object when ``return_tuner`` is True.
"""
search_library = "pycaret" # only 1 library supported right now
_allowed_search_algorithms = []
if search_library == "pycaret":
_allowed_search_algorithms = [None, "random", "grid"]
if search_algorithm not in _allowed_search_algorithms:
raise ValueError(
"`search_algorithm` must be one of "
f"'{', '.join(str(allowed_type) for allowed_type in _allowed_search_algorithms)}'. "
f"You passed '{search_algorithm}'."
)
function_params_str = ", ".join([f"{k}={v}" for k, v in locals().items()])
self.logger.info("Initializing tune_model()")
self.logger.info(f"tune_model({function_params_str})")
self.logger.info("Checking exceptions")
# run_time
runtime_start = time.time()
if not fit_kwargs:
fit_kwargs = {}
# checking estimator if string
if type(estimator) is str:
raise TypeError(
"The behavior of tune_model in version 1.0.1 is changed. Please pass trained model object."
)
# Check for estimator
if not hasattr(estimator, "fit"):
raise ValueError(
f"Estimator {estimator} does not have the required fit() method."
)
# checking fold parameter
if fold is not None and not (
type(fold) is int or is_sklearn_cv_generator(fold)
):
raise TypeError(
"fold parameter must be either None, an integer or a scikit-learn compatible CV generator object."
)
# checking round parameter
if type(round) is not int:
raise TypeError("Round parameter only accepts integer value.")
# checking n_iter parameter
if type(n_iter) is not int:
raise TypeError("n_iter parameter only accepts integer value.")
if isinstance(optimize, str):
# checking optimize parameter
# TODO: Changed with reference to other ML Usecases. Check with Antoni
# optimize = self._get_metric_by_name_or_id(optimize)
# if optimize is None:
# raise ValueError(
# "Optimize method not supported. See docstring for list of available parameters."
# )
optimize_container = self._get_metric_by_name_or_id(optimize)
if optimize_container is None:
raise ValueError(
"Optimize method not supported. See docstring for list of available parameters."
)
else:
self.logger.info(f"optimize set to user defined function {optimize}")
# checking verbose parameter
if type(verbose) is not bool:
raise TypeError(
"verbose parameter can only take argument as True or False."
)
# checking verbose parameter
if type(return_tuner) is not bool:
raise TypeError(
"return_tuner parameter can only take argument as True or False."
)
if not verbose:
tuner_verbose = 0
if type(tuner_verbose) not in (bool, int):
raise TypeError("tuner_verbose parameter must be a bool or an int.")
tuner_verbose = int(tuner_verbose)
if tuner_verbose < 0:
tuner_verbose = 0
elif tuner_verbose > 2:
tuner_verbose = 2
"""
ERROR HANDLING ENDS HERE
"""
# cross validation setup starts here
cv = self.get_fold_generator(fold=fold)
if not display:
progress_args = {"max": 3 + 4}
master_display_columns = [
v.display_name for k, v in self._all_metrics.items()
]
if self._ml_usecase == MLUsecase.TIME_SERIES:
master_display_columns.insert(0, "cutoff")
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor_rows = [
["Initiated", ". . . . . . . . . . . . . . . . . .", timestampStr],
[
"Status",
". . . . . . . . . . . . . . . . . .",
"Loading Dependencies",
],
[
"Estimator",
". . . . . . . . . . . . . . . . . .",
"Compiling Library",
],
]
display = Display(
verbose=verbose,
html_param=self.html_param,
progress_args=progress_args,
master_display_columns=master_display_columns,
monitor_rows=monitor_rows,
)
display.display_progress()
display.display_monitor()
display.display_master_display()
# ignore warnings
warnings.filterwarnings("ignore")
import logging
np.random.seed(self.seed)
self.logger.info("Copying training dataset")
# Storing X_train and y_train in data_X and data_y parameter
data_X = self.X_train.copy()
data_y = self.y_train.copy()
# Replace Empty DataFrame with None as empty DataFrame causes issues
if (data_X.shape[0] == 0) or (data_X.shape[1] == 0):
data_X = None
display.move_progress()
# setting optimize parameter
# TODO: Changed compared to other PyCaret UseCases (Check with Antoni)
# optimize = optimize.scorer
compare_dimension = optimize_container.display_name
optimize_metric_dict = {optimize_container.id: optimize_container.scorer}
# Returns a dictionary of all metric containers (disabled for now since
# we only need optimize metric)
# {'mae': <pycaret.containers....783DEB0C8>, 'rmse': <pycaret.containers....783DEB148> ...}
# all_metric_containers = self._all_metrics
# # Returns a dictionary of all metric scorers (disabled for now since
# we only need optimize metric)
# {'mae': 'neg_mean_absolute_error', 'rmse': 'neg_root_mean_squared_error' ...}
# all_metrics_dict = {
# all_metric_containers[metric_id].id: all_metric_containers[metric_id].scorer
# for metric_id in all_metric_containers
# }
refit_metric = optimize_container.id # Name of the metric: e.g. 'mae'
# convert trained estimator into string name for grids
self.logger.info("Checking base model")
is_stacked_model = False
if hasattr(estimator, "final_estimator"):
self.logger.info("Model is stacked, using the definition of the meta-model")
is_stacked_model = True
estimator_id = self._get_model_id(estimator.final_estimator)
else:
estimator_id = self._get_model_id(estimator)
if estimator_id is None:
if custom_grid is None:
raise ValueError(
"When passing a model not in PyCaret's model library, the custom_grid parameter must be provided."
)
estimator_name = self._get_model_name(estimator)
estimator_definition = None
self.logger.info("A custom model has been passed")
else:
estimator_definition = self._all_models_internal[estimator_id] # Container
estimator_name = estimator_definition.name
self.logger.info(f"Base model : {estimator_name}")
# If no special tunable class is defined inside PyCaret then just clone the estimator
if estimator_definition is None or estimator_definition.tunable is None:
model = clone(estimator)
# If special tunable class is defined, then use that instead
else:
self.logger.info("Model has a special tunable class, using that")
model = clone(estimator_definition.tunable(**estimator.get_params()))
is_stacked_model = False
base_estimator = model
display.update_monitor(2, estimator_name)
display.display_monitor()
display.move_progress()
self.logger.info("Declaring metric variables")
"""
MONITOR UPDATE STARTS
"""
display.update_monitor(1, "Searching Hyperparameters")
display.display_monitor()
"""
MONITOR UPDATE ENDS
"""
self.logger.info("Defining Hyperparameters")
if search_algorithm is None:
search_algorithm = "random" # Defaults to Random
###########################
#### Define Param Grid ----
###########################
param_grid = None
if custom_grid is not None:
param_grid = custom_grid
self.logger.info(f"custom_grid: {param_grid}")
elif search_library == "pycaret":
if search_algorithm == "grid":
param_grid = estimator_definition.tune_grid
elif search_algorithm == "random":
param_grid = estimator_definition.tune_distribution
if not param_grid:
raise ValueError(
"parameter grid for tuning is empty. If passing custom_grid, "
"make sure that it is not empty. If not passing custom_grid, "
"the passed estimator does not have a built-in tuning grid."
)
suffixes = []
if is_stacked_model:
self.logger.info(
"Stacked model passed, will tune meta model hyperparameters"
)
suffixes.append("final_estimator")
gc.collect()
# with estimator_pipeline(self._internal_pipeline, model) as pipeline_with_model:
if True:
# fit_kwargs = get_pipeline_fit_kwargs(pipeline_with_model, fit_kwargs)
# fh_param = {"fh": cv.fh}
# if fit_kwargs is None:
# fit_kwargs = fh_param
# else:
# fit_kwargs.update(fh_param)
fit_kwargs = self.update_fit_kwargs_with_fh_from_cv(
fit_kwargs=fit_kwargs, cv=cv
)
# actual_estimator_label = get_pipeline_estimator_label(pipeline_with_model)
actual_estimator_label = ""
# suffixes.append(actual_estimator_label)
# suffixes = "__".join(reversed(suffixes))
# param_grid = {f"{suffixes}__{k}": v for k, v in param_grid.items()}
if estimator_definition is not None:
search_kwargs = {**estimator_definition.tune_args, **kwargs}
n_jobs = (
self._gpu_n_jobs_param
if estimator_definition.is_gpu_enabled
else self.n_jobs_param
)
else:
search_kwargs = {}
n_jobs = self.n_jobs_param
self.logger.info(f"Tuning with n_jobs={n_jobs}")
if search_library == "pycaret":
if search_algorithm == "random":
try:
param_grid = get_base_distributions(param_grid)
except:
self.logger.warning(
"Couldn't convert param_grid to specific library distributions. Exception:"
)
self.logger.warning(traceback.format_exc())
if search_library == "pycaret":
if search_algorithm == "grid":
self.logger.info("Initializing ForecastingGridSearchCV")
model_grid = ForecastingGridSearchCV(
forecaster=model,
cv=cv,
param_grid=param_grid,
scoring=optimize_metric_dict,
refit_metric=refit_metric,
n_jobs=n_jobs,
verbose=tuner_verbose,
refit=False, # since we will refit afterwards anyway
**search_kwargs,
)
elif search_algorithm == "random":
self.logger.info("Initializing ForecastingRandomizedGridSearchCV")
model_grid = ForecastingRandomizedSearchCV(
forecaster=model,
cv=cv,
param_distributions=param_grid,
n_iter=n_iter,
scoring=optimize_metric_dict,
refit_metric=refit_metric,
n_jobs=n_jobs,
verbose=tuner_verbose,
random_state=self.seed,
refit=False, # since we will refit afterwards anyway
**search_kwargs,
)
else:
raise NotImplementedError(
f"Search type '{search_algorithm}' is not supported"
)
additional_scorer_kwargs = self.get_additional_scorer_kwargs()
model_grid.fit(
y=data_y,
X=data_X,
additional_scorer_kwargs=additional_scorer_kwargs,
**fit_kwargs,
)
best_params = model_grid.best_params_
self.logger.info(f"best_params: {best_params}")
best_params = {**best_params}
if actual_estimator_label:
best_params = {
k.replace(f"{actual_estimator_label}__", ""): v
for k, v in best_params.items()
}
cv_results = None
try:
cv_results = model_grid.cv_results_
except:
self.logger.warning(
"Couldn't get cv_results from model_grid. Exception:"
)
self.logger.warning(traceback.format_exc())
display.move_progress()
self.logger.info("Hyperparameter search completed")
if isinstance(model, TunableMixin):
self.logger.info("Getting base sklearn object from tunable")
best_params = {
k: v
for k, v in model.get_params().items()
if k in model.get_base_sklearn_params().keys()
}
model = model.get_base_sklearn_object()
self.logger.info(
"SubProcess create_model() called =================================="
)
best_model, model_fit_time = self.create_model(
estimator=model,
system=False,
display=display,
fold=fold,
round=round,
fit_kwargs=fit_kwargs,
**best_params,
)
model_results = self.pull()
self.logger.info(
"SubProcess create_model() end =================================="
)
if choose_better:
best_model = self._choose_better(
[estimator, (best_model, model_results)],
compare_dimension,
fold,
fit_kwargs=fit_kwargs,
display=display,
)
# end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
# mlflow logging
if self.logging_param:
avgs_dict_log = {k: v for k, v in model_results.loc["Mean"].items()}
try:
self._mlflow_log_model(
model=best_model,
model_results=model_results,
score_dict=avgs_dict_log,
source="tune_model",
runtime=runtime,
model_fit_time=model_fit_time,
_prep_pipe=self.prep_pipe,
log_plots=self.log_plots_param,
tune_cv_results=cv_results,
display=display,
)
except:
self.logger.error(
f"_mlflow_log_model() for {best_model} raised an exception:"
)
self.logger.error(traceback.format_exc())
model_results = color_df(model_results, "yellow", ["Mean"], axis=1)
model_results = model_results.set_precision(round)
display.display(model_results, clear=True)
self.logger.info(f"master_model_container: {len(self.master_model_container)}")
self.logger.info(f"display_container: {len(self.display_container)}")
self.logger.info(str(best_model))
self.logger.info(
"tune_model() succesfully completed......................................"
)
gc.collect()
if return_tuner:
return (best_model, model_grid)
return best_model
# def ensemble_model(
# self,
# estimator,
# method: str = "Bagging",
# fold: Optional[Union[int, Any]] = None,
# n_estimators: int = 10,
# round: int = 4,
# choose_better: bool = False,
# optimize: str = "R2",
# fit_kwargs: Optional[dict] = None,
# verbose: bool = True,
# ) -> Any:
# """
# This function ensembles a given estimator. The output of this function is
# a score grid with CV scores by fold. Metrics evaluated during CV can be
# accessed using the ``get_metrics`` function. Custom metrics can be added
# or removed using ``add_metric`` and ``remove_metric`` function.
# Example
# --------
# >>> from pycaret.datasets import get_data
# >>> boston = get_data('boston')
# >>> from pycaret.regression import *
# >>> exp_name = setup(data = boston, target = 'medv')
# >>> dt = create_model('dt')
# >>> bagged_dt = ensemble_model(dt, method = 'Bagging')
# estimator: scikit-learn compatible object
# Trained model object
# method: str, default = 'Bagging'
# Method for ensembling base estimator. It can be 'Bagging' or 'Boosting'.
# fold: int or scikit-learn compatible CV generator, default = None
# Controls cross-validation. If None, the CV generator in the ``fold_strategy``
# parameter of the ``setup`` function is used. When an integer is passed,
# it is interpreted as the 'n_splits' parameter of the CV generator in the
# ``setup`` function.
# n_estimators: int, default = 10
# The number of base estimators in the ensemble. In case of perfect fit, the
# learning procedure is stopped early.
# round: int, default = 4
# Number of decimal places the metrics in the score grid will be rounded to.
# choose_better: bool, default = False
# When set to True, the returned object is always better performing. The
# metric used for comparison is defined by the ``optimize`` parameter.
# optimize: str, default = 'R2'
# Metric to compare for model selection when ``choose_better`` is True.
# fit_kwargs: dict, default = {} (empty dict)
# Dictionary of arguments passed to the fit method of the model.
# verbose: bool, default = True
# Score grid is not printed when verbose is set to False.
# Returns:
# Trained Model
# """
# return super().ensemble_model(
# estimator=estimator,
# method=method,
# fold=fold,
# n_estimators=n_estimators,
# round=round,
# choose_better=choose_better,
# optimize=optimize,
# fit_kwargs=fit_kwargs,
# verbose=verbose,
# )
def blend_models(
self,
estimator_list: list,
method: str = "mean",
fold: Optional[Union[int, Any]] = None,
round: int = 4,
choose_better: bool = False,
optimize: str = "SMAPE",
weights: Optional[List[float]] = None,
fit_kwargs: Optional[dict] = None,
verbose: bool = True,
):
"""
        This function trains an EnsembleForecaster for select models passed in the
``estimator_list`` param. The output of this function is a score grid with
CV scores by fold. Metrics evaluated during CV can be accessed using the
``get_metrics`` function. Custom metrics can be added or removed using
``add_metric`` and ``remove_metric`` function.
Example
--------
>>> from pycaret.datasets import get_data
>>> airline = get_data('airline')
>>> from pycaret.time_series import *
>>> exp_name = setup(data = airline, fh = 12)
>>> top3 = compare_models(n_select = 3)
>>> blender = blend_models(top3)
estimator_list: list of sktime compatible estimators
List of model objects
method: str, default = 'mean'
Method to average the individual predictions to form a final prediction.
Available Methods:
* 'mean' - Mean of individual predictions
* 'median' - Median of individual predictions
* 'voting' - Vote individual predictions based on the provided weights.
fold: int or scikit-learn compatible CV generator, default = None
Controls cross-validation. If None, the CV generator in the ``fold_strategy``
parameter of the ``setup`` function is used. When an integer is passed,
it is interpreted as the 'n_splits' parameter of the CV generator in the
``setup`` function.
round: int, default = 4
Number of decimal places the metrics in the score grid will be rounded to.
choose_better: bool, default = False
When set to True, the returned object is always better performing. The
metric used for comparison is defined by the ``optimize`` parameter.
optimize: str, default = 'SMAPE'
Metric to compare for model selection when ``choose_better`` is True.
weights: list, default = None
            Sequence of weights (float or int) used to weight the individual forecasts
            when ``method='voting'``. Uses uniform weights when None.
fit_kwargs: dict, default = {} (empty dict)
Dictionary of arguments passed to the fit method of the model.
verbose: bool, default = True
Score grid is not printed when verbose is set to False.
Returns:
Trained Model
"""
return super().blend_models(
estimator_list=estimator_list,
fold=fold,
round=round,
choose_better=choose_better,
optimize=optimize,
method=method,
weights=weights,
fit_kwargs=fit_kwargs,
verbose=verbose,
)
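    # Illustrative usage sketch (comment only, not part of the implementation): given the
    # setup shown in the docstring above, a weighted voting blend could be requested as
    # below; the weights are hypothetical and must supply one value per estimator.
    #
    #     top3 = compare_models(n_select=3)
    #     blender = blend_models(top3, method='voting', weights=[0.5, 0.3, 0.2])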
# def stack_models(
# self,
# estimator_list: list,
# meta_model=None,
# fold: Optional[Union[int, Any]] = None,
# round: int = 4,
# restack: bool = False,
# choose_better: bool = False,
# optimize: str = "R2",
# fit_kwargs: Optional[dict] = None,
# verbose: bool = True,
# ):
# """
# This function trains a meta model over select estimators passed in
# the ``estimator_list`` parameter. The output of this function is a
# score grid with CV scores by fold. Metrics evaluated during CV can
# be accessed using the ``get_metrics`` function. Custom metrics
# can be added or removed using ``add_metric`` and ``remove_metric``
# function.
# Example
# --------
# >>> from pycaret.datasets import get_data
# >>> boston = get_data('boston')
# >>> from pycaret.regression import *
# >>> exp_name = setup(data = boston, target = 'medv')
# >>> top3 = compare_models(n_select = 3)
# >>> stacker = stack_models(top3)
# estimator_list: list of scikit-learn compatible objects
# List of trained model objects
# meta_model: scikit-learn compatible object, default = None
# When None, Linear Regression is trained as a meta model.
# fold: int or scikit-learn compatible CV generator, default = None
# Controls cross-validation. If None, the CV generator in the ``fold_strategy``
# parameter of the ``setup`` function is used. When an integer is passed,
# it is interpreted as the 'n_splits' parameter of the CV generator in the
# ``setup`` function.
# round: int, default = 4
# Number of decimal places the metrics in the score grid will be rounded to.
# restack: bool, default = False
# When set to False, only the predictions of estimators will be used as
# training data for the ``meta_model``.
# choose_better: bool, default = False
# When set to True, the returned object is always better performing. The
# metric used for comparison is defined by the ``optimize`` parameter.
# optimize: str, default = 'R2'
# Metric to compare for model selection when ``choose_better`` is True.
# fit_kwargs: dict, default = {} (empty dict)
# Dictionary of arguments passed to the fit method of the model.
# verbose: bool, default = True
# Score grid is not printed when verbose is set to False.
# Returns:
# Trained Model
# """
# return super().stack_models(
# estimator_list=estimator_list,
# meta_model=meta_model,
# fold=fold,
# round=round,
# method="auto",
# restack=restack,
# choose_better=choose_better,
# optimize=optimize,
# fit_kwargs=fit_kwargs,
# verbose=verbose,
# )
def plot_model(
self,
estimator: Optional[Any] = None,
plot: Optional[str] = None,
return_fig: bool = False,
return_data: bool = False,
verbose: bool = False,
display_format: Optional[str] = None,
data_kwargs: Optional[Dict] = None,
fig_kwargs: Optional[Dict] = None,
system: bool = True,
save: Union[str, bool] = False,
) -> Tuple[str, Any]:
"""
        This function analyzes the performance of a trained model on the holdout set.
When used without any estimator, this function generates plots on the
original data set. When used with an estimator, it will generate plots on
the model residuals.
Example
--------
>>> from pycaret.datasets import get_data
>>> airline = get_data('airline')
>>> from pycaret.time_series import *
>>> exp_name = setup(data = airline, fh = 12)
>>> arima = create_model('arima')
>>> plot_model(plot = 'ts')
>>> plot_model(plot = 'decomp_classical', data_kwargs = {'type' : 'multiplicative'})
>>> plot_model(estimator = arima, plot = 'forecast', data_kwargs = {'fh' : 24})
estimator: sktime compatible object, default = None
Trained model object
plot: str, default = None
            Default is 'ts' when estimator is None. When estimator is not None,
            the default changes to 'forecast'. List of available plots (ID - Name):
* 'ts' - Time Series Plot
* 'train_test_split' - Train Test Split
* 'cv' - Cross Validation
* 'acf' - Auto Correlation (ACF)
* 'pacf' - Partial Auto Correlation (PACF)
* 'decomp_classical' - Decomposition Classical
* 'decomp_stl' - Decomposition STL
* 'diagnostics' - Diagnostics Plot
* 'forecast' - "Out-of-Sample" Forecast Plot
* 'insample' - "In-Sample" Forecast Plot
* 'residuals' - Residuals Plot
        return_fig: bool, default = False
When set to True, it returns the figure used for plotting.
return_data: bool, default = False
When set to True, it returns the data for plotting.
            If both return_fig and return_data are set to True, the order of return
is figure then data.
        verbose: bool, default = False
Unused for now
display_format: str, default = None
To display plots in Streamlit (https://www.streamlit.io/), set this to 'streamlit'.
Currently, not all plots are supported.
data_kwargs: dict, default = None
Dictionary of arguments passed to the data for plotting.
fig_kwargs: dict, default = None
Dictionary of arguments passed to the figure object of plotly. Example:
* fig_kwargs = {'fig_size' : [800, 500], 'fig_template' : 'simple_white'}
save: string or bool, default = False
            When set to True, the plot is saved as an 'html' file in the current working directory.
            When a path destination is given, the plot is saved as an 'html' file in the given directory.
Returns:
None
"""
# checking display_format parameter
self.plot_model_check_display_format_(display_format=display_format)
# Import required libraries ----
if display_format == "streamlit":
try:
import streamlit as st
except ImportError:
raise ImportError(
"It appears that streamlit is not installed. Do: pip install hpbandster ConfigSpace"
)
if data_kwargs is None:
data_kwargs = {}
if fig_kwargs is None:
fig_kwargs = {}
available_plots_common = [
"ts",
"train_test_split",
"cv",
"acf",
"pacf",
"diagnostics",
"decomp_classical",
"decomp_stl",
]
available_plots_data = available_plots_common
available_plots_model = available_plots_common + [
"forecast",
"insample",
"residuals",
]
return_pred_int = False
return_obj = []
# Type checks
if estimator is not None and isinstance(estimator, str):
raise ValueError(
"Estimator must be a trained object. "
f"You have passed a string: '{estimator}'"
)
# Default plot when no model is specified is the time series plot
# Default plot when model is specified is the forecast plot
if plot is None and estimator is None:
plot = "ts"
elif plot is None and estimator is not None:
plot = "forecast"
data, train, test, predictions, cv, model_name = (
None,
None,
None,
None,
None,
None,
)
if plot == "ts":
data = self._get_y_data(split="all")
elif plot == "train_test_split":
train = self._get_y_data(split="train")
test = self._get_y_data(split="test")
elif plot == "cv":
data = self._get_y_data(split="train")
cv = self.get_fold_generator()
elif estimator is None:
require_full_data = [
"acf",
"pacf",
"diagnostics",
"decomp_classical",
"decomp_stl",
]
if plot in require_full_data:
data = self._get_y_data(split="all")
else:
plots_formatted_data = [f"'{plot}'" for plot in available_plots_data]
raise ValueError(
f"Plot type '{plot}' is not supported when estimator is not provided. Available plots are: {', '.join(plots_formatted_data)}"
)
else:
# Estimator is Provided
if hasattr(self, "_get_model_name") and hasattr(
self, "_all_models_internal"
):
model_name = self._get_model_name(estimator)
else:
# If the model is saved and loaded afterwards,
# it will not have self._get_model_name
model_name = estimator.__class__.__name__
require_insample_predictions = ["insample"]
require_residuals = [
"residuals",
"diagnostics",
"acf",
"pacf",
"decomp_classical",
"decomp_stl",
]
if plot == "forecast":
data = self._get_y_data(split="all")
fh = data_kwargs.get("fh", None)
alpha = data_kwargs.get("alpha", 0.05)
return_pred_int = estimator.get_tag("capability:pred_int")
predictions = self.predict_model(
estimator,
fh=fh,
alpha=alpha,
return_pred_int=return_pred_int,
verbose=False,
)
elif plot in require_insample_predictions:
# Try to get insample forecasts if possible
insample_predictions = self.get_insample_predictions(
estimator=estimator
)
if insample_predictions is None:
return
predictions = insample_predictions
data = self._get_y_data(split="all")
# Do not plot prediction interval for insample predictions
return_pred_int = False
elif plot in require_residuals:
resid = self.get_residuals(estimator=estimator)
if resid is None:
return
resid = self.check_and_clean_resid(resid=resid)
data = resid
else:
plots_formatted_model = [f"'{plot}'" for plot in available_plots_model]
raise ValueError(
f"Plot type '{plot}' is not supported when estimator is provided. Available plots are: {', '.join(plots_formatted_model)}"
)
fig, plot_data = plot_(
plot=plot,
data=data,
train=train,
test=test,
predictions=predictions,
cv=cv,
model_name=model_name,
return_pred_int=return_pred_int,
data_kwargs=data_kwargs,
fig_kwargs=fig_kwargs,
)
plot_name = self._available_plots[plot]
plot_filename = f"{plot_name}.html"
# Per https://github.com/pycaret/pycaret/issues/1699#issuecomment-962460539
if save:
if not isinstance(save, bool):
plot_filename = os.path.join(save, plot_filename)
self.logger.info(f"Saving '{plot_filename}'")
fig.write_html(plot_filename)
### Add file name to return object ----
return_obj.append(plot_filename)
elif system:
if display_format == "streamlit":
st.write(fig)
else:
fig.show()
self.logger.info("Visual Rendered Successfully")
### Add figure and data to return object if required ----
if return_fig:
return_obj.append(fig)
if return_data:
return_obj.append(plot_data)
#### Return None if empty, return as list if more than one object,
# else return object directly ----
if len(return_obj) == 0:
return_obj = None
elif len(return_obj) == 1:
return_obj = return_obj[0]
return return_obj
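    # Illustrative usage sketch (comment only): per the docstring above, passing a
    # directory path through ``save`` writes the plot as an HTML file and the file name
    # is returned. The path below is hypothetical.
    #
    #     arima = create_model('arima')
    #     plot_filename = plot_model(arima, plot='forecast', save='./plots')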
# def evaluate_model(
# self,
# estimator,
# fold: Optional[Union[int, Any]] = None,
# fit_kwargs: Optional[dict] = None,
# use_train_data: bool = False,
# ):
# """
# This function displays a user interface for analyzing performance of a trained
# model. It calls the ``plot_model`` function internally.
# Example
# --------
# >>> from pycaret.datasets import get_data
# >>> boston = get_data('boston')
# >>> from pycaret.regression import *
# >>> exp_name = setup(data = boston, target = 'medv')
# >>> lr = create_model('lr')
# >>> evaluate_model(lr)
# estimator: scikit-learn compatible object
# Trained model object
# fold: int or scikit-learn compatible CV generator, default = None
# Controls cross-validation. If None, the CV generator in the ``fold_strategy``
# parameter of the ``setup`` function is used. When an integer is passed,
# it is interpreted as the 'n_splits' parameter of the CV generator in the
# ``setup`` function.
# fit_kwargs: dict, default = {} (empty dict)
# Dictionary of arguments passed to the fit method of the model.
# use_train_data: bool, default = False
# When set to true, train data will be used for plots, instead
# of test data.
# Returns:
# None
# Warnings
# --------
# - This function only works in IPython enabled Notebook.
# """
# return super().evaluate_model(
# estimator=estimator,
# fold=fold,
# fit_kwargs=fit_kwargs,
# use_train_data=use_train_data,
# )
# def interpret_model(
# self,
# estimator,
# plot: str = "summary",
# feature: Optional[str] = None,
# observation: Optional[int] = None,
# use_train_data: bool = False,
# **kwargs,
# ):
# """
# This function analyzes the predictions generated from a trained model. Most plots
# in this function are implemented based on the SHAP (SHapley Additive exPlanations).
# For more info on this, please see https://shap.readthedocs.io/en/latest/
# Example
# --------
# >>> from pycaret.datasets import get_data
# >>> boston = get_data('boston')
# >>> from pycaret.regression import *
# >>> exp = setup(data = boston, target = 'medv')
# >>> xgboost = create_model('xgboost')
# >>> interpret_model(xgboost)
# estimator: scikit-learn compatible object
# Trained model object
# plot: str, default = 'summary'
# List of available plots (ID - Name):
# * 'summary' - Summary Plot using SHAP
# * 'correlation' - Dependence Plot using SHAP
# * 'reason' - Force Plot using SHAP
# * 'pdp' - Partial Dependence Plot
# feature: str, default = None
# Feature to check correlation with. This parameter is only required when ``plot``
# type is 'correlation' or 'pdp'. When set to None, it uses the first column from
# the dataset.
# observation: int, default = None
# Observation index number in holdout set to explain. When ``plot`` is not
# 'reason', this parameter is ignored.
# use_train_data: bool, default = False
# When set to true, train data will be used for plots, instead
# of test data.
# **kwargs:
# Additional keyword arguments to pass to the plot.
# Returns:
# None
# """
# return super().interpret_model(
# estimator=estimator,
# plot=plot,
# feature=feature,
# observation=observation,
# use_train_data=use_train_data,
# **kwargs,
# )
def predict_model(
self,
estimator,
fh=None,
return_pred_int=False,
alpha=0.05,
round: int = 4,
verbose: bool = True,
) -> pd.DataFrame:
"""
        This function forecasts using a trained model. When ``fh`` is None,
it forecasts using the same forecast horizon used during the
training.
Example
-------
>>> from pycaret.datasets import get_data
>>> airline = get_data('airline')
>>> from pycaret.time_series import *
>>> exp_name = setup(data = airline, fh = 12)
>>> arima = create_model('arima')
>>> pred_holdout = predict_model(arima)
>>> pred_unseen = predict_model(finalize_model(arima), fh = 24)
estimator: sktime compatible object
Trained model object
fh: int, default = None
Number of points from the last date of training to forecast.
When fh is None, it forecasts using the same forecast horizon
used during the training.
return_pred_int: bool, default = False
            When set to True, it returns the lower and upper bounds of the
            prediction interval, in addition to the point prediction.
alpha: float, default = 0.05
alpha for prediction interval. CI = 1 - alpha.
round: int, default = 4
Number of decimal places to round predictions to.
verbose: bool, default = True
When set to False, holdout score grid is not printed.
Returns:
pandas.DataFrame
"""
data = None # TODO: Add back when we have support for multivariate TS
estimator_ = deep_clone(estimator)
loaded_in_same_env = True
# Check if loaded in a different environment
if not hasattr(self, "X_test") or fh is not None:
# If the model is saved and loaded afterwards,
# it will not have self.X_test
# Also do not display metrics if user provides own fh
# (even if it is same as test set horizon) per
# https://github.com/pycaret/pycaret/issues/1702
loaded_in_same_env = False
verbose = False
if fh is not None:
# Do not display metrics if user provides own fh
# (even if it is same as test set horizon) per
# https://github.com/pycaret/pycaret/issues/1702
verbose = False
if fh is None:
if not hasattr(self, "fh"):
# If the model is saved and loaded afterwards,
# it will not have self.fh
fh = estimator_.fh
else:
# Get the fh in the right format for sktime
fh = self.check_fh(fh)
try:
return_vals = estimator_.predict(
X=data, fh=fh, return_pred_int=return_pred_int, alpha=alpha
)
except NotImplementedError as error:
self.logger.warning(error)
self.logger.warning(
"Most likely, prediction intervals has not been implemented for this "
"algorithm. Predcition will be run with `return_pred_int` = False, and "
"NaN values will be returned for the prediction intervals instead."
)
return_vals = estimator_.predict(
X=data, fh=fh, return_pred_int=False, alpha=alpha
)
if isinstance(return_vals, tuple):
# Prediction Interval is returned
# First Value is a series of predictions
# Second Value is a dataframe of lower and upper bounds
result = pd.concat(return_vals, axis=1)
result.columns = ["y_pred", "lower", "upper"]
else:
# Prediction interval is not returned (not implemented)
if return_pred_int:
result = pd.DataFrame(return_vals, columns=["y_pred"])
result["lower"] = np.nan
result["upper"] = np.nan
else:
# Leave as series
result = return_vals
if result.name is None:
if hasattr(self, "y"):
result.name = self.y.name
else:
# If the model is saved and loaded afterwards,
# it will not have self.y
pass
# Converting to float since rounding does not support int
result = result.astype(float).round(round)
if isinstance(result.index, pd.DatetimeIndex):
result.index = (
result.index.to_period()
) # Prophet with return_pred_int = True returns datetime index.
#################
#### Metrics ####
#################
# Only display if loaded in same environment
# This is not technically y_test_pred in all cases.
# If the model has not been finalized, y_test_pred will match the indices from y_test
# If the model has been finalized, y_test_pred will not match the indices from y_test
# Also, the user can use a different fh length in predict in which case the length
# of y_test_pred will not match y_test.
if loaded_in_same_env:
X_test_ = self.X_test.copy()
# Some predict methods in sktime expect None (not an empty dataframe as
# returned by pycaret). Hence converting to None.
if X_test_.shape[0] == 0 or X_test_.shape[1] == 0:
X_test_ = None
y_test_ = self.y_test.copy()
# y_train for finalized model is different from self.y_train
# Hence, better to get this from the estimator directly.
y_train = estimator_._y
y_test_pred, lower, upper = get_predictions_with_intervals(
forecaster=estimator_, X_test=X_test_, fh=fh, alpha=alpha
)
if len(y_test_pred) != len(y_test_):
msg = (
"predict_model >> Forecast Horizon does not match the horizon length "
"used during training. Metrics will not be displayed."
)
self.logger.warning(msg)
verbose = False
# concatenates by index
y_test_and_pred = pd.concat([y_test_pred, y_test_], axis=1)
# Removes any indices that do not match
y_test_and_pred.dropna(inplace=True)
y_test_pred_common = y_test_and_pred[y_test_and_pred.columns[0]]
y_test_common = y_test_and_pred[y_test_and_pred.columns[1]]
if len(y_test_and_pred) == 0:
self.logger.warning(
"predict_model >> No indices matched between test set and prediction. "
"You are most likely calling predict_model after finalizing model. "
"Metrics will not be displayed."
)
metrics = self._calculate_metrics(y_test=[], pred=[], pred_prob=None) # type: ignore
metrics = {metric_name: np.nan for metric_name, _ in metrics.items()}
verbose = False
else:
# Pass additional keyword arguments (like y_train, lower, upper) to
# method since they need to be passed to certain metrics like MASE,
# INPI, etc. This method will internally orchestrate the passing of
# the right arguments to the scorers.
initial_kwargs = self.get_additional_scorer_kwargs()
additional_scorer_kwargs = update_additional_scorer_kwargs(
initial_kwargs=initial_kwargs,
y_train=y_train,
lower=lower,
upper=upper,
)
metrics = self._calculate_metrics(
y_test=y_test_common,
pred=y_test_pred_common,
pred_prob=None,
**additional_scorer_kwargs,
) # type: ignore
# Display Test Score
# model name
display = None
try:
np.random.seed(self.seed)
if not display:
display = Display(verbose=verbose, html_param=self.html_param,)
except:
display = Display(verbose=False, html_param=False,)
full_name = self._get_model_name(estimator_)
df_score = | pd.DataFrame(metrics, index=[0]) | pandas.DataFrame |
"""Handle the raw data input/output and interface with external formats."""
from obspy.core import read
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
import datetime as dt
def load_stream(path):
"""Loads a Stream object from the file at path.
Args:
path: path to the input file, (for supported formats see,
http://docs.obspy.org/tutorial/code_snippets/reading_seismograms.html)
Returns:
an obspy.core.Stream object
(http://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.html#obspy.core.stream.Stream)
"""
stream = read(path)
stream.merge()
# assert len(stream) == 3 # We need X,Y,Z traces
return stream
def load_catalog(path):
"""Loads a event catalog from a .csv file.
Each row in the catalog references a know seismic event.
Args:
path: path to the input .csv file.
Returns:
catalog: A Pandas dataframe.
"""
catalog = pd.read_csv(path)
# Check if utc_timestamp exists, otherwise create it
if 'utc_timestamp' not in catalog.columns:
utc_timestamp = []
for e in catalog.origintime.values:
utc_timestamp.append(UTCDateTime(e).timestamp)
catalog['utc_timestamp'] = utc_timestamp
return catalog
def write_stream(stream, path):
stream.write(path, format='MSEED')
def write_catalog(events, path):
catalog = pd.DataFrame(
{'utc_timestamp': pd.Series([t.timestamp for t in events])})
catalog.to_csv(path)
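# Illustrative usage (file paths are hypothetical): read a waveform file and its event
# catalog, then write the merged stream back out.
#
#     stream = load_stream('data/station_XYZ.mseed')
#     catalog = load_catalog('data/events.csv')
#     write_stream(stream, 'data/station_XYZ_merged.mseed')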
def write_catalog_with_clusters(events, clusters, latitudes, longitudes, depths, path):
catalog = pd.DataFrame(
{'utc_timestamp': pd.Series([t for t in events]),
"cluster_id": | pd.Series([cluster_id for cluster_id in clusters]) | pandas.Series |
from absl import logging
from collections import defaultdict
from credoai.utils.common import to_array, NotRunError, ValidationError
from credoai.metrics import Metric, find_metrics, MODEL_METRIC_CATEGORIES
from credoai.modules.credo_module import CredoModule
from fairlearn.metrics import MetricFrame
from scipy.stats import norm
from sklearn.utils import check_consistent_length
from typing import List, Union
import pandas as pd
class PerformanceModule(CredoModule):
"""
Performance module for Credo AI. Handles any metric that can be
calculated on a set of ground truth labels and predictions,
e.g., binary classification, multiclass classification, regression.
This module takes in a set of metrics and provides functionality to:
- calculate the metrics
- create disaggregated metrics
Parameters
----------
metrics : List-like
list of metric names as string or list of Metrics (credoai.metrics.Metric).
        Metric strings should be in the list returned by credoai.metrics.list_metrics.
        Note: for performance parity metrics like
        "false negative rate parity", just list "false negative rate". Parity metrics
        are calculated automatically if the performance metric is supplied.
y_true : (List, pandas.Series, numpy.ndarray)
The ground-truth labels (for classification) or target values (for regression).
y_pred : (List, pandas.Series, numpy.ndarray)
The predicted labels for classification
y_prob : (List, pandas.Series, numpy.ndarray), optional
The unthresholded predictions, confidence values or probabilities.
sensitive_features : pandas.DataFrame
The segmentation feature(s) which should be used to create subgroups to analyze.
"""
def __init__(self,
metrics,
y_true,
y_pred,
y_prob=None,
sensitive_features=None
):
super().__init__()
# data variables
self.y_true = to_array(y_true)
self.y_pred = to_array(y_pred)
self.y_prob = to_array(y_prob) if y_prob is not None else None
self.perform_disaggregation = True
if sensitive_features is None:
self.perform_disaggregation = False
# only set to use metric frame
sensitive_features = pd.DataFrame({'NA': ['NA'] * len(self.y_true)})
self.sensitive_features = sensitive_features
self._validate_inputs()
# assign variables
self.metrics = metrics
self.metric_frames = {}
self.performance_metrics = None
self.prob_metrics = None
self.failed_metrics = None
self.update_metrics(metrics)
def run(self):
"""
Run performance base module
Returns
-------
self
"""
self.results = {'overall_performance': self.get_overall_metrics()}
if self.perform_disaggregation:
self.results.update(self.get_disaggregated_performance())
return self
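    # Illustrative usage sketch (comment only; metric names and data are placeholders):
    # build the module from ground truth, predictions and an optional sensitive-feature
    # frame, run it, and read the results dictionary.
    #
    #     mod = PerformanceModule(metrics=['accuracy'], y_true=y_true, y_pred=y_pred,
    #                             sensitive_features=df[['gender']])
    #     results = mod.run().results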
def prepare_results(self, filter=None):
"""Prepares results for Credo AI's governance platform
Structures results for export as a dataframe with appropriate structure
for exporting. See credoai.modules.credo_module.
Parameters
----------
filter : str, optional
Regex string to filter fairness results if only a subset are desired.
Passed as a regex argument to pandas `filter` function applied to the
concatenated output of Fairnessmodule.get_fairness_results and
Fairnessmodule.get_disaggregated_performance, by default None
Returns
-------
pd.DataFrame
Raises
------
NotRunError
Occurs if self.run is not called yet to generate the raw assessment results
"""
if self.results is not None:
if 'overall_performance' in self.results:
results = self.results['overall_performance']
else:
results = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 09:40:49 2018
@author: yuwei
"""
import pandas as pd
import numpy as np
import math
import random
import time
import scipy as sp
import xgboost as xgb
def loadData():
"下载数据"
trainSet = pd.read_table('round1_ijcai_18_train_20180301.txt',sep=' ')
testSet = pd.read_table('round1_ijcai_18_test_a_20180301.txt',sep=' ')
return trainSet,testSet
def splitData(trainSet,testSet):
"按时间划分验证集"
#转化测试集时间戳为标准时间
time_local = testSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
testSet['context_timestamp'] = time_local
    #convert the training set timestamps to standard datetime strings
time_local = trainSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
trainSet['context_timestamp'] = time_local
del time_local
    #process the item_category_list attribute of the training set
trainSet['item_category_list'] = trainSet.item_category_list.map(lambda x :x.split(';'))
trainSet['item_category_list_2'] = trainSet.item_category_list.map(lambda x :x[1])
trainSet['item_category_list_3'] = trainSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
trainSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,trainSet['item_category_list_2'],trainSet['item_category_list_3']))
    #process the item_category_list attribute of the test set
testSet['item_category_list'] = testSet.item_category_list.map(lambda x :x.split(';'))
testSet['item_category_list_2'] = testSet.item_category_list.map(lambda x :x[1])
testSet['item_category_list_3'] = testSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
testSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,testSet['item_category_list_2'],testSet['item_category_list_3']))
del trainSet['item_category_list_3'];del testSet['item_category_list_3'];
    #compute the rank of the item category within predict_category_property
trainSet['predict_category'] = trainSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
trainSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,trainSet['item_category_list_2'],trainSet['predict_category']))
testSet['predict_category'] = testSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
testSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,testSet['item_category_list_2'],testSet['predict_category']))
    #count the categories shared between item_category_list and predict_category
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
    #count of categories that differ
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
del trainSet['predict_category']; del testSet['predict_category']
"划分数据集"
#测试集 23-24号特征提取,25号打标
test = testSet
testFeat = trainSet[trainSet['context_timestamp']>'2018-09-23']
    #validation set: features extracted from the 22nd-23rd, labels on the 24th
validate = trainSet[trainSet['context_timestamp']>'2018-09-24']
validateFeat = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-24')]
    #training set: features from the 21st-22nd, labels on the 23rd; features 20th-21st, labels on the 22nd; features 19th-20th, labels on the 21st; features 18th-19th, labels on the 20th
    #label windows
train1 = trainSet[(trainSet['context_timestamp']>'2018-09-23') & (trainSet['context_timestamp']<'2018-09-24')]
train2 = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-23')]
train3 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-22')]
train4 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-21')]
    #feature windows
trainFeat1 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-23')]
trainFeat2 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-22')]
trainFeat3 = trainSet[(trainSet['context_timestamp']>'2018-09-19') & (trainSet['context_timestamp']<'2018-09-21')]
trainFeat4 = trainSet[(trainSet['context_timestamp']>'2018-09-18') & (trainSet['context_timestamp']<'2018-09-20')]
return test,testFeat,validate,validateFeat,train1,trainFeat1,train2,trainFeat2,train3,trainFeat3,train4,trainFeat4
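# Illustrative note on assumed downstream usage: each label window is paired with the
# get_*_feat outputs computed on its preceding two-day feature window, and the four
# training pairs are then stacked, e.g.
#
#     train = pd.concat([build_feats(train1, trainFeat1), build_feats(train2, trainFeat2),
#                        build_feats(train3, trainFeat3), build_feats(train4, trainFeat4)])
#
# where build_feats is a hypothetical helper that merges the feature tables onto the labels.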
def modelXgb(train,test):
"xgb模型"
train_y = train['is_trade'].values
# train_x = train.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property'
# ],axis=1).values
    #based on the Pearson correlation coefficient, drop attributes with a correlation below -0.2
train_x = train.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property','is_trade',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first'
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service',
],axis=1).values
# test_x = test.drop(['item_brand_id',
# 'item_city_id','user_id','shop_id','context_id',
# 'instance_id', 'item_id','item_category_list',
# 'item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade',
# 'item_price_level','user_rank_down',
# 'item_category_list_2_not_buy_count',
# 'item_category_list_2_count',
# 'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
# ],axis=1).values
test_x = test.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
],axis=1).values
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
    # model parameters
params = {'booster': 'gbtree',
'objective':'binary:logistic',
'eval_metric':'logloss',
'eta': 0.03,
'max_depth': 5, # 6
'colsample_bytree': 0.8,#0.8
'subsample': 0.8,
'scale_pos_weight': 1,
'min_child_weight': 18 # 2
}
    # training
watchlist = [(dtrain,'train')]
bst = xgb.train(params, dtrain, num_boost_round=700,evals=watchlist)
    # prediction
predict = bst.predict(dtest)
# test_xy = test[['instance_id','is_trade']]
test_xy = test[['instance_id']]
test_xy['predicted_score'] = predict
return test_xy
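# Illustrative usage sketch (submission file name is hypothetical): after the feature
# tables have been merged onto the train/test windows, fit and export predictions with
#
#     result = modelXgb(train, test)
#     result.to_csv('submission.txt', sep=' ', index=False)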
def get_item_feat(data,dataFeat):
"item的特征提取"
result = pd.DataFrame(dataFeat['item_id'])
result = result.drop_duplicates(['item_id'],keep='first')
"1.统计item出现次数"
dataFeat['item_count'] = dataFeat['item_id']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_count',aggfunc='count').reset_index()
del dataFeat['item_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"2.统计item历史被购买的次数"
dataFeat['item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_buy_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"3.统计item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_buy_count,result.item_count))
result['item_buy_ratio'] = buy_ratio
"4.统计item历史未被够买的次数"
result['item_not_buy_count'] = result['item_count'] - result['item_buy_count']
return result
def get_user_feat(data,dataFeat):
"user的特征提取"
result = pd.DataFrame(dataFeat['user_id'])
result = result.drop_duplicates(['user_id'],keep='first')
"1.统计user出现次数"
dataFeat['user_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_count',aggfunc='count').reset_index()
del dataFeat['user_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"2.统计user历史被购买的次数"
dataFeat['user_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_buy_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"3.统计user转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_buy_count,result.user_count))
result['user_buy_ratio'] = buy_ratio
"4.统计user历史未被够买的次数"
result['user_not_buy_count'] = result['user_count'] - result['user_buy_count']
return result
def get_context_feat(data,dataFeat):
"context的特征提取"
result = pd.DataFrame(dataFeat['context_id'])
result = result.drop_duplicates(['context_id'],keep='first')
"1.统计context出现次数"
dataFeat['context_count'] = dataFeat['context_id']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_count',aggfunc='count').reset_index()
del dataFeat['context_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"2.统计context历史被购买的次数"
dataFeat['context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_buy_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"3.统计context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_buy_count,result.context_count))
result['context_buy_ratio'] = buy_ratio
"4.统计context历史未被够买的次数"
result['context_not_buy_count'] = result['context_count'] - result['context_buy_count']
return result
def get_shop_feat(data,dataFeat):
"shop的特征提取"
result = pd.DataFrame(dataFeat['shop_id'])
result = result.drop_duplicates(['shop_id'],keep='first')
"1.统计shop出现次数"
dataFeat['shop_count'] = dataFeat['shop_id']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_count',aggfunc='count').reset_index()
del dataFeat['shop_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"2.统计shop历史被购买的次数"
dataFeat['shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_buy_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"3.统计shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_buy_count,result.shop_count))
result['shop_buy_ratio'] = buy_ratio
"4.统计shop历史未被够买的次数"
result['shop_not_buy_count'] = result['shop_count'] - result['shop_buy_count']
return result
def get_timestamp_feat(data,dataFeat):
"context_timestamp的特征提取"
result = pd.DataFrame(dataFeat['context_timestamp'])
result = result.drop_duplicates(['context_timestamp'],keep='first')
"1.统计context_timestamp出现次数"
dataFeat['context_timestamp_count'] = dataFeat['context_timestamp']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['context_timestamp_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"2.统计context_timestamp历史被购买的次数"
dataFeat['context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_timestamp_buy_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"3.统计context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_timestamp_buy_count,result.context_timestamp_count))
result['context_timestamp_buy_ratio'] = buy_ratio
"4.统计context_timestamp历史未被够买的次数"
result['context_timestamp_not_buy_count'] = result['context_timestamp_count'] - result['context_timestamp_buy_count']
return result
def get_item_brand_feat(data,dataFeat):
"item_brand的特征提取"
result = pd.DataFrame(dataFeat['item_brand_id'])
result = result.drop_duplicates(['item_brand_id'],keep='first')
"1.统计item_brand出现次数"
dataFeat['item_brand_count'] = dataFeat['item_brand_id']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_count',aggfunc='count').reset_index()
del dataFeat['item_brand_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"2.统计item_brand历史被购买的次数"
dataFeat['item_brand_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_brand_buy_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"3.统计item_brand转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_brand_buy_count,result.item_brand_count))
result['item_brand_buy_ratio'] = buy_ratio
"4.统计item_brand历史未被够买的次数"
result['item_brand_not_buy_count'] = result['item_brand_count'] - result['item_brand_buy_count']
return result
def get_item_city_feat(data,dataFeat):
"item_city的特征提取"
result = pd.DataFrame(dataFeat['item_city_id'])
result = result.drop_duplicates(['item_city_id'],keep='first')
"1.统计item_city出现次数"
dataFeat['item_city_count'] = dataFeat['item_city_id']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_count',aggfunc='count').reset_index()
del dataFeat['item_city_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"2.统计item_city历史被购买的次数"
dataFeat['item_city_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_city_buy_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"3.统计item_city转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_city_buy_count,result.item_city_count))
result['item_city_buy_ratio'] = buy_ratio
"4.统计item_city历史未被够买的次数"
result['item_city_not_buy_count'] = result['item_city_count'] - result['item_city_buy_count']
return result
def get_user_gender_feat(data,dataFeat):
"user_gender的特征提取"
result = pd.DataFrame(dataFeat['user_gender_id'])
result = result.drop_duplicates(['user_gender_id'],keep='first')
"1.统计user_gender出现次数"
dataFeat['user_gender_count'] = dataFeat['user_gender_id']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_count',aggfunc='count').reset_index()
del dataFeat['user_gender_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"2.统计user_gender历史被购买的次数"
dataFeat['user_gender_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_gender_buy_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"3.统计user_gender转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_gender_buy_count,result.user_gender_count))
result['user_gender_buy_ratio'] = buy_ratio
"4.统计user_gender历史未被够买的次数"
result['user_gender_not_buy_count'] = result['user_gender_count'] - result['user_gender_buy_count']
return result
def get_user_occupation_feat(data,dataFeat):
"user_occupation的特征提取"
result = pd.DataFrame(dataFeat['user_occupation_id'])
result = result.drop_duplicates(['user_occupation_id'],keep='first')
"1.统计user_occupation出现次数"
dataFeat['user_occupation_count'] = dataFeat['user_occupation_id']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_count',aggfunc='count').reset_index()
del dataFeat['user_occupation_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"2.统计user_occupation历史被购买的次数"
dataFeat['user_occupation_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_occupation_buy_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"3.统计user_occupation转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_occupation_buy_count,result.user_occupation_count))
result['user_occupation_buy_ratio'] = buy_ratio
"4.统计user_occupation历史未被够买的次数"
result['user_occupation_not_buy_count'] = result['user_occupation_count'] - result['user_occupation_buy_count']
return result
def get_context_page_feat(data,dataFeat):
"context_page的特征提取"
result = pd.DataFrame(dataFeat['context_page_id'])
result = result.drop_duplicates(['context_page_id'],keep='first')
"1.统计context_page出现次数"
dataFeat['context_page_count'] = dataFeat['context_page_id']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_count',aggfunc='count').reset_index()
del dataFeat['context_page_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"2.统计context_page历史被购买的次数"
dataFeat['context_page_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_page_buy_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"3.统计context_page转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_page_buy_count,result.context_page_count))
result['context_page_buy_ratio'] = buy_ratio
"4.统计context_page历史未被够买的次数"
result['context_page_not_buy_count'] = result['context_page_count'] - result['context_page_buy_count']
return result
def get_shop_review_num_level_feat(data,dataFeat):
"context_page的特征提取"
result = pd.DataFrame(dataFeat['shop_review_num_level'])
result = result.drop_duplicates(['shop_review_num_level'],keep='first')
"1.统计shop_review_num_level出现次数"
dataFeat['shop_review_num_level_count'] = dataFeat['shop_review_num_level']
feat = pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_count',aggfunc='count').reset_index()
del dataFeat['shop_review_num_level_count']
result = pd.merge(result,feat,on=['shop_review_num_level'],how='left')
"2.统计shop_review_num_level历史被购买的次数"
dataFeat['shop_review_num_level_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_review_num_level_buy_count']
result = pd.merge(result,feat,on=['shop_review_num_level'],how='left')
"3.统计shop_review_num_level转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_review_num_level_buy_count,result.shop_review_num_level_count))
result['shop_review_num_level_buy_ratio'] = buy_ratio
"4.统计shop_review_num_level历史未被够买的次数"
result['shop_review_num_level_not_buy_count'] = result['shop_review_num_level_count'] - result['shop_review_num_level_buy_count']
return result
def get_item_category_list_2_feat(data,dataFeat):
"item_category_list_2的特征提取"
result = pd.DataFrame(dataFeat['item_category_list_2'])
result = result.drop_duplicates(['item_category_list_2'],keep='first')
"1.统计item_category_list_2出现次数"
dataFeat['item_category_list_2_count'] = dataFeat['item_category_list_2']
feat = pd.pivot_table(dataFeat,index=['item_category_list_2'],values='item_category_list_2_count',aggfunc='count').reset_index()
del dataFeat['item_category_list_2_count']
result = pd.merge(result,feat,on=['item_category_list_2'],how='left')
"2.统计item_category_list_2历史被购买的次数"
dataFeat['item_category_list_2_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_category_list_2'],values='item_category_list_2_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_category_list_2_buy_count']
result = pd.merge(result,feat,on=['item_category_list_2'],how='left')
"3.统计item_category_list_2转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_category_list_2_buy_count,result.item_category_list_2_count))
result['item_category_list_2_buy_ratio'] = buy_ratio
"4.统计item_category_list_2历史未被够买的次数"
result['item_category_list_2_not_buy_count'] = result['item_category_list_2_count'] - result['item_category_list_2_buy_count']
return result
def get_user_item_feat(data,dataFeat):
"user-item的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_id']])
result = result.drop_duplicates(['user_id','item_id'],keep='first')
"1.统计user-item出现次数"
dataFeat['user_item_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_id'],values='user_item_count',aggfunc='count').reset_index()
del dataFeat['user_item_count']
result = pd.merge(result,feat,on=['user_id','item_id'],how='left')
"2.统计user-item历史被购买的次数"
dataFeat['user_item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_id'],values='user_item_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_buy_count']
result = pd.merge(result,feat,on=['user_id','item_id'],how='left')
"3.统计user-item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_buy_count,result.user_item_count))
result['user_item_buy_ratio'] = buy_ratio
"4.统计user-item历史未被够买的次数"
result['user_item_not_buy_count'] = result['user_item_count'] - result['user_item_buy_count']
return result
def get_user_shop_feat(data,dataFeat):
"user-shop的特征提取"
result = pd.DataFrame(dataFeat[['user_id','shop_id']])
result = result.drop_duplicates(['user_id','shop_id'],keep='first')
"1.统计user-shop出现次数"
dataFeat['user_shop_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_id'],values='user_shop_count',aggfunc='count').reset_index()
del dataFeat['user_shop_count']
result = pd.merge(result,feat,on=['user_id','shop_id'],how='left')
"2.统计user-shop历史被购买的次数"
dataFeat['user_shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_id'],values='user_shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_shop_buy_count']
result = pd.merge(result,feat,on=['user_id','shop_id'],how='left')
"3.统计user-shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_shop_buy_count,result.user_shop_count))
result['user_shop_buy_ratio'] = buy_ratio
"4.统计user-shop历史未被够买的次数"
result['user_shop_not_buy_count'] = result['user_shop_count'] - result['user_shop_buy_count']
return result
def get_user_context_feat(data,dataFeat):
"user-context的特征提取"
result = pd.DataFrame(dataFeat[['user_id','context_id']])
result = result.drop_duplicates(['user_id','context_id'],keep='first')
"1.统计user-context出现次数"
dataFeat['user_context_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_id'],values='user_context_count',aggfunc='count').reset_index()
del dataFeat['user_context_count']
result = pd.merge(result,feat,on=['user_id','context_id'],how='left')
"2.统计user-context历史被购买的次数"
dataFeat['user_context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_id'],values='user_context_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_buy_count']
result = pd.merge(result,feat,on=['user_id','context_id'],how='left')
"3.统计user-context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_buy_count,result.user_context_count))
result['user_context_buy_ratio'] = buy_ratio
"4.统计user-context历史未被够买的次数"
result['user_context_not_buy_count'] = result['user_context_count'] - result['user_context_buy_count']
return result
def get_user_timestamp_feat(data,dataFeat):
"user-context_timestamp的特征提取"
result = | pd.DataFrame(dataFeat[['user_id','context_timestamp']]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
myfile="../files/train_final.csv"
if os.path.isfile(myfile):
os.remove(myfile)
with open("../files/train.csv", "r") as rd:
with open("../files/train_final.csv", "a") as wr:
for i in range(1, 13):
wr.write("x" + str(i))
if (i != 12): wr.write(",")
wr.write("\n")
for line in rd:
line = line.strip()
if (line.startswith("[")):
line = line[1:len(line)]
if (line.endswith("]")):
line = line[:len(line) - 1]
wr.write(line + "\n")
train = pd.read_csv("../files/train_final.csv")
train["x1"] = pd.to_numeric(train["x1"], errors='coerce')
train["x2"] = pd.to_numeric(train["x2"], errors='coerce')
train["x3"] = pd.to_numeric(train["x3"], errors='coerce')
train["x4"] = pd.to_numeric(train["x4"], errors='coerce')
train["x5"] = pd.to_numeric(train["x5"], errors='coerce')
train["x6"] = | pd.to_numeric(train["x6"], errors='coerce') | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 1 10:31:37 2021
@author: <NAME> -workshop-LA-UP_IIT
speed: subway about 60 km/h; bus about 45-50 (45) km/h; walk about 3.6-7.2 (5) km/h. 1 km = 1000 m
"""
import networkx as nx
import numpy as np
def postSQL2gpd(table_name,geom_col='geometry',**kwargs):
from sqlalchemy import create_engine
import geopandas as gpd
engine=create_engine("postgres://{myusername}:{mypassword}@localhost:5432/{mydatabase}".format(myusername=kwargs['myusername'],mypassword=kwargs['<PASSWORD>'],mydatabase=kwargs['mydatabase']))
gdf=gpd.read_postgis(table_name, con=engine,geom_col=geom_col)
print("_"*50)
print('The data has been read from PostSQL database...')
return gdf
flatten_lst=lambda lst: [m for n_lst in lst for m in flatten_lst(n_lst)] if type(lst) is list else [lst]
def bus_network(b_centroid_,bus_stations_,bus_routes_,speed,**kwargs): #
import copy
import pandas as pd
import networkx as nx
from shapely.ops import nearest_points
from shapely.ops import substring
from tqdm import tqdm
#compute the distance between the site centroid and each bus station and get the nearest ones by given threshold
bus_stations=copy.deepcopy(bus_stations_)
bus_stations['center_distance']=bus_stations.geometry.apply(lambda row:row.distance(b_centroid_.geometry.values[0]))
# bus_stations.sort_values(by=['center_distance'],inplace=True)
# print(bus_stations)
start_stops=bus_stations[bus_stations.center_distance<=kwargs['start_stops_distance']]
start_stops_lineUID=start_stops.LineUid.unique()
start_stops_PointUid=start_stops.PointUid.unique()
#build bus stations network
bus_staions_routes=pd.merge(bus_stations,bus_routes_,on='LineUid')
bus_staions_routes_idx_LineUid=bus_staions_routes.set_index('LineUid',append=True,drop=False)
lines_group_list=[]
s_e_nodes=[]
# i=0
for LineUid,sub_df in tqdm(bus_staions_routes_idx_LineUid.groupby(level=1)):
# print(sub_df)
# print(sub_df.columns)
sub_df['nearestPts']=sub_df.apply(lambda row:nearest_points(row.geometry_y,row.geometry_x)[0],axis=1)
sub_df['project_norm']=sub_df.apply(lambda row:row.geometry_y.project(row.nearestPts,normalized=True),axis=1)
sub_df.sort_values(by='project_norm',inplace=True)
sub_df['order_idx']=range(1,len(sub_df)+1)
# station_geometries=sub_df.geometry_x.to_list()
project=sub_df.project_norm.to_list()
sub_df['second_project']=project[1:]+project[:1]
PointName=sub_df.PointName.to_list()
sub_df['second_PointName']=PointName[1:]+PointName[:1]
PointUid=sub_df.PointUid.to_list()
sub_df['second_PointUid']= PointUid[1:]+ PointUid[:1]
sub_df['substring']=sub_df.apply(lambda row:substring(row.geometry_y,row.project_norm,row.second_project,normalized=True),axis=1)
sub_df['forward_length']=sub_df.apply(lambda row:row.substring.length,axis=1)
sub_df['time_cost']=sub_df.apply(lambda row:row.forward_length/(speed*1000)*60,axis=1)
sub_df['edges']=sub_df.apply(lambda row:[(row.PointUid,row.second_PointUid),(row.second_PointUid,row.PointUid)],axis=1)
lines_group_list.append(sub_df)
s_e_nodes.append(sub_df.edges.to_list()[-1][0])
# print(i)
# i+=1
lines_df4G=pd.concat(lines_group_list)
# G=nx.Graph()
G=nx.from_pandas_edgelist(df=lines_df4G,source='PointUid',target='second_PointUid',edge_attr=['PointName','second_PointName','forward_length','geometry_x','time_cost'])
for idx,row in lines_df4G.iterrows():
G.nodes[row['PointUid']]['position']=(row.geometry_x.x,row.geometry_x.y)
G.nodes[row['PointUid']]['station_name']=row.PointName
return G,s_e_nodes,start_stops_PointUid,lines_df4G
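# Illustrative usage sketch (station UIDs are hypothetical): edges of G store travel
# time in minutes under the 'time_cost' attribute, so a quickest-route query could be
#
#     minutes = nx.shortest_path_length(G, source=start_uid, target=end_uid, weight='time_cost')
#     route = nx.shortest_path(G, source=start_uid, target=end_uid, weight='time_cost')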
def transfer_stations_network(station_geometries_df,transfer_distance,transfer_weight_ratio,speed):
import copy
from tqdm import tqdm
import pandas as pd
import networkx as nx
transfer_df_list=[]
station_geometries_dict=station_geometries_df.to_dict('record')
# i=0
for pt in tqdm(station_geometries_dict):
station_geometries_df_=copy.deepcopy(station_geometries_df)
station_geometries_df_['distance']=station_geometries_df_.geometry_x.apply(lambda row:row.distance(pt['geometry_x']))
transfer_df=station_geometries_df_[station_geometries_df_.distance<=transfer_distance]
transfer_df=transfer_df[transfer_df.distance!=0]
transfer_df.drop_duplicates(subset='PointUid',keep='first',inplace=True)
transfer_df['source_station']=pt['PointUid']
transfer_df['forward_length']=transfer_df.distance*transfer_weight_ratio
# print(transfer_df['forward_length'])
transfer_df=transfer_df[transfer_df.LineUid!=pt['LineUid']]
# print(transfer_df)
transfer_df_list.append(transfer_df)
# if i==1:break
# i+=1
transfer_df_concat= | pd.concat(transfer_df_list) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# # COVID-19 Exploratory Data Analysis
# > (Almost) Everything You Want To Know About COVID-19.
#
# - author: <NAME>
# - comments: true
# - categories: [EDA]
# - permalink: /corona-eda/
# - toc: true
# - image: images/copied_from_nb/covid-eda-2-1.png
# These visualizations were made by [<NAME>](https://twitter.com/imdevskp). Original notebook is [here](https://www.kaggle.com/imdevskp/covid-19-analysis-viz-prediction-comparisons).
# In[1]:
#hide
# essential libraries
import json
import random
from urllib.request import urlopen
# storing and analysis
import numpy as np
import pandas as pd
# visualization
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
import plotly.figure_factory as ff
import folium
# color pallette
cnf = '#393e46' # confirmed - grey
dth = '#ff2e63' # death - red
rec = '#21bf73' # recovered - cyan
act = '#fe9801' # active case - yellow
# converter
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# hide warnings
import warnings
warnings.filterwarnings('ignore')
# html embedding
from IPython.display import Javascript
from IPython.core.display import display, HTML
# In[2]:
#hide
# importing datasets
url = 'https://raw.githubusercontent.com/imdevskp/covid_19_jhu_data_web_scrap_and_cleaning/master/covid_19_clean_complete.csv'
full_table = pd.read_csv(url,
parse_dates=['Date'])
full_table.head()
# In[3]:
#hide
# cases
cases = ['Confirmed', 'Deaths', 'Recovered', 'Active']
# Active Case = confirmed - deaths - recovered
full_table['Active'] = full_table['Confirmed'] - full_table['Deaths'] - full_table['Recovered']
# replacing Mainland china with just China
full_table['Country/Region'] = full_table['Country/Region'].replace('Mainland China', 'China')
# filling missing values
full_table[['Province/State']] = full_table[['Province/State']].fillna('')
full_table[cases] = full_table[cases].fillna(0)
# In[4]:
#hide
# cases in the ships
ship = full_table[full_table['Province/State'].str.contains('Grand Princess')|full_table['Province/State'].str.contains('Diamond Princess cruise ship')]
# china and the row
china = full_table[full_table['Country/Region']=='China']
row = full_table[full_table['Country/Region']!='China']
# latest
full_latest = full_table[full_table['Date'] == max(full_table['Date'])].reset_index()
china_latest = full_latest[full_latest['Country/Region']=='China']
row_latest = full_latest[full_latest['Country/Region']!='China']
# latest condensed
full_latest_grouped = full_latest.groupby('Country/Region')[['Confirmed', 'Deaths', 'Recovered', 'Active']].sum().reset_index()
china_latest_grouped = china_latest.groupby('Province/State')[['Confirmed', 'Deaths', 'Recovered', 'Active']].sum().reset_index()
row_latest_grouped = row_latest.groupby('Country/Region')[['Confirmed', 'Deaths', 'Recovered', 'Active']].sum().reset_index()
# # World-Wide Totals
# In[5]:
#hide
temp = full_table.groupby(['Country/Region', 'Province/State'])[['Confirmed', 'Deaths', 'Recovered', 'Active']].max()
# temp.style.background_gradient(cmap='Reds')
# In[6]:
#hide_input
temp = full_table.groupby('Date')[['Confirmed', 'Deaths', 'Recovered', 'Active']].sum().reset_index()
temp = temp[temp['Date']==max(temp['Date'])].reset_index(drop=True)
temp.style.background_gradient(cmap='Pastel1')
# # Progression of Virus Over Time
# In[7]:
#hide_input
# https://app.flourish.studio/visualisation/1571387/edit
HTML('''<div class="flourish-embed flourish-bar-chart-race" data-src="visualisation/1571387"><script src="https://public.flourish.studio/resources/embed.js"></script></div>''')
# ## Cumulative Outcomes
# In[8]:
#hide
temp = full_table.groupby('Date')[['Recovered', 'Deaths', 'Active']].sum().reset_index()
temp = temp.melt(id_vars="Date", value_vars=['Recovered', 'Deaths', 'Active'],
var_name='Case', value_name='Count')
temp.head()
fig = px.area(temp, x="Date", y="Count", color='Case',
title='Cases over time', color_discrete_sequence = [rec, dth, act])
fig.write_image('covid-eda-2-1.png')
# 
# ## Recovery and Mortality Rate
# In[9]:
#hide
temp = full_table.groupby('Date').sum().reset_index()
# adding two more columns
temp['No. of Deaths to 100 Confirmed Cases'] = round(temp['Deaths']/temp['Confirmed'], 3)*100
temp['No. of Recovered to 100 Confirmed Cases'] = round(temp['Recovered']/temp['Confirmed'], 3)*100
# temp['No. of Recovered to 1 Death Case'] = round(temp['Recovered']/temp['Deaths'], 3)
temp = temp.melt(id_vars='Date', value_vars=['No. of Deaths to 100 Confirmed Cases', 'No. of Recovered to 100 Confirmed Cases'],
var_name='Ratio', value_name='Value')
fig = px.line(temp, x="Date", y="Value", color='Ratio', log_y=True,
              title='Recovery and Mortality Rate Over Time', color_discrete_sequence=[dth, rec])
fig.write_image('covid-eda-2-2.png')
# 
# ## No. of Places To Which COVID-19 spread
# In[10]:
#hide
c_spread = china[china['Confirmed']!=0].groupby('Date')['Province/State'].unique().apply(len)
c_spread = pd.DataFrame(c_spread).reset_index()
fig = px.line(c_spread, x='Date', y='Province/State', text='Province/State',
              title='Number of Provinces/States/Regions of China to which COVID-19 spread over time',
color_discrete_sequence=[cnf,dth, rec])
fig.update_traces(textposition='top center')
fig.write_image('covid-eda-3-1.png')
# ------------------------------------------------------------------------------------------
spread = full_table[full_table['Confirmed']!=0].groupby('Date')['Country/Region'].unique().apply(len)
spread = | pd.DataFrame(spread) | pandas.DataFrame |
"""
QIIME2 2019.10 uses pandas version 0.18.
As a result, could not use pandas.testing module
"""
from qiime2_helper.filter_by_abundance import (
percent_value_operation,
calculate_percent_value,
filter_by_abundance,
merge_df,
subset_df
)
import unittest
import pandas as pd
import numpy as np
class PercentValueOperationTestCase(unittest.TestCase):
def test_non_zero_data(self):
data = pd.Series([10, 20, 30, 40])
expected = pd.Series([0.1, 0.2, 0.3, 0.4])
observed = percent_value_operation(data)
isSeriesEqual = observed.eq(expected).all()
self.assertTrue(isSeriesEqual)
def test_zero_data(self):
data = | pd.Series([0, 0, 0, 0]) | pandas.Series |
# -*- coding: utf-8 -*-
import pandas as pd
import plotly.graph_objs as go
import requests
from base64 import b64encode as be
from dash_html_components import Th, Tr, Td, A
from datetime import datetime, timedelta
from flask import request
from folium import Map
from operator import itemgetter
from os.path import join, dirname, realpath
from random import randint
from requests.auth import HTTPBasicAuth
from .maputils import create_dcircle_marker, create_tcircle_marker
from .utils import (
api_request_to_json,
json_to_dataframe,
starttime_str_to_seconds,
)
TMP = join(dirname(realpath(__file__)), '../tmp/')
LCL = join(dirname(realpath(__file__)), '../images/')
def get_rsam(ch, st):
j = api_request_to_json(f'rsam?channel={ch}&starttime={st}')
data = []
d = pd.DataFrame(j['records'][ch])
if not d.empty:
d.set_index('date', inplace=True)
data = [go.Scatter(
x=d.index,
y=d.rsam,
mode='markers',
marker=dict(size=4)
)]
return {
'data': data,
'layout': {
'margin': {
't': 30
},
'xaxis': {
'range': [d.index.min(), d.index.max()]
},
'yaxis': {
'range': [d.rsam.min() - 20, 2 * d.rsam.mean()]
}
}
}
def get_tilt(ch, st):
j = api_request_to_json(f'tilt?channel={ch}&starttime={st}')
d = pd.DataFrame(j['records'][ch])
traces = []
if not d.empty:
d.set_index('date', inplace=True)
traces.append({
'x': d.index,
'y': d['radial'],
'name': f"radial {j['used_azimuth']:.1f}"
})
traces.append({
'x': d.index,
'y': d['tangential'],
'name': f"tangential {j['tangential_azimuth']:.1f}"
})
return {
'data': traces,
'layout': {
'margin': {
't': 30
}
}
}
def get_rtnet(ch, st):
j = api_request_to_json(f'rtnet?channel={ch}&starttime={st}')
d = pd.DataFrame(j['records'][ch])
traces = []
if not d.empty:
d.set_index('date', inplace=True)
traces.append({
'x': d.index,
'y': d.east,
'name': 'East',
'mode': 'markers',
'marker': dict(
size=4
)
})
traces.append({
'x': d.index,
'y': d.north,
'name': 'North',
'mode': 'markers',
'marker': dict(
size=4
)
})
traces.append({
'x': d.index,
'y': d.up,
'name': 'Up',
'mode': 'markers',
'marker': dict(
size=4
)
})
return {
'data': traces,
'layout': {
'margin': {
't': 30
}
}
}
def get_and_store_hypos(geo, st, current_data):
if is_data_needed(st, current_data):
return get_hypos(geo, st).to_json()
else:
return current_data
def is_data_needed(st, data):
if not data:
return True
now = datetime.now()
olddata = | pd.read_json(data) | pandas.read_json |
### LR model to predict binary outcomes ###
# The script is divided into 4 parts:
# 1. Data reformatting
# 2. Hyperparameter Tuning (HT_results)
# 3. Model training and cross validation (CV_results)
# 4. Model training and predictions (TEST_results)
## Intended to be run with arguments:
# bsub "python LR_models_training_and_evaluation_BINARY.py Day1 D1_raw_data D1_no_pda D1_no_pda_IM"
###############################################
##### 1. DATA PREPARATION AND ASSESSMENT #####
###############################################
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import sys, getopt
# TO PASS THE ARGUMENTS:
day = sys.argv[1]
data_type = sys.argv[2]
demog = sys.argv[3]
outcome = sys.argv[4]
# Example:
# day = 'Day1'
# data_type = 'D1_raw_data'
# demog = 'D1_pda'
# outcome = 'D1_pda_IM'
# RESULTS PATHS:
# results_root = results_root_path
# assessment_path = results_root+'assessment/'
# ht_path = results_root+'HT_results/'
# cv_path = results_root+'CV_results/'
# test_path = results_root+'TEST_results/'
# TO READ THE INPUT DATA (The datasets have been previously created to include only the relevant variables)
# root_path = root_path
file_name = day+'/'+data_type+'/'+demog+'/'+outcome+'.txt'
TTdata = root_path + file_name
df = pd.read_table(TTdata)
df = df.set_index('AE')
input_variables = list(df.columns)
with open(assessment_path+'input_data_column_names.txt', "w") as output:
output.write(str(input_variables))
# DATA PROCESSING: Features and Targets and Convert Data to Arrays
# Outcome (or labels) are the values to be predicted
outcome = pd.DataFrame(df['IM'])
descriptors = df.drop('IM', axis = 1) # Remove the labels from the features (or descriptors)
descriptors_list = list(descriptors.columns) # Saving feature names for later use
with open(assessment_path+'input_data_features.txt', "w") as output:
output.write(str(descriptors_list))
# TRAINING/VALIDATION (TV, for hyperparameter tuning) and TEST (Tt, for model evaluation) Sets:
# To Split the data into training and testing sets:
TV_features_df, Tt_features_df, TV_outcome_df, Tt_outcome_df = train_test_split(descriptors, outcome,
test_size = 0.30, random_state = 11,
stratify=outcome)
# To transform to numpy arrays without index:
TV_features = np.array(TV_features_df)
Tt_features = np.array(Tt_features_df)
TV_outcome = np.array(TV_outcome_df['IM'])
Tt_outcome = np.array(Tt_outcome_df['IM'])
# Percentage of indviduals in each class:
TV_class_frac = np.bincount(TV_outcome)*100/len(TV_outcome)
Tt_class_frac = np.bincount(Tt_outcome)*100/len(Tt_outcome)
# Save it:
fractions = pd.DataFrame(index=['TV', 'Test'],columns=['0','1'])
fractions.loc['TV'] = TV_class_frac.reshape(-1, len(TV_class_frac))
fractions.loc['Test'] = Tt_class_frac.reshape(-1, len(Tt_class_frac))
fractions.to_csv(assessment_path+'perc_class_split.csv', index=True)
print('All done for 1. DATA PREPARATION AND ASSESSMENT')
###############################################
##### 2.HYPERPARAMETER TUNING ###########
###############################################
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
# 4. Hyperparameter tunning: Stratified CV with Random Grid
penalty = ['l1','l2']
C_param_range = [0.001,0.01,0.1,1,10,100,1000]
# To Create the grid
param_grid = [
{'C': C_param_range}]
# print(random_grid)
# Use the random grid to search for best hyperparameters
# First create the base model to tune:
lr = LogisticRegression(class_weight='balanced', max_iter=1000)
# Random search of parameters, using 3 fold cross validation,
# search across 100 different combinations, and use all available cores
lr_grid = GridSearchCV(estimator = lr, param_grid = param_grid,
scoring = 'roc_auc', # With very unbalanced datasets, using accuracy as scoring metric to choose the best combination of parameters will not be the best strategy. Use ROC Area instead
return_train_score=True,
cv = 5,
verbose=2,
n_jobs = -1)
# Note: For integer/None inputs, if the estimator is a classifier and y is either binary or multiclass, StratifiedKFold is used. In all other cases, KFold is used.
# Fit the random search model
lr_grid.fit(TV_features, TV_outcome)
## To see the best parameters and results:
lr_grid.best_params_
lr_grid.cv_results_
lr_grid.best_score_
lr_grid.best_index_
lr_grid.scorer_
# To save the csv with the following details:
# Hyperparameter tuning results:
results = | pd.DataFrame.from_dict(lr_grid.cv_results_) | pandas.DataFrame.from_dict |
# Copyright 2021 The ProLoaF Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ==============================================================================
"""
Preprocesses your input data for use with ProLoaF
Transforms the data to a common format (pandas.DataFrame as csv) for all stations.
Notes
-----
- This script can load xlsx or csv files.
- If your data does not match the criteria, you can use a custom script that saves your
data as a pandas.DataFrame with datetimeindex to a csv file with a “;” as separator to
accomplish the same thing.
"""
import pandas as pd
import numpy as np
import sys
import json
import os
MAIN_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(MAIN_PATH)
from utils.config_util import read_config, parse_basic
#Import customized functions below this point
import utils.datatuner as dt
def load_raw_data_xlsx(files):
"""
Load data from an xlsx file
After loading, the date column in the raw data is converted to a UTC datetime
Parameters
----------
files : list
A list of files to read. See the Notes section for more information
Returns
-------
list
A list containing a DataFrame for each file that was read
Notes
-----
- Files is an array of maps containing the following data with the keyword (keyword)
+ ('file_name') the name of the xlsx file
+ ('date_column') the name of the date_column in the raw_data
+ ('time_zone') specifier for the timezone the raw data is recorded in
+ ('sheet_name') name or list of names of the sheets that are to be read
+ ('combine') boolean, all datasheets with true are combined into one, all others are read individually
+ ('start_column') Columns between this and ('end_column') are loaded
+ ('end_column')
"""
print('Importing XLSX Data...')
combined_files = []
individual_files = []
for xlsx_file in files:
print('importing ' + xlsx_file['file_name'])
# if isinstance(file_name, str):
# file_name = [file_name,'UTC']
date_column = xlsx_file['date_column']
raw_data = pd.read_excel(INPATH + xlsx_file['file_name'], xlsx_file['sheet_name'],
parse_dates=[date_column])
# convert load data to UTC
if(xlsx_file['time_zone'] != 'UTC'):
raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(xlsx_file['time_zone'], ambiguous="infer").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S')
else:
if (xlsx_file['dayfirst']):
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None)
else:
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None)
if(xlsx_file['data_abs']):
raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']] = raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']].abs()
# rename column IDs, specifically Time, this will be used later as the df index
raw_data.rename(columns={date_column: 'Time'}, inplace=True)
raw_data.head() # now the data is positive and set to UTC
raw_data.info()
# interpolating for missing entries created by asfreq and original missing values if any
raw_data.interpolate(method='time', inplace=True)
if(xlsx_file['combine']):
combined_files.append(raw_data)
else:
individual_files.append(raw_data)
if(len(combined_files) > 0):
individual_files.append(pd.concat(combined_files))
return individual_files
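# Illustrative sketch (an assumption, not taken from the project's config): one entry
# of the `files` argument for load_raw_data_xlsx, using the keywords described in the
# docstring above plus the 'dayfirst' and 'data_abs' flags read in the loop.  File,
# sheet and column names are hypothetical.
#
#   example_xlsx_files = [{
#       'file_name': 'load_2019.xlsx',
#       'date_column': 'Timestamp',
#       'time_zone': 'Europe/Berlin',
#       'sheet_name': 'Sheet1',
#       'combine': False,
#       'dayfirst': True,
#       'data_abs': True,
#       'start_column': 'Load_A',
#       'end_column': 'Load_C',
#   }]
#   frames = load_raw_data_xlsx(example_xlsx_files)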
def load_raw_data_csv(files):
"""
Load data from a csv file
After loading, the date column in the raw data is converted to a UTC datetime
Parameters
----------
files : list
A list of files to read. See the Notes section for more information
Returns
-------
list
A list containing a DataFrame for each file that was read
Notes
-----
- Files is an array of maps containing the following data with the keyword (keyword)
+ ('file_name') the name of the load_file
+ ('date_column') the name of the date_column in the raw_data
+ ('dayfirst') specifier for the formatting of the read time
+ ('sep') separator used in this file
+ ('combine') boolean, all datasheets with true are combined into one, all others are read individually
+ ('use_columns') list of columns that are loaded
"""
print('Importing CSV Data...')
combined_files = []
individual_files = []
for csv_file in files:
print('Importing ' + csv_file['file_name'] + ' ...')
date_column = csv_file['date_column']
raw_data = pd.read_csv(INPATH + csv_file['file_name'], sep=csv_file['sep'], usecols=csv_file['use_columns'], parse_dates=[date_column] , dayfirst=csv_file['dayfirst'])
# pd.read_csv(INPATH + name, sep=sep, usecols=cols, parse_dates=[date_column] , dayfirst=dayfirst)
if (csv_file['time_zone'] != 'UTC'):
raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(csv_file['time_zone'], ambiguous="infer").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S')
else:
if (csv_file['dayfirst']):
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None)
else:
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None)
print('...Importing finished. ')
raw_data.rename(columns={date_column: 'Time'}, inplace=True)
if(csv_file['combine']):
combined_files.append(raw_data)
else:
individual_files.append(raw_data)
if(len(combined_files) > 0):
individual_files.append(pd.concat(combined_files, sort = False))
#for frame in individual_files:
# frame.rename(columns={date_column: 'Time'}, inplace=True)
return individual_files
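# Illustrative sketch (an assumption): a matching `files` entry for load_raw_data_csv,
# again with hypothetical file and column names.
#
#   example_csv_files = [{
#       'file_name': 'weather_2019.csv',
#       'date_column': 'MESS_DATUM',
#       'time_zone': 'UTC',
#       'dayfirst': False,
#       'sep': ';',
#       'combine': True,
#       'use_columns': ['MESS_DATUM', 'temperature', 'wind_speed'],
#   }]
#   frames = load_raw_data_csv(example_csv_files)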
def set_to_hours(df):
"""
Sets the index of the DataFrame to 'Time' and the frequency to hours.
Parameters
----------
df : pandas.DataFrame
The DataFrame whose index and frequency are to be changed
Returns
-------
df
The modified DataFrame
"""
df['Time'] = | pd.to_datetime(df['Time']) | pandas.to_datetime |
import pandas as pd
import os
from ML_algorithms import LogisticReg, KNN, RandomForest
import module
(dirname, prom) = os.path.split(os.path.dirname(__file__))
resource_folder = input("Resource folder: ")
graph_num = input("Graph number: ")
size = int(input("Sample size: "))
df_positive = pd.read_csv(os.path.join(dirname,
"Link Prediction\\Resources\\{}\\positive_feat_{}.csv".format(resource_folder, graph_num)), nrows = size)
df_positive.dropna(axis=1, inplace=True)
df_negative = pd.read_csv(os.path.join(dirname,
"Link Prediction\\Resources\\{}\\negative_feat_{}.csv".format(resource_folder, graph_num)), nrows = size)
df_negative.dropna(axis=1, inplace=True)
finale_dataframe = | pd.concat([df_positive, df_negative]) | pandas.concat |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy as np
import pandas as pd
import pytest
from mars.dataframe import DataFrame, Series, ArrowStringDtype
from mars.tests.core import require_cudf
@pytest.mark.parametrize(
'distinct_opt',
['0'] if sys.platform.lower().startswith('win') else ['0', '1']
)
def test_sort_values_execution(setup, distinct_opt):
os.environ['PSRS_DISTINCT_COL'] = distinct_opt
df = pd.DataFrame(np.random.rand(100, 10), columns=['a' + str(i) for i in range(10)])
# test one chunk
mdf = DataFrame(df)
result = mdf.sort_values('a0').execute().fetch()
expected = df.sort_values('a0')
pd.testing.assert_frame_equal(result, expected)
result = mdf.sort_values(['a6', 'a7'], ascending=False).execute().fetch()
expected = df.sort_values(['a6', 'a7'], ascending=False)
pd.testing.assert_frame_equal(result, expected)
# test psrs
mdf = DataFrame(df, chunk_size=10)
result = mdf.sort_values('a0').execute().fetch()
expected = df.sort_values('a0')
pd.testing.assert_frame_equal(result, expected)
result = mdf.sort_values(['a3', 'a4']).execute().fetch()
expected = df.sort_values(['a3', 'a4'])
pd.testing.assert_frame_equal(result, expected)
# test ascending=False
result = mdf.sort_values(['a0', 'a1'], ascending=False).execute().fetch()
expected = df.sort_values(['a0', 'a1'], ascending=False)
pd.testing.assert_frame_equal(result, expected)
result = mdf.sort_values(['a7'], ascending=False).execute().fetch()
expected = df.sort_values(['a7'], ascending=False)
pd.testing.assert_frame_equal(result, expected)
# test multiindex
df2 = df.copy(deep=True)
df2.columns = pd.MultiIndex.from_product([list('AB'), list('CDEFG')])
mdf = DataFrame(df2, chunk_size=5)
result = mdf.sort_values([('A', 'C')]).execute().fetch()
expected = df2.sort_values([('A', 'C')])
pd.testing.assert_frame_equal(result, expected)
# test rechunk
mdf = DataFrame(df, chunk_size=3)
result = mdf.sort_values('a0').execute().fetch()
expected = df.sort_values('a0')
pd.testing.assert_frame_equal(result, expected)
result = mdf.sort_values(['a3', 'a4']).execute().fetch()
expected = df.sort_values(['a3', 'a4'])
pd.testing.assert_frame_equal(result, expected)
# test other types
raw = pd.DataFrame({'a': np.random.rand(10),
'b': np.random.randint(1000, size=10),
'c': np.random.rand(10),
'd': [np.random.bytes(10) for _ in range(10)],
'e': [pd.Timestamp(f'201{i}') for i in range(10)],
'f': [pd.Timedelta(f'{i} days') for i in range(10)]
},)
mdf = DataFrame(raw, chunk_size=3)
for label in raw.columns:
result = mdf.sort_values(label).execute().fetch()
expected = raw.sort_values(label)
pd.testing.assert_frame_equal(result, expected)
result = mdf.sort_values(['a', 'b', 'e'], ascending=False).execute().fetch()
expected = raw.sort_values(['a', 'b', 'e'], ascending=False)
pd.testing.assert_frame_equal(result, expected)
# test nan
df = pd.DataFrame({
'col1': ['A', 'A', 'B', 'B', 'D', 'C'],
'col2': [2, 1, 9, np.nan, 7, 4],
'col3': [0, 1, 9, 4, 2, 3],
})
mdf = DataFrame(df)
result = mdf.sort_values(['col2']).execute().fetch()
expected = df.sort_values(['col2'])
pd.testing.assert_frame_equal(result, expected)
mdf = DataFrame(df, chunk_size=3)
result = mdf.sort_values(['col2']).execute().fetch()
expected = df.sort_values(['col2'])
| pd.testing.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
from __future__ import absolute_import
import pandas as pd
import json
def _get_pyarrow_dtypes(schema, categories):
"""Convert a pyarrow.Schema object to pandas dtype dict"""
# Check for pandas metadata
has_pandas_metadata = schema.metadata is not None and b"pandas" in schema.metadata
if has_pandas_metadata:
pandas_metadata = json.loads(schema.metadata[b"pandas"].decode("utf8"))
pandas_metadata_dtypes = {
c.get("field_name", c.get("name", None)): c["numpy_type"]
for c in pandas_metadata.get("columns", [])
}
tz = {
c.get("field_name", c.get("name", None)): c["metadata"].get(
"timezone", None
)
for c in pandas_metadata.get("columns", [])
if c["metadata"]
}
else:
pandas_metadata_dtypes = {}
dtypes = {}
for i in range(len(schema)):
field = schema[i]
# Get numpy_dtype from pandas metadata if available
if field.name in pandas_metadata_dtypes:
if field.name in tz:
numpy_dtype = (
| pd.Series([], dtype="M8[ns]") | pandas.Series |
import os
import requests
import pandas as pd
import json
import sqlalchemy
from sqlalchemy import create_engine
from flask import Flask, request, render_template, jsonify
import pymysql
pymysql.install_as_MySQLdb()
is_heroku = False
if 'IS_HEROKU' in os.environ:
is_heroku = True
remote_db_endpoint = os.environ.get('remote_db_endpoint')
remote_db_port = os.environ.get('remote_db_port')
remote_db_name = os.environ.get('remote_db_name')
remote_db_user = os.environ.get('remote_db_user')
remote_db_pwd = os.environ.get('remote_db_pwd')
x_rapidapi_key = os.environ.get('x_rapidapi_key')
x_rapidapi_host = os.environ.get('x_rapidapi_host')
spoonacular_API = os.environ.get('spoonacular_API')
else:
from config import remote_db_endpoint, remote_db_port, remote_db_name, remote_db_user, remote_db_pwd
from config import x_rapidapi_key, x_rapidapi_host, spoonacular_API
###################################################
###################################################
###################################################
# getIngredients()
###################################################
###################################################
###################################################
def getIngredients(capture_list):
global final_final_grocery_list_df
    final_final_grocery_list_df = pd.DataFrame()
#######################################
# consider separating this part into a function
# recipe_ids_list = [1554861, 1560677]
    recipe_ingredients = []
    recipe_steps = []  # initialised here so the loop below does not raise a NameError; steps are collected but not returned
print('######### in getIngredients ############')
print(capture_list)
# ingredients stuff
for result in capture_list:
try:
recipe_id = result['id']
recipe_title = result['title']
analyzedInstructions = result['analyzedInstructions']
except Exception as e:
print('--- error with something ---')
print(e)
continue
instruction_steps = analyzedInstructions[0]['steps'] # Brooke addition
counter = 0 # Brooke addition
# INSTRUCTIONS ##############################
for item in instruction_steps: # Brooke addition
counter = counter + 1 # Brooke addition
step = item['step'] # Brooke addition
numbered_step = f'{counter}. {step}' # Brooke addition
recipe_steps.append(numbered_step) # Brooke addition
# INGREDIENTS ###############################
for instruction in analyzedInstructions:
steps = instruction['steps']
for step in steps:
ingredients = step['ingredients']
for ingredient in ingredients:
ingredient_name = ingredient['name']
recipe_ingredient = {
'recipe_id': recipe_id,
'recipe_title': recipe_title,
'ingredient_name': ingredient_name
}
recipe_ingredients.append(recipe_ingredient)
ingredients_df = pd.DataFrame(recipe_ingredients)
# dedupe ingredients df
# ingredients_df.drop_duplicates()
ingredients_df.drop_duplicates(subset=['ingredient_name'], inplace=True)
cloud_engine = create_engine(f"mysql://{remote_db_user}:{remote_db_pwd}@{remote_db_endpoint}:{remote_db_port}/{remote_db_name}")
cloud_conn = cloud_engine.connect()
#%% Querying the database
query = '''
SELECT DISTINCT
ingredient,
price,
title,
size
FROM
products_subset
'''
products_subset = pd.read_sql(query, cloud_conn)
# Renamed to GROCERY DF for clarity
# Cut down to a single return for each ingredient
grocery_df = products_subset
grocery_df.drop_duplicates(subset='ingredient', keep='first', inplace=True)
grocery_df = grocery_df.rename(columns={"title": "ingredient_title"})
# print(len(grocery_df))
# grocery_df.head()
recipe_ingredients_df = ingredients_df
recipe_ingredients_df = recipe_ingredients_df.rename(columns={"ingredient_name": "ingredient"})
# recipe_ingredients_df.head()
print('###### WHAT PYTHON THJINKS ARE THE DF KEYS ################')
print(recipe_ingredients_df.keys())
print(grocery_df.keys())
final_final_grocery_list_df = pd.merge(recipe_ingredients_df, grocery_df, how="inner", on=["ingredient", "ingredient"])
cloud_conn.close()
return final_final_grocery_list_df
# if final_final_grocery_list_df == True:
# print ("YEAH, BABY! We globalized.")
###################################################
#####################
#####################
# getRecipeMetadata
##################################################
##################################################
##################################################
def getRecipeMetadata(query, cuisine, diet, type_of_recipe, intolerances):
#######################################
# consider separating this part into a function
url = "https://spoonacular-recipe-food-nutrition-v1.p.rapidapi.com/recipes/searchComplex"
# these will come from form controls
query = query
cuisine = cuisine
diet = diet
type_of_recipe = type_of_recipe
intolerances = intolerances
# ranking = "2"
minCalories = "0"
maxCalories = "15000"
# minFat = "5"
# maxFat = "100"
# minProtein = "5"
# maxProtein = "100"
# minCarbs = "5"
# maxCarbs = "100"
querystring = {"limitLicense": "<REQUIRED>",
"offset": "0",
"number": "10",
"query": query,
"cuisine": cuisine,
"diet": diet,
"type": type_of_recipe,
"intolerances": intolerances, # NEW
# NEW
#"includeIngredients": "onions, lettuce, tomato",
#"excludeIngredients": "coconut, mango",
#"intolerances": "peanut, shellfish",
# "ranking": ranking,
"minCalories": minCalories,
"maxCalories": maxCalories,
# "minFat": minFat,
# "maxFat": maxFat,
# "minProtein": minProtein,
# "maxProtein": maxProtein,
# "minCarbs": minCarbs,
# "maxCarbs": maxCarbs,
"instructionsRequired": "True",
"addRecipeInformation": "True",
"fillIngredients": "True",
}
print(querystring)
headers = {
'x-rapidapi-key': x_rapidapi_key,
'x-rapidapi-host': x_rapidapi_host
}
response = requests.get(url, headers=headers, params=querystring)
response_json = response.json()
results = response_json['results']
# consider making everything above part of a separate function
#######################################
recipe_metadata_list = []
# recipe_steps = []
# ingredients stuff
for result in results:
try:
recipe_id = result['id']
recipe_title = result['title']
cooking_minutes = result['cookingMinutes']
source_url = result['sourceUrl']
image = result['image']
# Brooke modification / previously, it had been 'likes'
# cuisine = result['cuisines'][0] # Brooke addition (my slicing may not work; my method used a df)
calories_serving = result['calories'] # Brooke addition
# Brooke addition
servings = result['servings'] # Brooke addition
analyzedInstructions = result['analyzedInstructions']
except Exception as e:
print(e)
print(result.keys())
continue
# 'directions': recipe_steps
# # we need to figure out what this block is...
# for result in results:
# servings = result['servings']
instruction_steps = analyzedInstructions[0]['steps'] # Brooke addition
counter = 0
recipe_steps = [] # Brooke addition
for item in instruction_steps: # Brooke addition
counter = counter + 1 # Brooke addition
step = item['step'] # Brooke addition
numbered_step = f'{counter}. {step}' # Brooke addition
recipe_steps.append(numbered_step) # Brooke addition
recipe_metadata = {
'recipe_id': recipe_id,
'recipe_title': recipe_title,
'cooking_minutes': cooking_minutes,
'source_url': source_url,
'image': image,
'calories_serving': calories_serving,
'servings': servings,
'recipe_steps': recipe_steps
}
# will need to rename this
recipe_metadata_list.append(recipe_metadata)
recipe_metadata_df = pd.DataFrame(recipe_metadata_list)
# dedupe ingredients df
# recipe_metadata_df.drop_duplicates(inplace=True)
return recipe_metadata_df
###################################################
#####################
#####################
# getQuantities
##################################################
##################################################
##################################################
def getQuantities(query, cuisine):
#######################################
# consider separating this part into a function
url = "https://spoonacular-recipe-food-nutrition-v1.p.rapidapi.com/recipes/searchComplex"
    # The per-recipe request URL (url3) is built inside the loop below, once recipe_id and headers2 are defined
# these will come from form controls
query = query
cuisine = cuisine
type_of_recipe = 'main course'
ranking = "2"
minCalories = "150"
maxCalories = "1500"
minFat = "5"
maxFat = "100"
minProtein = "5"
maxProtein = "100"
minCarbs = "5"
maxCarbs = "100"
querystring = {"limitLicense": "<REQUIRED>",
"offset": "0",
"number": "10",
"query": query,
"cuisine": cuisine,
#"includeIngredients": "onions, lettuce, tomato",
#"excludeIngredients": "coconut, mango",
#"intolerances": "peanut, shellfish",
"type": type_of_recipe,
"ranking": ranking,
"minCalories": minCalories,
"maxCalories": maxCalories,
"minFat": minFat,
"maxFat": maxFat,
"minProtein": minProtein,
"maxProtein": maxProtein,
"minCarbs": minCarbs,
"maxCarbs": maxCarbs,
"instructionsRequired": "True",
"addRecipeInformation": "True",
"fillIngredients": "True",
}
headers = {
'x-rapidapi-key': x_rapidapi_key,
'x-rapidapi-host': x_rapidapi_host
}
headers2 = spoonacular_API # PartDeux Addition
response = requests.get(url, headers=headers, params=querystring)
response_json = response.json()
results = response_json['results']
# consider making everything above part of a separate function
#######################################
# recipe_metadata_list = []
# create an Empty DataFrame object with column headers
column_names = ["recipe_id", "recipe_title", "ingredient_id", "ingredient", "amount_unit", "amount", "unit"]
recipe_quantities_df = pd.DataFrame(columns = column_names)
# ingredients stuff
for result in results:
try:
recipe_id = result['id']
print(recipe_id)
recipe_title = result['title']
            url3 = f"https://api.spoonacular.com/recipes/{recipe_id}/information?apiKey={headers2}"
            response2 = requests.get(url3)
json_data2 = response2.json()
df2 = pd.DataFrame(json_data2["extendedIngredients"])
df3 = df2[['id', 'name', 'original', 'amount', 'unit']]
df4 = df3.rename(columns={"id": "ingredient_id", "name": "ingredient", "original": "amount_unit"})
df4.insert(0, "recipe_id", recipe_id)
df4.insert(1, "recipe_title", recipe_title)
except Exception as e:
print('--- error with something ---')
print(result.keys())
continue
        recipe_quantities_df = recipe_quantities_df.merge(df4, how='outer')
# recipe_quantities_etal = {
# 'recipe_id': recipe_id,
# 'recipe_title': recipe_title
#}
# will need to rename this
# recipe_metadata_list.append(recipe_metadata)
# recipe_metadata_df = pd.DataFrame(recipe_metadata_list)
# dedupe ingredients df
# recipe_quantities_df.drop_duplicates(inplace=True)
return recipe_quantities_df
###################################################
#####################
#####################
# Connect to Database
##################################################
##################################################
##################################################
cloud_engine = create_engine(f"mysql://{remote_db_user}:{remote_db_pwd}@{remote_db_endpoint}:{remote_db_port}/{remote_db_name}")
cloud_conn = cloud_engine.connect()
#%% Querying the database
query = '''
SELECT DISTINCT
ingredient,
price,
title,
size
FROM
products_subset
'''
products_subset = pd.read_sql(query, cloud_conn)
products_subset.head()
cloud_conn.close()
#######################################################################################
#######################################################################################
#######################################################################################
#######################################################################################
# new_df = pd.DataFrame
def test_MAJOR(recipe_ids_list = [1554861,1560677,1571559]):
global new_df
global grocery_df
# new_df = pd.DataFrame
# import requests
import pandas as pd
import numpy as np
import json
import sqlalchemy
from sqlalchemy import create_engine
from flask import Flask, request, render_template, jsonify
import pymysql
pymysql.install_as_MySQLdb()
#from config import remote_db_endpoint, remote_db_port, remote_db_name, remote_db_user, remote_db_pwd
#from config import x_rapidapi_key, x_rapidapi_host, spoonacular_API
import pprint
# import urllib.request
#**************************************************************************************
capture_list = []
for recipe_id in recipe_ids_list:
url2 = f"https://api.spoonacular.com/recipes/{recipe_id}/information?apiKey={spoonacular_API}"
response = requests.get(url2)
response_json = response.json()
capture_list.append(response_json)
capture_list
print(json.dumps(capture_list, indent=4, sort_keys=True))
recipe_ingredients = []
recipe_steps = []
# ingredients stuff
for result in capture_list: ################ NOT SURE BUT PROBABLY
try:
recipe_id = result['id']
recipe_title = result['title']
analyzedInstructions = result['analyzedInstructions']
except Exception as e:
print('--- error with something ---')
print(e)
continue
instruction_steps = analyzedInstructions[0]['steps'] # Brooke addition
counter = 0 # Brooke addition
# INSTRUCTIONS ##############################
for item in instruction_steps: # Brooke addition
counter = counter + 1 # Brooke addition
step = item['step'] # Brooke addition
numbered_step = f'{counter}. {step}' # Brooke addition
recipe_steps.append(numbered_step) # Brooke addition
# INGREDIENTS ###############################
for instruction in analyzedInstructions:
steps = instruction['steps']
for step in steps:
ingredients = step['ingredients']
for ingredient in ingredients:
ingredient_name = ingredient['name']
recipe_ingredient = {
'recipe_id': recipe_id,
'recipe_title': recipe_title,
'ingredient_name': ingredient_name
}
recipe_ingredients.append(recipe_ingredient)
ingredients_df = pd.DataFrame(recipe_ingredients)
# dedupe ingredients df
# ingredients_df.drop_duplicates()
ingredients_df.drop_duplicates(subset=['ingredient_name'], inplace=True)
ingredients_df
######################## KEEP FOR POSSIBLE USE WITH FUNCTION
# return ingredients_df
cloud_engine = create_engine(f"mysql://{remote_db_user}:{remote_db_pwd}@{remote_db_endpoint}:{remote_db_port}/{remote_db_name}")
cloud_conn = cloud_engine.connect()
#%% Querying the database
query = '''
SELECT DISTINCT
ingredient,
price,
title,
size
FROM
products_subset
'''
products_subset = pd.read_sql(query, cloud_conn)
products_subset
len(products_subset)
# Renamed to GROCERY DF for clarity
# Cut down to a single return for each ingredient
grocery_df = products_subset
grocery_df.drop_duplicates(subset='ingredient', keep='first', inplace=True)
grocery_df = grocery_df.rename(columns={"title": "ingredient_title"})
print(len(grocery_df))
grocery_df.head()
recipe_ingredients_df = ingredients_df
recipe_ingredients_df = recipe_ingredients_df.rename(columns={"ingredient_name": "ingredient"})
recipe_ingredients_df.head()
new_df = pd.merge(recipe_ingredients_df, grocery_df, how="inner", on=["ingredient", "ingredient"])
new_df.head()
return new_df
test_MAJOR()
########################################################################################
def metadataForCards(recipe_ids_list = [1554861,1560677,1571559]):
capture_list = []
for recipe_id in recipe_ids_list:
url2 = f"https://api.spoonacular.com/recipes/{recipe_id}/information?apiKey={spoonacular_API}"
response = requests.get(url2)
response_json = response.json()
capture_list.append(response_json)
capture_list
#print(json.dumps(capture_list, indent=4, sort_keys=True))
recipe_meta = []
# ingredients stuff
for result in capture_list:
try:
recipe_id = result['id']
recipe_title = result['title']
analyzedInstructions = result['analyzedInstructions']
cooking_minutes = result['cookingMinutes']
image = result['image']
servings = result['servings']
source_url = result['sourceUrl']
except Exception as e:
print('--- error with something ---')
print(e)
continue
instruction_steps = analyzedInstructions[0]['steps']
recipe_steps = []
counter = 0
# INSTRUCTIONS ##############################
for item in instruction_steps:
counter = counter + 1
step = item['step']
numbered_step = f'{counter}. {step}'
recipe_steps.append(numbered_step)
# INFO ###############################
recipe_info = {
'recipe_id': recipe_id,
'recipe_title': recipe_title,
'cooking_minutes': cooking_minutes,
'image': image,
'servings': servings,
'steps': recipe_steps,
'source_url': source_url}
recipe_meta.append(recipe_info)
for_cards_df = | pd.DataFrame(recipe_meta) | pandas.DataFrame |
import numpy as np
import pandas as pd
import networkx as nx
import dill
from sklearn.preprocessing import LabelBinarizer
from feature_extraction.features import get_feature_extractor
# Read dataframe.
df = pd.read_csv('./data/trip_data/sampled.csv')
# Add column for merge with weather data.
merge_key = pd.to_datetime(df["lpep_pickup_datetime"], format='%Y-%m-%d %H:%M:%S').dt.strftime('%d-%m-%Y')
merge_key.name = 'key'
df = pd.concat((df, merge_key), axis=1)
# Parse weather data frame and pre-process.
df_weather = pd.read_csv('./data/weather_data/weather_data_nyc_centralpark_2016.csv')
df_weather['precipitation'] = df_weather['precipitation'].replace('T', 0.01).astype(float)
df_weather['snow fall'] = df_weather['snow fall'].replace('T', 0.01).astype(float)
df_weather['snow depth'] = df_weather['snow depth'].replace('T', 1).astype(float)
df_weather['date'] = pd.to_datetime(df_weather['date'], format='%d-%m-%Y').dt.strftime('%d-%m-%Y')
# Merge weather dataset.
df = | pd.merge(df, df_weather, left_on='key', right_on='date') | pandas.merge |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples([(Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103'))],
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
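# overlapping intervals: a scalar that falls in the overlap matches several
# positions, so get_loc returns an integer array instead of a single location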
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlap completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
assert Timestamp('2000-01-01T12') not in idx
assert Timestamp('2000-01-01T12') not in idx
target = date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
class TestIntervalRange(object):
def test_construction_from_numeric(self, closed, name):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
np.arange(0, 6), name=name, closed=closed)
result = interval_range(start=0, end=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=5, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
name=name, closed=closed)
result = interval_range(start=0, end=6, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=6, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
name=name, closed=closed)
result = interval_range(start=0, end=4, freq=1.5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2017-01-08')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with non-fixed freq
freq = 'M'
start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2018-01-15')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timedelta(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timedelta('1 day'), Timedelta('6 days')
breaks = timedelta_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timedelta('1 day'), Timedelta('7 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timedelta('7 days 1 hour')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
# float value for periods
expected = pd.interval_range(start=0, periods=10)
result = pd.interval_range(start=0, periods=10.5)
tm.assert_index_equal(result, expected)
# equivalent timestamp-like start/end
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pydatetime(),
end=end.to_pydatetime())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timestamp
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
DateOffset(days=1)]
for freq in equiv_freq:
result = pd.interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
# equivalent timedelta-like start/end
start, end = Timedelta(days=1), Timedelta(days=10)
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pytimedelta(),
end=end.to_pytimedelta())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 13:14:13 2020
@author: <NAME>
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import json
import copy
import shutil
import numpy as np
import pandas as pd
from datetime import timedelta
from datetime import datetime
from sklearn.preprocessing import MaxAbsScaler
from keras.utils import np_utils as ku
from nltk.util import ngrams
import utils.support as sup
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, GRU, Dropout, Flatten
from tensorflow.keras.layers import Conv1DTranspose, AveragePooling1D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
from pickle import dump, load
import warnings
class DeepLearningGenerator():
"""
This class discovers, trains and persists a deep-learning generator for inter-arrival times
"""
def __init__(self, ia_train, ia_valdn, parms):
"""constructor"""
self.temp_output = os.path.join('output_files', sup.folder_id())
if not os.path.exists(self.temp_output):
os.makedirs(self.temp_output)
self.ia_train = ia_train
self.ia_valdn = ia_valdn
self.parms = parms
self.model_metadata = dict()
self.is_safe = True
self._load_model()
# @safe_exec
def _load_model(self) -> None:
model_path = os.path.join(self.parms['ia_gen_path'],
self.parms['file'].split('.')[0]+'_dl.h5')
# Save path(s) if the model exists
self.model_path = model_path
model_exist = os.path.exists(model_path)
self.parms['model_path'] = model_path
# Discover and compare
if not model_exist or self.parms['update_ia_gen']:
scaler = MaxAbsScaler()
scaler.fit(self.ia_train[['inter_time']])
acc = self._discover_model(scaler)
save, metadata_file = self._compare_models(acc,
self.parms['update_ia_gen'], model_path)
if save:
self._save_model(metadata_file, acc)
else:
shutil.rmtree(self.temp_output)
# Save basic features scaler
name = self.model_path.replace('_dl.h5', '')
dump(scaler, open(name+'_ia_scaler.pkl','wb'))
def _compare_models(self, acc, model_exist, file):
metadata_file = os.path.join(self.parms['ia_gen_path'],
self.parms['file'].split('.')[0]+'_dl_meta.json')
# compare with existing model
save = True
if model_exist:
# Loading of parameters from existing model
if os.path.exists(metadata_file):
with open(metadata_file) as file:
data = json.load(file)
if data['loss'] < acc['loss']:
save = False
return save, metadata_file
# @safe_exec
def _discover_model(self, scaler):
n_size = 5
ia_valdn = copy.deepcopy(self.ia_valdn)
# Transform features
self.ia_train = self._transform_features(self.ia_train, scaler)
ia_valdn = self._transform_features(ia_valdn, scaler)
columns = self.ia_train.columns
# vectorization
serie_trn, y_serie_trn = self._vectorize(self.ia_train, columns, n_size)
serie_val, y_serie_val = self._vectorize(ia_valdn, columns, n_size)
# model training
model = self._create_model(serie_trn.shape[1],
serie_trn.shape[2], 1)
model = self._train_model(model,
(serie_trn, y_serie_trn),
(serie_val, y_serie_val))
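# model.evaluate(..., return_dict=True) returns a dict of metric values keyed by
# name (e.g. 'loss'), which is what _compare_models reads when ranking models.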
return model.evaluate(x=serie_val, y=y_serie_val, return_dict=True)
def _save_model(self, metadata_file, acc):
# best structure mining parameters
self.model_metadata['loss'] = acc['loss']
self.model_metadata['generated_at'] = (
datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
# Copy best model to destination folder
destination = os.path.join(self.parms['ia_gen_path'],
self.parms['file'].split('.')[0]+'_dl.h5')
source = os.path.join(self.temp_output,
self.parms['file'].split('.')[0]+'_dl.h5')
shutil.copyfile(source, destination)
# Save metadata
sup.create_json(self.model_metadata, metadata_file)
# clean output folder
shutil.rmtree(self.temp_output)
def _vectorize(self, log, cols, ngram_size):
"""
Dataframe vectorizer: builds fixed-length n-gram windows for the network.
Args:
    log: dataframe holding the records to vectorize.
    cols: list of feature columns to vectorize.
    ngram_size: length of the sliding window.
Returns:
    tuple: the input windows and the expected inter-arrival times.
"""
vec = dict()
week_col = 'weekday'
exp_col = ['inter_time']
log = log[cols]
num_samples = len(log)
dt_prefixes = list()
dt_expected = list()
# for key, group in log.groupby('caseid'):
dt_prefix = pd.DataFrame(0, index=range(ngram_size), columns=cols)
dt_prefix = pd.concat([dt_prefix, log], axis=0)
dt_prefix = dt_prefix.iloc[:-1]
dt_expected.append(log[exp_col])
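# Each sample i uses the `ngram_size` rows preceding it as its input window; the
# zero-filled prefix concatenated above pads the earliest windows of the log.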
for nr_events in range(0, num_samples):
tmp = dt_prefix.iloc[nr_events:nr_events+ngram_size].copy()
tmp['ngram_num'] = nr_events
dt_prefixes.append(tmp)
dt_prefixes = pd.concat(dt_prefixes, axis=0, ignore_index=True)
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx._ndarray_values)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx._ndarray_values))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="A"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "M"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq="D")
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = pd.date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
data = pd.Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
msg = "specified freq and dtype are different"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "M"
with pytest.raises(ValueError, match="freq not specified"):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array([Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
[pd.NaT, pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
[
pd.NaT,
pd.NaT,
Period("2011-01", freq="M"),
Period("2011-01", freq="M"),
]
)
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([pd.NaT, pd.NaT])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array(["NaT", "NaT"]))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
)
# first element is pd.NaT
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
)
def test_constructor_mixed(self):
idx = PeriodIndex(["2011-01", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(["NaT", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period("2011-01-01", freq="D"), pd.NaT, "2012-01-01"])
exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D")
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range("2007-01", name="p", periods=2, freq="M")
result = idx._simple_new(idx, name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype("i8"), name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq="M", name="p")
result = idx._simple_new(idx, name="p", freq="M")
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
msg = r"PeriodIndex\._simple_new does not accept floats"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex._simple_new(floats, freq="M")
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex(floats, freq="M")
def test_constructor_nat(self):
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start="NaT", end="2011-01-01", freq="M")
with pytest.raises(ValueError, match=msg):
period_range(start="2011-01-01", end="NaT", freq="M")
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ["%dQ%d" % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
@pytest.mark.parametrize(
"func, warning", [(PeriodIndex, FutureWarning), (period_range, None)]
)
def test_constructor_freq_mult(self, func, warning):
# GH #7811
with tm.assert_produces_warning(warning):
# must be the same, but for sure...
pidx = func(start="2014-01", freq="2M", periods=4)
expected = PeriodIndex(["2014-01", "2014-03", "2014-05", "2014-07"], freq="2M")
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(start="2014-01-02", end="2014-01-15", freq="3D")
expected = PeriodIndex(
["2014-01-02", "2014-01-05", "2014-01-08", "2014-01-11", "2014-01-14"],
freq="3D",
)
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(end="2014-01-01 17:00", freq="4H", periods=3)
expected = PeriodIndex(
["2014-01-01 09:00", "2014-01-01 13:00", "2014-01-01 17:00"], freq="4H"
)
tm.assert_index_equal(pidx, expected)
msg = "Frequency must be positive, because it" " represents span: -1M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="-1M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="0M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
period_range("2011-01", periods=3, freq="0M")
@pytest.mark.parametrize("freq", ["A", "M", "D", "T", "S"])
@pytest.mark.parametrize("mult", [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq):
freqstr = str(mult) + freq
pidx = period_range(start="2014-04-01", freq=freqstr, periods=10)
expected = date_range(start="2014-04-01", freq=freqstr, periods=10).to_period(
freqstr
)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ["1D1H", "1H1D"]:
pidx = PeriodIndex(["2016-01-01", "2016-01-02"], freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 00:00"], freq="25H")
for freq in ["1D1H", "1H1D"]:
pidx = period_range(start="2016-01-01", periods=2, freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 01:00"], freq="25H")
tm.assert_index_equal(pidx, expected)
def test_constructor_range_based_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
pi = PeriodIndex(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
def test_constructor_range_based_deprecated_different_freq(self):
with tm.assert_produces_warning(FutureWarning) as m:
PeriodIndex(start="2000", periods=2)
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/11/4 17:39
describe: strong-stock sensor for the A-share market
"""
import os
import os.path
import traceback
import inspect
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import timedelta, datetime
from collections import Counter
from tqdm import tqdm
from typing import Callable
from czsc.objects import Event
from czsc.utils import io
from czsc.data.ts_cache import TsDataCache, Freq
from czsc.sensors.utils import get_index_beta, generate_signals, max_draw_down, turn_over_rate
from czsc.utils import WordWriter
plt.style.use('ggplot')
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
def selected_filter_by_index(dc: TsDataCache, dfg: pd.DataFrame, index_code=None):
"""Filter by index constituents
:param dc: data cache object
:param dfg: strong-stock selection result for a single trading day
:param index_code: index code
:return: filtered selection result
"""
if not index_code or dfg.empty:
return dfg
assert dfg['trade_date'].nunique() == 1
trade_date = dfg['trade_date'].max()
index_members = dc.index_weight(index_code, trade_date)
ts_codes = list(index_members['con_code'].unique())
return dfg[dfg.ts_code.isin(ts_codes)]
def selected_filter_by_concepts(dc, dfg, top_n=20, min_n=3, method='v1'):
"""Filter by concept-sector clustering (sector effect)
:param dc: data cache object
:param dfg: strong-stock selection result for a single trading day
:param top_n: number of the most densely populated concepts to keep
:param min_n: a stock must carry at least this many of the top_n concepts
:param method: scoring method
    v1 score a concept by the number of strong stocks it contains
    v2 score = strong stocks in the concept / total stocks in the concept
:return: filtered selection result and the list of key concepts
"""
if dfg.empty or not top_n or not min_n:
return dfg, []
ths_members = dc.get_all_ths_members(exchange="A", type_="N")
ths_members = ths_members[~ths_members['概念名称'].isin([
'MSCI概念', '沪股通', '深股通', '融资融券', '上证180成份股', '央企国资改革',
'标普道琼斯A股', '中证500成份股', '上证380成份股', '沪深300样本股',
])]
ths_concepts = ths_members[ths_members.code.isin(dfg.ts_code)]
if method == 'v1':
key_concepts = [k for k, v in Counter(ths_concepts['概念名称'].to_list()).most_common(top_n)]
elif method == 'v2':
all_count = Counter(ths_members['概念名称'].to_list())
sel_count = Counter(ths_concepts['概念名称'].to_list())
df_scores = pd.DataFrame([{"concept": k, 'score': sel_count[k] / all_count[k]}
for k in sel_count.keys()])
key_concepts = df_scores.sort_values('score', ascending=False).head(top_n)['concept'].to_list()
else:
raise ValueError(f"method value error")
sel = ths_concepts[ths_concepts['概念名称'].isin(key_concepts)]
ts_codes = [k for k, v in Counter(sel.code).most_common() if v >= min_n]
dfg = dfg[dfg.ts_code.isin(ts_codes)]
dfg.loc[:, '概念板块'] = dfg.ts_code.apply(lambda x: ths_concepts[ths_concepts.code == x]['概念名称'].to_list())
dfg.loc[:, '概念数量'] = dfg['概念板块'].apply(len)
return dfg, key_concepts
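# Illustrative example (added for clarity, the numbers are made up): suppose 10 of the
# day's strong stocks carry concept "X" and the concept has 100 member stocks overall.
# With method='v1' concept "X" scores 10 (the count of strong stocks it contains); with
# method='v2' it scores 10 / 100 = 0.1, which normalizes away the size of large concepts.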
def selected_filter_by_market_value(dfg, min_total_mv=None):
"""Filter by total market value
:param dfg: strong-stock selection result for a single trading day
:param min_total_mv: minimum total market value, in units of 10,000 CNY (1e6 corresponds to 10 billion CNY)
:return: filtered selection result
"""
if dfg.empty or not min_total_mv:
return dfg
return dfg[dfg['total_mv'] >= min_total_mv]
def selected_filter_by_rps(dfg, n=21, v_range=(0.2, 0.8), max_count=-1):
"""Filter by the bNb strength value; b20b, for example, is the return over the previous 20 trading days
:param dfg: strong-stock selection result for a single trading day
:param n: look-back window used for the RPS calculation
:param v_range: selectable band of the RPS ranking
    the default 0.2 ~ 0.8 keeps stocks whose rank position falls between the 20% and 80% marks
:param max_count: maximum number of rows to keep
:return: filtered selection result
"""
if dfg.empty or (not max_count) or len(dfg) < max_count:
return dfg
rps_col = f"b{n}b"
# dfg = dfg.sort_values(rps_col, ascending=True)
# dfg = dfg.reset_index(drop=True)
# dfg = dfg.iloc[int(len(dfg) * v_range[0]): int(len(dfg) * v_range[1])]
# return dfg.tail(max_count)
split = v_range[1]
dfg = dfg.sort_values(rps_col, ascending=True)
head_i = int((len(dfg) - max_count) * split) + 1
tail_i = len(dfg) - int((len(dfg) - max_count) * (1 - split))
return dfg.iloc[head_i: tail_i]
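# Worked example (illustrative numbers): with len(dfg) == 100, max_count == 20 and
# v_range == (0.2, 0.8), split is 0.8, so head_i = int(80 * 0.8) + 1 = 65 and
# tail_i = 100 - int(80 * 0.2) = 84, returning dfg.iloc[65:84] — 19 rows, i.e. roughly
# max_count names taken from the upper-middle of the RPS-sorted strength ranking.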
def create_next_positions(dc: TsDataCache, dfg: pd.DataFrame):
"""Build the next-trading-day holdings detail for one day's selection result
:param dc: data cache object
:param dfg: strong-stock selection result for a single trading day
:return: holdings detail for the next trading day
"""
if dfg.empty:
return dfg
trade_cal = dc.trade_cal()
trade_cal = trade_cal[trade_cal.is_open == 1]
trade_dates = trade_cal.cal_date.to_list()
trade_date = dfg['trade_date'].iloc[0]
hold = dfg.copy()
hold['成分日期'] = trade_dates[trade_dates.index(trade_date.strftime("%Y%m%d")) + 1]
hold['持仓权重'] = 0.98 / len(dfg)
hold.rename({'ts_code': "证券代码", "close": "交易价格"}, inplace=True, axis=1)
hold = hold[['证券代码', '持仓权重', '交易价格', '成分日期']]
hold['成分日期'] = pd.to_datetime(hold['成分日期']).apply(lambda x: x.strftime("%Y/%m/%d"))
return hold
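# Illustrative sketch (not part of the original module): how the filters above can be
# chained for one trading day. `dc` is a TsDataCache and `dfg` the raw selection result
# for that day; the concrete parameter values below are assumptions, not prescribed defaults.
#
#     dfg = selected_filter_by_index(dc, dfg, index_code='000905.SH')
#     dfg, key_concepts = selected_filter_by_concepts(dc, dfg, top_n=20, min_n=3, method='v2')
#     dfg = selected_filter_by_market_value(dfg, min_total_mv=1e6)
#     dfg = selected_filter_by_rps(dfg, n=21, v_range=(0.2, 0.8), max_count=50)
#     hold = create_next_positions(dc, dfg)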
def plot_alpha_v1(beta_name, df_alpha, file_png) -> None:
"""Plot the alpha information as three stacked line charts
:param beta_name: name of the benchmark index
:param df_alpha: dataframe containing ['trade_date', 'beta', 'selector']
trade_date beta selector
0 2018-01-02 88.4782 93.471190
1 2018-01-03 45.8368 41.008785
2 2018-01-04 -0.4383 -132.660895
3 2018-01-05 45.0786 120.726060
4 2018-01-08 -0.6757 -17.231665
:param file_png: output image file name
:return: None
"""
plt.close()
fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(9, 5*3))
df_alpha['beta_curve'] = df_alpha['beta'].cumsum()
df_alpha['selector_curve'] = df_alpha['selector'].cumsum()
df_alpha['alpha_curve'] = df_alpha['selector_curve'] - df_alpha['beta_curve']
df_alpha.rename({'trade_date': 'date', 'beta_curve': f"beta_curve:{beta_name}"}, inplace=True, axis=1)
for i, col in enumerate(['alpha_curve', 'selector_curve', f"beta_curve:{beta_name}"], 0):
ax = axes[i]
sns.lineplot(x='date', y=col, data=df_alpha, ax=ax)
ax.text(x=df_alpha['date'].iloc[0], y=int(df_alpha[col].mean()),
s=f"{col}:{int(df_alpha[col].iloc[-1])}", fontsize=12)
ax.set_title(f"{col}", loc='center')
ax.set_xlabel("")
plt.savefig(file_png, bbox_inches='tight', dpi=100)
plt.close()
def plot_alpha_v2(beta_name, df_alpha, file_png) -> None:
"""Plot the alpha information as line curves on a single chart
:param beta_name: name of the benchmark index
:param df_alpha: dataframe containing ['trade_date', 'beta', 'selector']
trade_date beta selector
0 2018-01-02 88.4782 93.471190
1 2018-01-03 45.8368 41.008785
2 2018-01-04 -0.4383 -132.660895
3 2018-01-05 45.0786 120.726060
4 2018-01-08 -0.6757 -17.231665
:param file_png: output image file name
:return: None
"""
df_alpha['beta_curve'] = df_alpha['beta'].cumsum()
df_alpha['selector_curve'] = df_alpha['selector'].cumsum()
df_alpha['alpha_curve'] = df_alpha['selector_curve'] - df_alpha['beta_curve']
df_alpha.rename({'trade_date': 'date', 'beta_curve': f"beta_curve:{beta_name}"}, inplace=True, axis=1)
plt.close()
plt.figure(figsize=(9, 5))
sns.lineplot(x='date', y='alpha_curve', data=df_alpha)
sns.lineplot(x='date', y='selector_curve', data=df_alpha)
sns.lineplot(x='date', y=f"beta_curve:{beta_name}", data=df_alpha)
plt.legend(labels=['超额', '选股', f"基准{beta_name}"])
plt.savefig(file_png, bbox_inches='tight', dpi=100)
def plot_alpha_v3(beta_name, df_alpha, file_png) -> None:
"""Plot the alpha information in a MACD-like style (daily alpha bars plus cumulative curves)
:param beta_name: name of the benchmark index
:param df_alpha: dataframe containing ['trade_date', 'beta', 'selector']
trade_date beta selector
0 2018-01-02 88.4782 93.471190
1 2018-01-03 45.8368 41.008785
2 2018-01-04 -0.4383 -132.660895
3 2018-01-05 45.0786 120.726060
4 2018-01-08 -0.6757 -17.231665
:param file_png: output image file name
:return: None
"""
df_alpha['beta_curve'] = df_alpha['beta'].cumsum()
df_alpha['selector_curve'] = df_alpha['selector'].cumsum()
df_alpha['alpha'] = df_alpha['selector'] - df_alpha['beta']
df_alpha['alpha_curve'] = df_alpha['selector_curve'] - df_alpha['beta_curve']
df_alpha.rename({'trade_date': 'date', 'beta_curve': f"beta_curve:{beta_name}"}, inplace=True, axis=1)
plt.close()
plt.figure(figsize=(9, 5))
x = df_alpha['date']
plt.bar(x, height=df_alpha['alpha'], width=0.01, color='blue', label='alpha')
plt.plot(x, df_alpha['alpha_curve'], label='alpha_curve')
plt.plot(x, df_alpha['selector_curve'], label='selector_curve')
plt.plot(x, df_alpha[f"beta_curve:{beta_name}"], label=f"beta_curve:{beta_name}")
plt.legend()
plt.savefig(file_png, bbox_inches='tight', dpi=100)
class StocksDaySensor:
"""Strong-stock sensor that uses the daily bar as its base period
Input: full market quotes for individual stocks plus concept-sector membership data
Output: list of strong stocks together with their concept-sector distribution
"""
def __init__(self,
experiment_path: str,
sdt: str,
edt: str,
dc: TsDataCache,
strategy: Callable,
signals_n: int = 0,
):
self.name = self.__class__.__name__
self.version = "V20220404"
self.experiment_path = experiment_path
self.results_path = os.path.join(experiment_path, f"{strategy()[1]().name}_{sdt}_{edt}")
self.signals_path = os.path.join(experiment_path, 'signals')
os.makedirs(self.experiment_path, exist_ok=True)
os.makedirs(self.results_path, exist_ok=True)
os.makedirs(self.signals_path, exist_ok=True)
self.sdt = sdt
self.edt = edt
self.verbose = os.environ.get('verbose', False)
self.strategy = strategy
self.signals_n = signals_n
self.get_signals, self.get_event = strategy()
self.event: Event = self.get_event()
self.base_freq = Freq.D.value
self.freqs = [Freq.W.value, Freq.M.value]
self.file_docx = os.path.join(self.results_path, f'{self.event.name}_{sdt}_{edt}.docx')
writer = WordWriter(self.file_docx)
if not os.path.exists(self.file_docx):
writer.add_title("股票选股强度验证")
writer.add_page_break()
writer.add_heading(f"{datetime.now().strftime('%Y-%m-%d %H:%M')} {self.event.name}", level=1)
writer.add_heading("参数配置", level=2)
writer.add_paragraph(f"测试方法描述:{self.event.name}")
writer.add_paragraph(f"测试起止日期:{sdt} ~ {edt}")
writer.add_paragraph(f"信号计算函数:\n{inspect.getsource(self.get_signals)}")
writer.add_paragraph(f"事件具体描述:\n{inspect.getsource(self.get_event)}")
writer.save()
with open(os.path.join(self.results_path, f"{strategy.__name__}.txt"), mode='w') as f:
f.write(inspect.getsource(strategy))
self.writer = writer
self.dc = dc
self.betas = ['000905.SH', '000300.SH', '399006.SZ']
get_index_beta(dc, sdt, edt, freq='D', indices=self.betas,
file_xlsx=os.path.join(self.results_path, 'betas.xlsx'))
file_dfm = os.path.join(self.results_path, f'df_event_matched_{sdt}_{edt}.pkl')
file_dfb = os.path.join(self.experiment_path, f'df_all_bars_{sdt}_{edt}.pkl')
if os.path.exists(file_dfm):
self.dfm = io.read_pkl(file_dfm)
self.dfb = io.read_pkl(file_dfb)
else:
self.dfm, self.dfb = self.get_stock_strong_days()
io.save_pkl(self.dfm, file_dfm)
io.save_pkl(self.dfb, file_dfb)
self.nb_cols = [x for x in self.dfb.columns if x[0] == 'n' and x[-1] == 'b']
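# nb_cols collects the columns of the n-bars table whose names follow the n...b
# pattern (e.g. 'n1b', 'n5b'); following czsc conventions these are assumed to be
# forward n-bar returns, but their exact definition comes from that library.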
def get_share_strong_days(self, ts_code: str, name: str):
"""Get all strong-signal dates for a single instrument"""
dc = self.dc
event = self.event
sdt = self.sdt
edt = self.edt
file_signals = os.path.join(self.signals_path, f"{ts_code}.pkl")
if os.path.exists(file_signals):
signals, n_bars = io.read_pkl(file_signals)
if self.verbose:
print(f"get_share_strong_days: load signals from {file_signals}")
else:
start_date = pd.to_datetime(self.sdt) - timedelta(days=3000)
bars = dc.pro_bar(ts_code=ts_code, start_date=start_date, end_date=edt, freq='D', asset="E", raw_bar=True)
n_bars = dc.pro_bar(ts_code=ts_code, start_date=sdt, end_date=edt, freq='D', asset="E", raw_bar=False)
signals = generate_signals(bars, sdt, self.base_freq, self.freqs, self.get_signals,
signals_n=self.signals_n)
io.save_pkl([signals, n_bars], file_signals)
nb_dicts = {row['trade_date'].strftime("%Y%m%d"): row for row in n_bars.to_dict("records")}
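# nb_dicts maps each trade date (as 'YYYYMMDD') to its row in the n_bars feature
# table, so matched signal days can be joined back to those features below.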
event_matched = []
for s in signals:
m, f = event.is_match(s)
if m:
nb_info = nb_dicts.get(s['dt'].strftime("%Y%m%d"), None)
r = {'name': name, 'event_match': True, 'factor_match': f}
if nb_info:
r.update(nb_info)
event_matched.append(r)
dfs = pd.DataFrame(event_matched)
if event_matched:
df_ = dc.daily_basic(ts_code, sdt, dc.edt)
df_['trade_date'] = pd.to_datetime(df_['trade_date'])
dfs = dfs.merge(df_[['trade_date', 'total_mv']], on='trade_date', how='left')
dfs = dfs[pd.to_datetime(sdt) <= dfs['trade_date']]
dfs = dfs[dfs['trade_date'] <= pd.to_datetime(edt)]
print(f"{ts_code} - {name}: {len(dfs)}")
return dfs, n_bars
def get_stock_strong_days(self):
"""Get the strong-signal dates for every stock"""
stocks = self.dc.stock_basic()
all_matched = []
all_bars = []
for row in tqdm(stocks.to_dict('records'), desc="get_stock_strong_days"):
ts_code = row['ts_code']
name = row['name']
try:
dfs, n_bars = self.get_share_strong_days(ts_code, name)
all_matched.append(dfs)
all_bars.append(n_bars)
except:
print(f"get_share_strong_days error: {ts_code}, {name}")
traceback.print_exc()
dfm = pd.concat(all_matched, ignore_index=True)
dfb = pd.concat(all_bars, ignore_index=True)
return dfm, dfb
# County Housing Vacancy Raw Numbers
# Source: Census (census.data.gov) advanced search (Topics: 'Housing-Vacancy-Vacancy Rates' ('Vacancy Status' tabl); Geography: All US Counties; Years: 2010-2018 ACS 5-Yr. Estimates)
import pandas as pd
import numpy as np
import os
master_df = pd.DataFrame()
counter = 0
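# Assumption: each ACS vintage downloaded from data.census.gov ships as three files
# (data, metadata and a table-title overlay), so taking every third directory entry
# below keeps only the data CSVs; this relies on the listing being grouped that way.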
for file in os.listdir('County Vacancy Raw 5yr')[::3]:
year = int(file[7:11])
print(year)
data_df = pd.read_csv('County Vacancy Raw 5yr/' + file, skiprows=1, usecols=[0,1,2,4,6,8,10])
"""Module for loading results from decoding experiments."""
import json
from pathlib import Path
from typing import Optional, Union
import mne_bids
import numpy as np
import pandas as pd
import pte
import pte_stats
def load_results_singlechannel(
files_or_dir: Union[str, list, Path],
scoring_key: str = "balanced_accuracy",
average_runs: bool = False,
) -> pd.DataFrame:
"""Load results from *results.csv"""
# Create Dataframes from Files
files_or_dir = _handle_files_or_dir(
files_or_dir=files_or_dir, extensions="results.csv"
)
results = []
for file in files_or_dir:
subject = mne_bids.get_entities_from_fname(file, on_error="ignore")[
"subject"
]
data: pd.DataFrame = pd.read_csv( # type: ignore
file,
index_col="channel_name",
header=0,
usecols=["channel_name", scoring_key],
)
for ch_name in data.index.unique():
score = (
data.loc[ch_name]
.mean(numeric_only=True)
.values[0] # type: ignore
)
results.append([subject, ch_name, score])
columns = [
"Subject",
"Channel",
scoring_key,
]
columns = _normalize_columns(columns)
data_out = pd.DataFrame(results, columns=columns)
if average_runs:
data_out = data_out.set_index(["Subject", "Channel"]).sort_index()
results_average = []
for ind in data_out.index.unique():
result_av = data_out.loc[ind].mean(numeric_only=True).values[0]
results_average.append([*ind, result_av])
data_out = pd.DataFrame(results_average, columns=columns)
return data_out
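

# --- Illustrative usage (editor's sketch, not part of the original module) ---
# The directory below is a hypothetical placeholder for a folder containing
# *results.csv files; the scoring key matches the default used above.
def _example_load_singlechannel_results() -> pd.DataFrame:
    """Minimal sketch: load per-channel scores and average over runs."""
    return load_results_singlechannel(
        files_or_dir="/path/to/decoding/results",  # hypothetical directory
        scoring_key="balanced_accuracy",
        average_runs=True,
    )
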
def load_results(
files_or_dir: Union[str, list, Path],
scoring_key: str = "balanced_accuracy",
average_results: bool = True,
) -> pd.DataFrame:
"""Load prediction results from *results.csv"""
# Create Dataframes from Files
files_or_dir = _handle_files_or_dir(
files_or_dir=files_or_dir, extensions="results.csv"
)
results = []
for file in files_or_dir:
data_raw = pd.read_csv(file, index_col=[0], header=[0])
data: pd.DataFrame = pd.melt(
data_raw, id_vars=["channel_name"], value_vars=[scoring_key]
)
accuracies = []
for ch_name in data["channel_name"].unique():
accuracies.append(
[
"LFP" if "LFP" in ch_name else "ECOG",
data[data.channel_name == ch_name]
.mean(numeric_only=True)
.value, # type: ignore
]
)
df_acc = pd.DataFrame(accuracies, columns=["Channels", scoring_key])
df_lfp = df_acc[df_acc["Channels"] == "LFP"]
df_ecog = df_acc[df_acc["Channels"] == "ECOG"]
subject = mne_bids.get_entities_from_fname(file, on_error="ignore")[
"subject"
]
values = [
file,
subject,
"OFF" if "MedOff" in file else "ON",
"OFF" if "StimOff" in file else "ON",
]
results.extend(
[
values + ["LFP", df_lfp[scoring_key].max()],
values + ["ECOG", df_ecog[scoring_key].max()],
]
)
columns = [
"Filename",
"Subject",
"Medication",
"Stimulation",
"Channels",
scoring_key,
]
columns = _normalize_columns(columns)
df_raw = pd.DataFrame(results, columns=columns)
if not average_results:
return df_raw
scoring_key = _normalize_columns([scoring_key])[0]
results_average = []
for ch_name in df_raw["Channels"].unique():
df_ch = df_raw.loc[df_raw["Channels"] == ch_name]
for subject in df_ch["Subject"].unique():
df_subj = df_ch.loc[df_ch["Subject"] == subject]
series_single = pd.Series(
df_subj.iloc[0].values, index=df_subj.columns
).drop("Filename")
series_single[scoring_key] = df_subj[scoring_key].mean()
results_average.append(series_single)
df_average = pd.DataFrame(results_average)
return df_average
def _normalize_columns(columns: list[str]) -> list[str]:
"""Normalize column names."""
new_columns = [
"".join([substr.capitalize() for substr in col.split("_")])
for col in columns
]
return new_columns
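# Editor's note (example): _normalize_columns(["balanced_accuracy", "Subject"])
# returns ["BalancedAccuracy", "Subject"]; each underscore-separated part is
# capitalized and the parts are joined without a separator.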
def _handle_files_or_dir(
files_or_dir: Union[str, list, Path],
extensions: Optional[Union[str, list]] = None,
) -> list:
"""Handle different cases of files_or_dir."""
if isinstance(files_or_dir, list):
return files_or_dir
file_finder = pte.filetools.get_filefinder(datatype="any")
file_finder.find_files(
directory=files_or_dir,
extensions=extensions,
verbose=True,
)
return file_finder.files
def _load_labels_single(
fpath: Union[str, Path],
baseline: Optional[
tuple[Optional[Union[int, float]], Optional[Union[int, float]]]
],
baseline_mode: Optional[str],
base_start: Optional[int],
base_end: Optional[int],
) -> pd.DataFrame:
"""Load time-locked predictions from single file."""
entities = mne_bids.get_entities_from_fname(fpath, on_error="ignore")
with open(fpath, "r", encoding="utf-8") as file:
data = json.load(file)
label_name = data["TargetName"]
label_arr = np.stack(data["Target"], axis=0)
if baseline is not None:
label_arr = pte_stats.baseline_correct(
label_arr, baseline_mode, base_start, base_end
)
labels = pd.DataFrame(
data=[
[
entities["subject"],
entities["session"],
entities["task"],
entities["run"],
entities["acquisition"],
label_name,
label_arr,
]
],
columns=[
"Subject",
"Session",
"Task",
"Run",
"Acquisition",
"Channel Name",
"Data",
],
)
return labels
def load_predictions(
files_or_dir: Union[str, list, Path],
mode: str = "predictions",
sfreq: Optional[Union[int, float]] = None,
baseline: Optional[
tuple[Optional[Union[int, float]], Optional[Union[int, float]]]
] = None,
baseline_mode: str = "zscore",
average_predictions: bool = False,
concatenate_runs: bool = True,
average_runs: bool = False,
) -> pd.DataFrame:
"""Load time-locked predictions."""
files_or_dir = _handle_files_or_dir(
files_or_dir=files_or_dir, extensions="predictions_timelocked.json"
)
base_start, base_end = pte_stats.handle_baseline(
baseline=baseline, sfreq=sfreq
)
df_list = []
for fpath in files_or_dir:
if mode == "predictions":
df_single: pd.DataFrame = _load_predictions_single(
fpath=fpath,
baseline=baseline,
baseline_mode=baseline_mode,
base_start=base_start,
base_end=base_end,
)
elif mode == "targets":
df_single: pd.DataFrame = _load_labels_single(
fpath=fpath,
baseline=baseline,
baseline_mode=baseline_mode,
base_start=base_start,
base_end=base_end,
)
else:
raise ValueError(
"`mode` must be one of either `targets` or "
f"`predictions. Got: {mode}."
)
if average_predictions:
df_single["Data"] = (
df_single["Data"]
.apply(np.mean, axis=0)
.apply(np.expand_dims, axis=0)
)
df_list.append(df_single)
    df_all: pd.DataFrame = pd.concat(objs=df_list)
import re
import json
import datetime
import xlwings as xw
import pandas as pd
import frontur_utilities as df
import frontur_excel_addin.utility_functions as uf
from frontur_utilities.solver_df import df_solver
@xw.func
@xw.ret(index=False, expand='table')
def sample():
"""Elimina todos los espacios de la columna seleccionada"""
data_frame = df.df_fileloader.load_agenda(df.const.FRONTUR_FILE_PATH)
data_frame = df.extract_methods.add_plane_data(data_frame, df.const.PLANES_DATA_FILE_PATH)
data_frame = df.extract_methods.format_dates(data_frame)
data_frame = df.extract_methods.select_days(data_frame, df.const.DAYS_FILE_PATH)
return uf.encode_dataframe(data_frame.iloc[:1])
@xw.func
@xw.arg('data_frame', pd.DataFrame, header=True, index=False, dates=datetime.datetime)
@xw.ret(index=False, expand='table')
def expand_date_intervals(data_frame):
"""Elimina todos los espacios de la columna seleccionada"""
data_frame = uf.decode_dataframe(data_frame)
data_frame = df.extract_methods.format_dates(data_frame)
return uf.encode_dataframe(data_frame.sort_values(by=[df.const.DF_DAY_COL_NAME]))
@xw.func
@xw.arg('data_frame', pd.DataFrame, header=True, index=False, dates=datetime.datetime)
@xw.arg('dates', ndim=2, doc='argument')
@xw.arg('airport', ndim=1, doc='argument')
@xw.ret(index=False, expand='table')
def get_flights(data_frame, dates, airport, date_format=df.const.DF_DATAFRAME_DAY_FORMAT):
"""Elimina todos los espacios de la columna seleccionada"""
data_frame = uf.decode_dataframe(data_frame)
data_frame = df.extract_methods.select_airport(data_frame, airport[0])
data_frame = df.extract_methods.format_dates(data_frame, df.const.DF_WEEKDAY_COL_NAME)
dates = df.utility.flatten(dates)
dates = [ str(x.strftime(date_format)) for x in dates]
data_frame = df.df_utility.select_rows(data_frame, dict([
('day_column_name', df.const.DF_DAY_COL_NAME),
('format', date_format),
('days', dates)
]),
# days=dates
)
data_frame[df.const.DF_PLANE_COL_NAME] = data_frame[df.const.DF_PLANE_COL_NAME].apply(lambda x: re.sub(r"\.\d+", '', str(x)))
data_frame = df.extract_methods.add_plane_data(data_frame, df.const.PLANES_DATA_FILE_PATH)
return uf.encode_dataframe(data_frame)
@xw.func
@xw.arg('data_frame', pd.DataFrame, header=True, index=False, dates=datetime.datetime)
@xw.arg('target_column', ndim=1, doc='argument')
@xw.arg('date_format', ndim=1, doc='argument')
@xw.arg('dates', ndim=2, doc='argument')
@xw.ret(index=False, expand='table')
def pick_selected_dates(data_frame, target_column, dates, date_format):
data_frame = uf.decode_dataframe(data_frame)
dates = df.utility.flatten(dates)
dates = [ str(x.strftime(date_format[0])) for x in dates]
data_frame = df.df_utility.select_rows(data_frame, dict([
('day_column_name', target_column[0]),
('format', date_format[0]),
('days', dates)
]),
days=dates
)
return data_frame
@xw.func
@xw.arg('first_data_frame', pd.DataFrame, header=True, index=False, dates=datetime.datetime)
@xw.arg('second_data_frame', pd.DataFrame, header=True, index=False, dates=datetime.datetime)
@xw.arg('target_column', ndim=1)
@xw.ret(index=False, expand='table')
def inner_merge(first_data_frame, second_data_frame, target_column):
first_data_frame = uf.decode_dataframe(first_data_frame)
second_data_frame = uf.decode_dataframe(second_data_frame)
data_frame = pd.merge(first_data_frame, second_data_frame, how='inner', on=target_column)
return uf.encode_dataframe(data_frame)
@xw.func
@xw.arg('data_frame', pd.DataFrame, index=False)
@xw.arg('reference_column', ndim=1, doc='argument')
@xw.arg('target_column', ndim=1, doc='argument')
@xw.arg('values', ndim=2, doc='argument')
@xw.arg('replace_value', ndim=1, doc='argument')
@xw.ret(index=False, expand='table')
def substitute_rows(data_frame, reference_column, target_column, values, replace_value):
data_frame = uf.decode_dataframe(data_frame)
values = df.utility.flatten(values)
df.df_utility.substitute_rows(data_frame, dict([
('column_name', target_column[0]),
('reference_column_name', reference_column[0]),
('reference_column_values', values),
('replace_value', replace_value[0])
]))
return uf.encode_dataframe(data_frame)
@xw.func
@xw.arg('cell_matrix', ndim=2, doc='argument')
@xw.ret(index=False)
def assemble_cells(*cell_matrix):
"""Elimina todos los espacios de la columna seleccionada"""
cell_matrix = df.utility.flatten(cell_matrix)
data_frame = pd.DataFrame(cell_matrix[1:], columns=cell_matrix[0])
return data_frame
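# Editor's note (illustrative): called from Excel with a 2-D range such as
# [["name", "value"], ["a", 1], ["b", 2]], assemble_cells builds a DataFrame
# using the first row as the header and the remaining rows as data.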
@xw.func(async_mode='threading')
@xw.arg('data_frame', pd.DataFrame, header=True, index=False, dates=datetime.datetime)
@xw.ret(index=False, expand='table')
def get_interviews(data_frame):
data_frame = uf.decode_dataframe(data_frame)
data_frame = df_solver(data_frame, no_groups=True, parameters={
'workday_time': pd.Timedelta(hours=8).seconds,
'rest_time': pd.Timedelta(minutes=10).seconds,
        'execution_time_limit': pd.Timedelta(minutes=15)
import pandas as pd
from ml_toolkit.time_series import GroupTimeSeriesSplit
if __name__ == '__main__':
    a = pd.DataFrame({'value': [1, 1, 17, 1, 2, 2, 22, 20, 4, 5, 4, 4, 8, 1, 2, 4]})
"""
Extract metaphorical/non-metaphorical verbs, their arguments, and contexts
(sentences) from the VU Amsterdam corpus
"""
import json
import os
from tqdm import tqdm
tqdm.pandas()
from bs4 import BeautifulSoup
import pandas as pd
from gensim.utils import simple_preprocess
from pycorenlp import StanfordCoreNLP
nlp = None
GENRE_MAP = {
'ACPROSE': 'academic',
'NEWS': 'news',
'FICTION': 'fiction',
'CONVRSN': 'conversation'
}
def normalize_whitespace(x):
return ' '.join(x.strip().split())
def load_vuamc(filepath):
with open(filepath, 'r') as vuamc_f:
vuamc = BeautifulSoup(vuamc_f.read(), 'lxml')
return vuamc.find('text')
def load_lemmas(jsonlines_f):
verb_lemmas = []
ids = []
with open(jsonlines_f, 'r') as jf:
for line in jf:
lemma_json = json.loads(line)
verb_lemmas.append(lemma_json['x']['U-lemmaByPOS'])
ids.append(lemma_json['id'])
return pd.DataFrame({'verb_lemma': verb_lemmas, 'id': ids})
def split_text_segment_id(text_segment_id):
"""
Get the bnc part and fragment id given a text segment id.
"""
bnc_part, frg_n = text_segment_id.split('-')
assert len(bnc_part) == 3, text_segment_id
assert frg_n.startswith('fragment')
return bnc_part.upper(), int(frg_n[8:])
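# Editor's note (example): split_text_segment_id("a1e-fragment01") returns
# ("A1E", 1): the BNC file code upper-cased and the fragment number parsed
# from the "fragmentNN" suffix.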
def load_ets_annot(corpus_fname, index_fname):
"""
Load ETS training data.
Corpus_fname contains ids and gold labels; index_fname contains ids and
genres. We need to consolidate both.
"""
ids = []
ys = []
with open(corpus_fname, 'r') as fin:
for line in fin:
ljson = json.loads(line)
ids.append(ljson['id'])
ys.append(ljson['y'])
test_labels = pd.DataFrame({'id': ids, 'y': ys})
with open(index_fname, 'r') as fin:
vals = pd.read_csv(fin)
colnames = [('text_segment_id', str), ('sentence_number',
str), ('sentence_offset', int),
('word_offset', int), ('subword_offset', int), ('verb', str)]
for (colname,
coltype), series in zip(colnames,
zip(*vals.id.apply(lambda x: x.split('_')))):
vals[colname] = pd.Series(series).astype(coltype)
# Also get BNC data
vals['bnc_file'], vals['bnc_file_n'] = \
zip(*vals.text_segment_id.apply(split_text_segment_id))
vals = vals.merge(test_labels, on='id')
return vals
def load_ets_test(test_fname):
"""
Load ETS test data.
"""
vals = pd.read_csv(test_fname, names=['id', 'y'])
colnames = [('text_segment_id', str), ('sentence_number', str),
('sentence_offset', int), ('word_offset',
int), ('subword_offset', int)]
for (colname,
coltype), series in zip(colnames,
zip(*vals.id.apply(lambda x: x.split('_')))):
vals[colname] = pd.Series(series).astype(coltype)
vals['bnc_file'], vals['bnc_file_n'] = \
zip(*vals.text_segment_id.apply(split_text_segment_id))
return vals
def load_fragment(soup, fragment, cache):
"""
Load and cache a BNC fragment, given the id `fragment`.
"""
if fragment in cache:
return cache[fragment]
fragment_xml = soup.find('text', {'xml:id': fragment})
cache[fragment] = fragment_xml
return fragment_xml
def get_sentence(soup, ets_series, cache, get_verb=False):
"""
Given an ETS example `ets_series`, find the corresponding fragment, and
retrieve the sentence corresponding to the ETS example.
"""
frg = load_fragment(soup, ets_series.text_segment_id, cache)
sentence = frg.find('s', {'n': ets_series.sentence_number})
if get_verb:
tokenized, raw_tokens = tokenize_vuamc(sentence, raw=True)
# Offset starts from 1
verb = raw_tokens[ets_series['word_offset'] - 1].lower()
return tokenized, raw_tokens, verb
tokenized, raw_tokens = tokenize_vuamc(sentence, raw=True)
return tokenized, raw_tokens
def load_bncf(ets_series, bnc_xml_folder):
"""
Load BNC file and convert to BeautifulSoup.
"""
bncf = ets_series.bnc_file
path_to_bncf = os.path.join(bnc_xml_folder, bncf[0], bncf[0:2],
'{}.xml'.format(bncf))
with open(path_to_bncf, 'r') as bnc_fin:
return BeautifulSoup(bnc_fin.read(), 'lxml')
def is_div_level(maybe_div, level):
"""
Check if this BNC div exists and is of the same level as `level` (e.g. 4,
3, 2)
"""
if maybe_div is None:
return False
return (maybe_div.name == 'div' and 'level' in maybe_div.attrs
and maybe_div.attrs['level'] == str(level))
def extract_lemmas(sentence):
"""
Extract lemmas from a BNC XML sentence.
"""
# Get all word tags - some may be nested in multiword <mw> tags
children = sentence.find_all('w')
lemmas = []
for child in children:
lemmas.append(child['hw'])
return lemmas
def get_document(ets_series, cache, frg_sent_cache, lemmatize, bnc_xml_folder):
"""
Retrieve entire document (i.e. fragment) from bnc_xml.
"""
frg_sent_key = '{}_{}'.format(ets_series.text_segment_id,
ets_series.sentence_number)
if frg_sent_key in frg_sent_cache:
return frg_sent_cache[frg_sent_key]
if ets_series.bnc_file not in cache:
cache[ets_series.bnc_file] = load_bncf(ets_series, bnc_xml_folder)
bnc_xml = cache[ets_series.bnc_file]
bnc_n = ets_series.bnc_file_n
# Fragments are divs with level 1
fragments = bnc_xml.find_all('div', {'level': '1'})
if not fragments:
# No levels in this xml file (e.g. KDB.xml)
fragments = bnc_xml.find_all('div')
frg = fragments[bnc_n - 1] # BNC is 1-indexed
# Get sentence by number
if ets_series.sentence_number == '675a':
# Some kind of BNC glitch - just get original 675
sentence_number = '675'
else:
sentence_number = ets_series.sentence_number
sentence = frg.find('s', {'n': sentence_number})
smallest_context = None
smallest_context_len = 1e10
# Get sentence's parent paragraph or div
paragraph = sentence.parent
while paragraph is not None and paragraph.name != 'p':
paragraph = paragraph.parent
if paragraph is not None:
paragraph = paragraph.text.strip()
if len(paragraph) < smallest_context_len:
smallest_context_len = len(paragraph)
smallest_context = paragraph
# For spoken text, wrapped in utterances
utterance = sentence.parent
while utterance is not None and utterance.name != 'u':
utterance = utterance.parent
if utterance is not None:
utterance = utterance.text.strip()
if len(utterance) < smallest_context_len:
smallest_context_len = len(utterance)
smallest_context = utterance
# Get div4, div3, div2, div1 if they exist
ret_vals = [None, paragraph, utterance]
for i in (4, 3, 2):
div = sentence.parent
while div is not None and not is_div_level(div, i):
div = div.parent
if div is None:
ret_vals.append(None)
else:
div = div.text.strip()
if len(div) < smallest_context_len:
smallest_context_len = len(div)
smallest_context = div
ret_vals.append(div)
# div1 is the already-found fragment (could have no level e.g. KBD.xml)
frg = frg.text.strip()
ret_vals.append(frg)
if len(frg) < smallest_context_len:
smallest_context_len = len(frg)
smallest_context = frg
if smallest_context is None:
print("No contexts found for {}".format(ets_series.id))
import ipdb
ipdb.set_trace()
ret_vals[0] = normalize_whitespace(smallest_context)
# Get index of sentence in min context
sentence_text = normalize_whitespace(sentence.text)
sentence_start_idx = int(ret_vals[0].find(sentence_text))
if sentence_start_idx < 0:
import pdb; pdb.set_trace()
sentence_end_idx = int(sentence_start_idx + len(sentence_text))
ret_vals.append(sentence_start_idx)
ret_vals.append(sentence_end_idx)
# Finally, get genre
text_units = bnc_xml.find_all('wtext')
if not text_units:
text_units = bnc_xml.find_all('stext')
assert len(text_units) == 1
genre = text_units[0].attrs['type']
# Fix genre
genre = GENRE_MAP.get(genre, 'unknown')
ret_vals.append(genre)
# Cache for like sentences (will always be in the same paragraph/div4321)
frg_sent_cache[frg_sent_key] = ret_vals
return ret_vals
def pprint_deps(basicdeps):
"""
Print dependencies according to StanfordNLP.
"""
for dep in basicdeps:
print('{}({}-{}, {}-{})'.format(
dep['dep'],
dep['governorGloss'],
dep['governor'],
dep['dependentGloss'],
dep['dependent'],
))
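# Editor's note (example): for a CoreNLP dependency entry such as
# {"dep": "nsubj", "governorGloss": "sat", "governor": 2,
#  "dependentGloss": "cat", "dependent": 1}
# pprint_deps prints the line: nsubj(sat-2, cat-1)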
def extract_arguments(row):
"""
Extract arguments for the given VUAMC row by dependency parsing via
Stanford CoreNLP.
Many verbs are hidden as nouns, modifiers, etc. and do not have
clean subjects/objects. We treat case modifiers as objects as a special
case, but do no further preprocessing.
When there is no subject/object, we return an empty string, rather than
None, to preserve feather format capabilities.
"""
global nlp
if nlp is None:
nlp = StanfordCoreNLP('http://localhost:9000')
raw_str = ' '.join(row.raw_sentence)
output = nlp.annotate(
raw_str, properties={
'annotators': 'depparse,lemma',
'outputFormat': 'json'
})['sentences'][0]
idx_to_lemma = dict(enumerate(
[x['lemma'] for x in output['tokens']], start=1))
# Loop through dependencies, find verb
verb_deps = [
x for x in output['basicDependencies']
if x['governor'] == row.word_offset
]
if not verb_deps:
# Check if gloss is in dependents, with case modifier - if so, put that
# as object
verb_govs = [
x for x in output['basicDependencies']
if x['dependent'] == row.word_offset and x['dep'] == 'case'
]
if not verb_govs:
return '', '', '', ''
if len(verb_govs) > 1:
print("Multiple cases for verb {} id {}".format(row.verb, row.id))
pprint_deps(verb_govs)
# Just take the first one
return ('', verb_govs[0]['governorGloss'].lower(),
'', idx_to_lemma[verb_govs[0]['governor']].lower())
subject = ''
object = ''
subject_lemma = ''
object_lemma = ''
for dep_obj in verb_deps:
if dep_obj['dep'] == 'nsubj':
# Found subject
subject = dep_obj['dependentGloss'].lower()
subject_lemma = idx_to_lemma[dep_obj['dependent']].lower()
elif dep_obj['dep'] == 'dobj':
object = dep_obj['dependentGloss'].lower()
object_lemma = idx_to_lemma[dep_obj['dependent']].lower()
if not subject and not object:
print("No subject/objects found for verb {} id {}".format(
row.verb, row.id))
pprint_deps(verb_deps)
return subject, object, subject_lemma, object_lemma
def fix_word_offset(row):
"""
There are some off-by-one errors given a verb and its offset into the
sentence. This tries to fix it by looking at the raw sentence, the word
index, and the actual verb.
"""
try:
if row.raw_sentence[row.word_offset - 1].lower() != row.verb:
# Check if it's off-by-one
if row.raw_sentence[row.word_offset].lower() == row.verb:
print("Fixing word offset {}".format(row.id))
return row.word_offset + 1
if row.raw_sentence[row.word_offset + 1].lower() == row.verb:
print("Fixing word offset {}".format(row.id))
return row.word_offset + 2
# Just return the first index of the verb itself
lower_sentence = [x.lower() for x in row.raw_sentence]
if row.verb not in lower_sentence:
# Just return the index and hope it's correct -
# some subtle problems e.g. British vs American spelling
return row.word_offset
else:
return lower_sentence.index(row.verb) + 1
# Fine, keep word offset
return row.word_offset
except IndexError:
# End/beginning-of-sentence issues, just trust word index
return row.word_offset
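

# --- Illustrative sketch (editor's addition, not part of the original script) ---
# The toy row below is fabricated; real rows come from the ETS dataframe and
# also carry an 'id' field used only for logging.
def _example_fix_word_offset() -> int:
    """Minimal sketch: a correctly aligned 1-indexed offset is returned as-is."""
    row = pd.Series({'raw_sentence': ['The', 'cat', 'sat'],
                     'word_offset': 3,
                     'verb': 'sat',
                     'id': 'example_1'})
    return fix_word_offset(row)  # -> 3
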
def tokenize_vuamc(sentence, raw=False):
"""
Given a sentence as an XML object, tokenize it
"""
tokens = []
for el in sentence.findChildren():
# If there are children, skip
if len(el.findChildren()) > 0:
continue
tokens.append(el.text)
joined = ' '.join(tokens)
if raw:
return simple_preprocess(joined), joined.split()
return simple_preprocess(joined)
if __name__ == '__main__':
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(
description='VU Amsterdam Parser',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--bnc_xml_folder',
default='/u/nlp/data/bnc_xml/',
help='Path to uncompressed XML version of BNC (should have ABCDEFGHJK toplevel dirs)')
parser.add_argument(
'--lemmatize',
action='store_true',
help='Lemmatize VUAMC')
parser.add_argument(
'--out', default='./vuamc.csv',
help='Output CSV file'
)
args = parser.parse_args()
# Load ETS dataset with links back to VUAMC locations
ets_df = load_ets_annot('./ets/train.jsonlines',
'./ets/train.csv')
vuamc = load_vuamc('./VUAMC.xml')
# Extract sentences (available in VUAMC)
frg_cache = {}
print("Extracting sentences and verbs from VUAMC: train")
ets_df['sentence'], ets_df['raw_sentence'] = zip(*ets_df.progress_apply(
lambda row: get_sentence(vuamc, row, frg_cache), axis=1))
# Extract paragraphs and documents (available in BNC)
print("Extracting broader contexts from original BNC XML: train")
doc_cache = {}
fs_cache = {}
contexts = zip(*ets_df.progress_apply(
lambda row: get_document(row, doc_cache, fs_cache, args.lemmatize, args.bnc_xml_folder), axis=1))
context_names = [
'min_context', 'paragraph', 'utterance', 'div_4', 'div_3', 'div_2',
'div_1', 'sentence_start_idx', 'sentence_end_idx' # IGNORE GENRE HERE, already given
]
del doc_cache, fs_cache
for cname, c in zip(context_names, contexts):
ets_df[cname] = c
# Do the same for test
ets_df_test = load_ets_test('./ets/test.csv')
ets_df_test['word_offset'] = ets_df_test['sentence_offset'].copy()
del ets_df_test['sentence_offset']
print("Extracting sentences and verbs from VUAMC: test")
ets_df_test['sentence'], ets_df_test['raw_sentence'], ets_df_test[
'verb'] = zip(*ets_df_test.progress_apply(
lambda row: get_sentence(vuamc, row, frg_cache, get_verb=True),
axis=1))
del frg_cache
print("Extracting broader contexts from original BNC XML: test")
doc_cache = {}
fs_cache = {}
contexts = zip(*ets_df_test.progress_apply(
lambda row: get_document(row, doc_cache, fs_cache, args.lemmatize, args.bnc_xml_folder), axis=1))
context_names = [
'min_context', 'paragraph', 'utterance', 'div_4', 'div_3', 'div_2',
'div_1', 'sentence_start_idx', 'sentence_end_idx', 'genre'
]
del doc_cache, fs_cache
for cname, c in zip(context_names, contexts):
ets_df_test[cname] = c
ets_df_test['partition'] = 'test'
# Some dummy default values
ets_df_test['sentence_offset'] = 0
ets_df_test['subword_offset'] = 0
ets_df_test['fold_no'] = 0
    ets_df = pd.concat([ets_df, ets_df_test])
import addfips
import os
import pandas as pd
import datetime
ageVariables = {
'DATE': 'date_stamp',
'AGE_RANGE': 'age_group',
'AR_TOTALCASES': 'cnt_confirmed',
'AR_TOTALPERCENT': 'pct_confirmed',
'AR_NEWCASES': 'cnt_confirmed_new',
'AR_NEWPERCENT': 'pct_confirmed_new',
'AR_TOTALDEATHS' : 'cnt_death',
'AR_NEWDEATHS': 'cnt_death_new'
}
countyVariables = {
'DATE': 'date_stamp',
'COUNTY': 'us_county_fips',
'TOTAL_CASES': 'cnt_total',
'NEW_CASES': 'cnt_total_new',
'TOTAL_CONFIRMED': 'cnt_confirmed',
'NEW_CONFIRMED': 'cnt_confirmed_new',
'TOTAL_PROBABLE': 'cnt_probable',
'NEW_PROBABLE': 'cnt_probable_new',
'POS_TESTS': 'cnt_tested_pos',
'NEG_TESTS': 'cnt_tested_neg',
'TOTAL_TESTS': 'cnt_tested',
'NEW_TESTS': 'cnt_tested_new',
'NEW_DEATHS': 'cnt_death_new',
'TOTAL_DEATHS': 'cnt_death',
'NEW_RECOVERED': 'cnt_recovered_new',
'TOTAL_RECOVERED': 'cnt_recovered',
'NEW_ACTIVE': 'cnt_active_new',
'TOTAL_ACTIVE': 'cnt_active',
'NEW_HOSPITALIZED': 'cnt_hospitalized_new',
'TOTAL_HOSPITALIZED': 'cnt_hospitalized',
}
dailyVariables = {
'DATE': 'date_stamp',
'TOTAL_CASES': 'cnt_total',
'NEW_CASES': 'cnt_total_new',
'TOTAL_CONFIRMED': 'cnt_confirmed',
'NEW_CONFIRMED': 'cnt_confirmed_new',
'TOTAL_PROBABLE': 'cnt_probable',
'NEW_PROBABLE': 'cnt_probable_new',
'POS_TESTS': 'cnt_tested_pos',
'NEG_TESTS': 'cnt_tested_neg',
'TOTAL_TESTS': 'cnt_tested',
'NEW_TESTS': 'cnt_tested_new',
'NEW_DEATHS': 'cnt_death_new',
'TOTAL_DEATHS': 'cnt_death',
'NEW_RECOVERED': 'cnt_recovered_new',
'TOTAL_RECOVERED': 'cnt_recovered',
'NEW_ACTIVE': 'cnt_active_new',
'TOTAL_ACTIVE': 'cnt_active',
'NEW_HOSP': 'cnt_hospitalized_new',
'TOTAL_HOSP': 'cnt_hospitalized',
}
raceEthSexVariables = {
'Date': 'date_stamp',
'Category': 'category_type',
'Cat_Detail': 'category_name',
'CAT_DETAIL': 'category_name',
'Cat_CaseCount': 'cnt_confirmed',
'Cat_Percent': 'pct_confirmed',
'CAT_DEATHCOUNT' : 'cnt_death',
'CAT_DEATHPERCENT': 'pct_death'
}
def cleanAgeData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(ageVariables, axis="columns", inplace=True)
# Reformat dates
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%m-%d-%y')
# Code age ranges
df['age_group'] = df['age_group'].map({ '0-10 years':'00', '11-20 years': '11', '21-30 years': '21', '31-40 years': '31', '41-50 years': '41', '51-60 years': '51', '61-70 years': '61', '71-80 years': '71', '81+ years': '81', 'Pending': '99' })
# multiply the percentages by 100
df['pct_confirmed'] = df['pct_confirmed'].apply(lambda x: round(x*100,4))
df['pct_confirmed_new'] = df['pct_confirmed_new'].apply(lambda x: round(x*100, 4))
#cast count variables to integers
df['cnt_death'] = df['cnt_death'].astype(pd.Int32Dtype())
df['cnt_death_new'] = df['cnt_death_new'].astype(pd.Int32Dtype())
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
# reorder so that the cnt and new are always next to each other in the same order
df = df[['date_stamp', 'age_group', 'cnt_confirmed', 'cnt_confirmed_new', 'pct_confirmed', 'pct_confirmed_new', 'cnt_death', 'cnt_death_new']]
# order the records by date
df = df.sort_values(by=['date_stamp','age_group'], ascending=True)
return df
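

# --- Illustrative usage (editor's sketch, not part of the original script) ---
# The single record below is fabricated and only shows the expected raw column
# names; real input comes from the upstream data files.
def _example_clean_age_data() -> pd.DataFrame:
    """Minimal sketch: clean one fabricated age-group record."""
    raw = [{'DATE': '01-15-21', 'AGE_RANGE': '0-10 years',
            'AR_TOTALCASES': 100, 'AR_TOTALPERCENT': 0.05,
            'AR_NEWCASES': 10, 'AR_NEWPERCENT': 0.01,
            'AR_TOTALDEATHS': 1, 'AR_NEWDEATHS': 0}]
    return cleanAgeData(raw)
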
def cleanCountyData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(countyVariables, axis="columns", inplace=True)
# Reformat dates
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%m-%d-%y')
# Copy original county value to keep the pending and out of state values
df['tn_covid_geo'] = df['us_county_fips']
# Change county name to fips code
af = addfips.AddFIPS()
fips = []
for key, value in df['us_county_fips'].items():
fips.append(af.get_county_fips(value, 'Tennessee'))
df['us_county_fips'] = fips
# Copy appropriate fips codes to covid geo
df.loc[(df['tn_covid_geo'] != 'Pending') & (df['tn_covid_geo'] != 'Out of State'), 'tn_covid_geo'] = df['us_county_fips']
df.loc[df['tn_covid_geo'] == 'Pending', 'tn_covid_geo'] = '47PEN'
df.loc[df['tn_covid_geo'] == 'Out of State', 'tn_covid_geo'] = '47OOS'
# format as Integers a none
df['cnt_total'] = df['cnt_total'].astype(pd.Int32Dtype())
df['cnt_total_new'] = df['cnt_total_new'].astype(pd.Int32Dtype())
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
df['cnt_confirmed_new'] = df['cnt_confirmed_new'].astype(pd.Int32Dtype())
if 'cnt_probable' in df.columns:
df['cnt_probable'] = df['cnt_probable'].astype(pd.Int32Dtype())
df['cnt_probable_new'] = df['cnt_probable_new'].astype(pd.Int32Dtype())
df['cnt_tested_pos'] = df['cnt_tested_pos'].astype(pd.Int32Dtype())
df['cnt_tested_neg'] = df['cnt_tested_neg'].astype(pd.Int32Dtype())
df['cnt_tested'] = df['cnt_tested'].astype(pd.Int32Dtype())
df['cnt_tested_new'] = df['cnt_tested_new'].astype(pd.Int32Dtype())
df['cnt_death_new'] = df['cnt_death_new'].astype(pd.Int32Dtype())
df['cnt_death'] = df['cnt_death'].astype(pd.Int32Dtype())
df['cnt_recovered_new'] = df['cnt_recovered_new'].astype(pd.Int32Dtype())
df['cnt_recovered'] = df['cnt_recovered'].astype(pd.Int32Dtype())
df['cnt_active_new'] = df['cnt_active_new'].astype(pd.Int32Dtype())
df['cnt_active'] = df['cnt_active'].astype(pd.Int32Dtype())
df['cnt_hospitalized_new'] = df['cnt_hospitalized_new'].astype(pd.Int32Dtype())
df['cnt_hospitalized'] = df['cnt_hospitalized'].astype(pd.Int32Dtype())
# reorder so that the total and new are always next to each other in the same order
if 'cnt_probable' in df.columns:
df = df[['date_stamp', 'us_county_fips', 'tn_covid_geo', 'cnt_total', 'cnt_total_new', 'cnt_confirmed', 'cnt_confirmed_new', 'cnt_probable', 'cnt_probable_new', 'cnt_active', 'cnt_active_new', 'cnt_hospitalized', 'cnt_hospitalized_new', 'cnt_recovered', 'cnt_recovered_new', 'cnt_death', 'cnt_death_new', 'cnt_tested_pos', 'cnt_tested_neg', 'cnt_tested', 'cnt_tested_new']]
else:
df = df[['date_stamp', 'us_county_fips', 'tn_covid_geo', 'cnt_total', 'cnt_total_new', 'cnt_confirmed', 'cnt_confirmed_new', 'cnt_active', 'cnt_active_new', 'cnt_hospitalized', 'cnt_hospitalized_new', 'cnt_recovered', 'cnt_recovered_new', 'cnt_death', 'cnt_death_new', 'cnt_tested_pos', 'cnt_tested_neg', 'cnt_tested', 'cnt_tested_new']]
# order the records by date
df = df.sort_values(by='date_stamp', ascending=True)
return df
def cleanDailyData(data):
df = pd.DataFrame(data)
# Rename the file headers
df.rename(dailyVariables, axis="columns", inplace=True)
# Reformat dates
df['date_stamp'] = pd.to_datetime(df['date_stamp'], format='%m-%d-%y')
# format as Integers a none
df['cnt_total'] = df['cnt_total'].astype(pd.Int32Dtype())
df['cnt_total_new'] = df['cnt_total_new'].astype(pd.Int32Dtype())
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
df['cnt_confirmed_new'] = df['cnt_confirmed_new'].astype(pd.Int32Dtype())
if 'cnt_probable' in df.columns:
df['cnt_probable'] = df['cnt_probable'].astype(pd.Int32Dtype())
df['cnt_probable_new'] = df['cnt_probable_new'].astype(pd.Int32Dtype())
df['cnt_tested_pos'] = df['cnt_tested_pos'].astype(pd.Int32Dtype())
df['cnt_tested_neg'] = df['cnt_tested_neg'].astype(pd.Int32Dtype())
df['cnt_tested'] = df['cnt_tested'].astype(pd.Int32Dtype())
df['cnt_tested_new'] = df['cnt_tested_new'].astype(pd.Int32Dtype())
df['cnt_death_new'] = df['cnt_death_new'].astype(pd.Int32Dtype())
    df['cnt_death'] = df['cnt_death'].astype(pd.Int32Dtype())
import os
from os.path import join
import cobra
from cobra import Model, Reaction, Metabolite
from cobra.flux_analysis.loopless import loopless_solution
from cobra.util.solver import linear_reaction_coefficients
import numpy as np
import scipy as sp
import csv as cp
import pandas as pd
import sys
import re
#######################################################################################
# Function Definition
# hvmKnockout Evaluates the effect of enforcing a knockout state on virus
# production (model generated by genHVM.py)
# Inputs:
# HVM Integrated host-virus model
# HostRxn Name of the host objective reaction
# Outputs:
# koVirus Vector of virus optima values with additional host-constraint
def hvmKnockout(HVM,HostRxn,solver):
"""Reaction Knockouts
new version by Hadrien"""
# [1] Optimise the HVM for host and virus objectives
# Initial Setup
HVM.solver = solver #set solver for all optimisations
hostIdx = next(index for index, reaction in enumerate(HVM.reactions) if reaction.id == HostRxn)
virusIdx = len(HVM.reactions) - 1
objIdx = [hostIdx, virusIdx]
virusRxn = HVM.reactions[-1].id
# Host Optimisation
hostObj = HVM.reactions[hostIdx]
HVM.objective = hostObj.id
hostSol = HVM.optimize()
print("Host optimization objective: {}".format(hostSol.objective_value))
# Virus Optimisation
virusObj = HVM.reactions[-1]
HVM.objective = virusObj.id
virusSol = HVM.optimize()
print("Virus optimization objective: {}".format(virusSol.objective_value))
# [2] Knockout Analysis
koVirus = np.zeros((len(HVM.reactions),5))
# Initiate loop
nbofReactions = len(HVM.reactions)
reactionNames = [None] * nbofReactions
for ii in range(nbofReactions):
# Conditional to exclude objective reactions and those that carry zero flux for virus optima
if ((ii!=hostIdx) and (ii!=virusIdx) and (virusSol.fluxes[ii]!=0.0) and (not HVM.reactions[ii].id.startswith("EX_"))):
print("{}/{}\tKnockout: {}\t".format(ii, nbofReactions, HVM.reactions[ii].id))
# Store reaction info
reactionNames[ii] = HVM.reactions[ii].id
koVirus[ii,0] = ii
# Store original fluxes under host and virus optimisation
koVirus[ii,1] = hostSol.fluxes[ii]
koVirus[ii,2] = virusSol.fluxes[ii]
# Store the bounds
tempLB = HVM.reactions[ii].lower_bound
tempUB = HVM.reactions[ii].upper_bound
# Alter bounds to zero
HVM.reactions[ii].lower_bound = 0
HVM.reactions[ii].upper_bound = 0
# Optimise the model for virus or host production
HVM.objective = virusObj.id
koVirus[ii,3] = HVM.slim_optimize()
HVM.objective = hostObj.id
koVirus[ii,4] = HVM.slim_optimize()
# Return to original bounds
HVM.reactions[ii].lower_bound = tempLB
HVM.reactions[ii].upper_bound = tempUB
else:
koVirus[ii,3] = np.nan
koVirus[ii,4] = np.nan
print("done")
# [3] Post Analysis
# Remove nan values
# Convert to % original optima
for ii in range(len(koVirus)):
koVirus[ii,3] = (koVirus[ii,3] / virusSol.objective_value) * 100
koVirus[ii,4] = (koVirus[ii,4] / hostSol.objective_value) * 100
# [4] Output
    outputDf = pd.DataFrame(data=koVirus, columns=["index", "host_flux", "virus_flux", "virus_optima_KO", "host_optima_KO"])
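

# --- Illustrative usage (editor's sketch, not part of the original script) ---
# The model path, host objective reaction id and solver name below are
# hypothetical placeholders; substitute the actual integrated host-virus model.
def _example_hvm_knockout():
    """Minimal sketch: run the knockout scan against an integrated host-virus model."""
    hvm = cobra.io.read_sbml_model("host_virus_model.xml")  # hypothetical file
    return hvmKnockout(hvm, "biomass_reaction", solver="glpk")
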
import mealdb_api.api_dict as db
from mealdb_api.call_api_to_df import create_df
import pandas as pd
df = pd.DataFrame()
import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, CategoricalIndex, DataFrame, Series, get_dummies
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
class TestGetDummies:
@pytest.fixture
def df(self):
return DataFrame({"A": ["a", "b", "a"], "B": ["b", "b", "c"], "C": [1, 2, 3]})
@pytest.fixture(params=["uint8", "i8", np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=["dense", "sparse"])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == "sparse"
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_get_dummies_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype="object")
def test_get_dummies_basic(self, sparse, dtype):
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
)
if sparse:
expected = expected.apply(SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_types(self, sparse, dtype):
# GH 10531
s_list = list("abc")
s_series = Series(s_list)
s_df = DataFrame(
{"a": [0, 1, 0, 1, 2], "b": ["A", "A", "B", "C", "C"], "c": [2, 3, 3, 3, 2]}
)
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list("abc"),
)
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype)
if sparse:
dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]"
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
tm.assert_series_equal(result, expected)
result = get_dummies(s_df, columns=["a"], sparse=sparse, dtype=dtype)
expected_counts = {"int64": 1, "object": 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_get_dummies_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=["A"])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ["A"]
def test_get_dummies_include_na(self, sparse, dtype):
s = ["a", "b", np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0]}, dtype=self.effective_dtype(dtype)
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame(
{np.nan: [0, 0, 1], "a": [1, 0, 0], "b": [0, 1, 0]},
dtype=self.effective_dtype(dtype),
)
exp_na = exp_na.reindex(["a", "b", np.nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([np.nan], dummy_na=True, sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(
Series(1, index=[0]), columns=[np.nan], dtype=self.effective_dtype(dtype)
)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_get_dummies_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = "e"
eacute = unicodedata.lookup("LATIN SMALL LETTER E WITH ACUTE")
s = [e, eacute, eacute]
res = get_dummies(s, prefix="letter", sparse=sparse)
exp = DataFrame(
{"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
        tm.assert_frame_equal(res, exp)
"""
Market Data Provider.
This module contains implementations of the DataProvider abstract class, which
defines methods by which market information can be requested and presented.
"""
from abc import abstractmethod
from io import StringIO
import json
import os
import pathlib
import time
from typing import Any, Dict
import pandas as pd
import psycopg2
import requests
class DataCacheHandler:
"""
An abstract class for handling data caching operations.
"""
def __init__(self, ticker: str, *, memory_cache_size: int = 10):
"""
Init function.
+ticker+ is the ticker.
+memory_cache_size+ is the number of entries to be stored in
memory, to avoid duplicate accesses to persistent storage.
"""
self.ticker = ticker
self.memory_cache_size = memory_cache_size
self._memory_cache = {}
self._memory_cache_history = []
def _check_memory_cache(self, key: str):
"""
Checks for data associated with a given +key+ in the memory cache.
If found, return it, else return None.
"""
if key in self._memory_cache:
cache_entry = self._memory_cache[key]
if len(self._memory_cache) == self.memory_cache_size:
self._memory_cache_history.remove(key)
self._memory_cache_history.append(key)
return cache_entry
return None
def _add_memory_cache(self, key: str, frame: pd.DataFrame):
"""
Adds a +frame+ associated with a given +key+ to the memory cache.
If the cache is full, pops off the least recently accessed entry.
"""
# First, ensure we're not adding a duplicate
if self._check_memory_cache(key) is not None:
return
# If necessary, purge the oldest item from the cache
if len(self._memory_cache) == self.memory_cache_size:
old_name = self._memory_cache_history.pop(0)
del self._memory_cache[old_name]
self._memory_cache[key] = frame
self._memory_cache_history.append(key)
def _get_key_by_timestamp(self, time: pd.Timestamp, interval: str):
"""
Gets the key used to index the local cache for a given +time+ for
data associated with a given +interval+, such as "day".
Note that when 3.10+ is supported, the interval can become an enum.
"""
simple_keys = {
"weekly": "per_week",
"monthly": "per_month"
}
if interval == "daily":
return f"{time.year}_per_day"
if interval == "intraday":
return f"{time.day}_{time.month}_{time.year}_per_minute"
elif interval in simple_keys:
return simple_keys[interval]
raise RuntimeError("Interval '{interval}' not supported!")
def retrieve(self, time: pd.Timestamp, interval: str):
"""
Gets any locally cached data for a given +time+ for data associated
with a given +interval+, such as "day". There are two layers of
caching - one is a direct memory cache of the relevant dataframe,
the other is persistent.
Note that when 3.10+ is supported, +interval+ can become an enum.
"""
# First check the memory cache
key = self._get_key_by_timestamp(time, interval)
data = self._check_memory_cache(key)
if data is not None:
return data
# Next, check the persistent cache
return self._check_persistent_cache(time, interval)
def store(self, time: pd.Timestamp, interval: str, data):
"""
Stores +data+ for a given +time+ and +interval+ into the local cache
heirarchy. There are two layers of caching - one is a direct memory
cache of the relevant dataframe, the other is persistent.
Note that when 3.10+ is supported, +interval+ can become an enum.
"""
# Add to both memory and persistent cache
key = self._get_key_by_timestamp(time, interval)
self._add_memory_cache(key, data)
self._store_persistent_cache(time, interval, data)
def _check_persistent_cache(self, time: pd.Timestamp, interval: str):
"""
Gets any data cached in persistent space for a given +time+ for
data associated with a given +interval+, such as "day".
"""
def _store_persistent_cache(self, time: pd.Timestamp, interval: str, data):
"""
Stores dataframe +data+ for a given +time+ for associated with a given
+interval+ (such as "day") to a persistent space.
"""
class DataProvider:
"""
Abstract class defining the DataProvider API.
"""
def __init__(self, cache_handler: DataCacheHandler):
"""
Init function.
+cache_handler+ is an object which handles local caching operations.
"""
self.cache_handler = cache_handler
@abstractmethod
def intraday(self, day: pd.Timestamp):
"""
Gets the intraday data for a given day.
"""
@abstractmethod
def daily(self, year: pd.Timestamp):
"""
Gets the yearly data for a given +year+.
"""
@abstractmethod
def weekly(self):
"""
        Returns a frame containing all weekly data.
"""
@abstractmethod
def monthly(self):
"""
Returns a frame containing all monthly data.
"""
@abstractmethod
def first(self) -> pd.Timestamp:
"""
Returns the earliest date for which all types of data are available.
"""
@abstractmethod
def latest(self) -> pd.Timestamp:
"""
Returns the latest date for which all types of data are available.
"""
def access_all(self):
"""
Simulates accesses of all kinds. Designed to allow caching
implementations to perform all of their caching up front.
"""
class PostgresDataCacheHandler(DataCacheHandler):
"""
    A class handling data caching operations backed by a PostgreSQL database.
"""
def __init__(self, ticker: str, *,
postgres_server: str = "localhost",
postgres_username: str = "",
postgres_database: str = "skintbroker",
**kwargs: Dict[str, Any]):
"""
Init function.
        +ticker+ provides the ticker symbol for the underlying FD.
+memory_cache_size+ is the total number of entries to keep on-hand to
speed up repeated accesses.
+postgres_server+ is the URL of the postgres server used for persistent
caching
+postgres_username+ is the username for the postgres server
+postgres_database+ is the database on the postgres server
        NOTE: A connection to the Postgres server is established and the
        required tables are verified (created if missing) at construction time.
"""
super().__init__(ticker, **kwargs)
self.server = postgres_server
self.username = postgres_username
self.database = postgres_database
self.columns = ['open', 'high', 'low', 'close', 'volume']
# Get Postgres database password
self.password = os.environ.get("SKINTBROKER_AV_POSTGRES_PASS")
if not self.password:
raise RuntimeError("No Postgres database password - please set "
"SKINTBROKER_AV_POSTGRES_PASS")
# Make server connection
self.connection = self.__connect_to_db()
self.cursor = self.connection.cursor()
# Verify or create all tables. In the future, we could also create
# the database, but we'd like to ensure that the user is placing
# information in the right place. Thus, error if the database is
# not present.
self.__verify_tables()
def _check_persistent_cache(self, time: pd.Timestamp, interval: str):
"""
Gets any data cached in persistent space for a given +time+ for
        data associated with a given +interval+, such as "day". For
        this implementation, this means querying the Postgres database.
"""
        # First, query the database and try to get the requisite values.
# Start by generating the query, using conditions based on the
# interval type
query = "SELECT * FROM {ticker}_{interval}"
if interval in ["intraday", "daily"]:
query += f" WHERE EXTRACT(year from \"timestamp\") = {time.year}"
query += f" AND EXTRACT(month from \"timestamp\") = {time.month}"
if interval == "intraday":
query += f" AND EXTRACT(day from \"timestamp\") = {time.day}"
elif interval in ["weekly", "monthly"]:
# Data is small enough that no special condition is needed
pass
else:
raise RuntimeError(f"Unknown interval: {interval}")
query_vars = {'ticker': self.ticker.lower(),
'interval': interval}
# Next, perform the query and generate a dataframe
self.cursor.execute(query.format(**query_vars))
data = self.cursor.fetchall()
frame = pd.DataFrame(data, columns=["timestamp", *self.columns])
# Format the dataframe correctly
frame["timestamp"] = pd.to_datetime(frame["timestamp"])
frame.set_index("timestamp", drop=True, inplace=True)
# Determine if this data is complete or should be ignored
update = True
if not frame.empty:
# If the data isn't sufficiently recent, update anyway. As
# the conditions are rather involved, they're broken up here
# to make them easy to understand
now = _now()
if interval == "intraday":
update = False
if interval == "daily" and \
(time.year != now.year or \
frame.index[0].dayofyear != now.dayofyear):
update = False
now = _now()
if interval == "weekly" and frame.index[0].week == now.week:
update = False
# If the data isn't recent, update
if interval == "monthly" and frame.index[0].month == now.month:
update = False
if not update:
# No update needed, just return the data
key = self._get_key_by_timestamp(time, interval)
self._add_memory_cache(key, frame)
return frame
return None
def _store_persistent_cache(self, time: pd.Timestamp, interval: str, data):
"""
Stores dataframe +data+ for a given +time+ for associated with a given
+interval+ (such as "day") to a persistent space. For this
implementation, store it to a postgres database.
"""
# The original implementation of this function used the standard approach
# of converting the data into an in-memory CSV format, then converting it
# to an SQL request via psycopg2's built-in copy. This method was the
# overall fastest, according to a comparative study, but broke down when
# attempting update-on-conflict inserts (upserts). The new method allows
# upserts, but at the expense of generating a brutally large mega-query,
# the speed of which is at the mercy of psycopg2's internals.
# First, make sure there's some data to work with
if data.empty:
return
# First, genreate the mega-query
query = ""
for index, row in data.iterrows():
query += (f"INSERT INTO {self.ticker.lower()}_{interval} "
"(timestamp, open, high, low, close, volume) VALUES("
f"'{index}', {row['open']}, {row['high']}, "
f"{row['low']}, {row['close']}, {row['volume']}"
") ON CONFLICT (timestamp) DO UPDATE SET "
"(open, high, low, close, volume) = "
"(EXCLUDED.open, EXCLUDED.high, EXCLUDED.low,"
" EXCLUDED.close, EXCLUDED.volume);\n")
# Then execute the mega-query
self.cursor.execute(query)
self.connection.commit()
def __connect_to_db(self):
"""
Establish and return a connection to the postgres database.
"""
# Don't catch any exceptions - they're sufficiently verbose and it's
# best if they just tank the attempt.
return psycopg2.connect(host=self.server,
database=self.database,
user=self.username,
password=self.password)
def __verify_tables(self):
"""
Populates the database, ensuring that all relevant tables are
present. If they're already there, leave them alone.
"""
# First, load the query template
sql_path = pathlib.Path(os.path.dirname(os.path.realpath(__file__)))/"resources"/"sql"
populate_db_path = sql_path/"populate_db.sql"
query = populate_db_path.read_text()
# Next, set up the query variables
query_vars = {'ticker': self.ticker.lower(),
'username': self.username}
# Execute
self.cursor.execute(query.format(**query_vars))
self.connection.commit()
class CSVDataCacheHandler(DataCacheHandler):
"""
    A class handling data caching operations performed via a hierarchy of CSV
files.
"""
def __init__(self, ticker: str, *, cache: str = "cache",
**kwargs: Dict[str, Any]):
"""
Init function.
+cache+ provides a directory which the DataProvider can use to
speed up repeated accesses.
"""
super().__init__(ticker, **kwargs)
self.cache = pathlib.Path(cache)
# Ensure the cache is suitable
if self.cache.exists() and not self.cache.is_dir():
raise RuntimeError("Cache must be a directory")
self.cache.mkdir(exist_ok=True, parents=True)
def __get_csv_path(self, time: pd.Timestamp, interval: str):
"""
Gets the CSV associated with a given +time+ and +interval+.
"""
cache_dir = self.cache/self.ticker
key = self._get_key_by_timestamp(time, interval)
if interval == "intraday":
return cache_dir/str(time.year)/str(time.month)/f"{key}.csv"
if interval == "daily":
return cache_dir/str(time.year)/f"{key}.csv"
if interval in ["weekly", "monthly"]:
return cache_dir/f"{key}.csv"
else:
raise RuntimeError("Interval '{interval}' not supported!")
def _check_persistent_cache(self, time: pd.Timestamp, interval: str):
"""
Gets any data cached in persistent space for a given +time+ for
data associated with a given +interval+, such as "day". For
this implementation, this includes searching a file hierarchy.
"""
key = self._get_key_by_timestamp(time, interval)
csv = self.__get_csv_path(time, interval)
update = True
if csv.exists():
frame = pd.read_csv(csv, parse_dates=[0],
infer_datetime_format=True,
index_col='time')
# If the data isn't sufficiently recent, update anyway. As
# the conditions are rather involved, they're broken up here
# to make them easy to understand
now = _now()
if interval == "intraday":
update = False
if interval == "daily" and \
(time.year != now.year or \
frame.index[0].dayofyear != now.dayofyear):
update = False
now = _now()
if interval == "weekly" and frame.index[0].week == now.week:
update = False
# If the data isn't recent, update
if interval == "monthly" and frame.index[0].month == now.month:
update = False
if not update:
# No update needed, just return the data
self._add_memory_cache(key, frame)
return frame
return None
def _store_persistent_cache(self, time: pd.Timestamp, interval: str, data):
"""
Stores dataframe +data+ for a given +time+ for associated with a given
+interval+ (such as "day") to a persistent space. For this
implementation, this involves storing a CSV in a file hierarchy.
"""
csv = self.__get_csv_path(time, interval)
csv_dir = csv.parent
csv_dir.mkdir(exist_ok=True, parents=True)
data.to_csv(csv, index_label='time')
class AVDataProvider(DataProvider):
"""
A subclass of DataProvider which uses the AlphaVantage API.
"""
def __init__(self, ticker: str, *,
reqs_per_minute: int = 5,
**kwargs: Dict[str, Any]):
"""
Init function.
+reqs_per_minute+ is the number of requests allowed per minute.
+ticker+ provides the ticker symbol for the underlying FD.
+memory_cache_size+ is the total number of entries to keep on-hand to
speed up repeated accesses.
NOTE: This object assumes it is the only user of the API key at any
given time, and will attempt the maximum number of accesses possible.
"""
super().__init__(**kwargs)
self.ticker = ticker
self.reqs_per_minute = reqs_per_minute
self._calls = []
# Get AlphaVantage API key
self.api_key = os.environ.get("SKINTBROKER_AV_API_KEY")
if not self.api_key:
raise RuntimeError("No AlphaVantage API key detected - please set "
"SKINTBROKER_AV_API_KEY")
def intraday(self, day: pd.Timestamp):
"""
Gets the intraday data for a given day.
"""
# TODO handle today data
# First, check if the data is already cached
frame = self.cache_handler.retrieve(day, "intraday")
if frame is not None:
return frame
# Otherwise, download it. Intraday data is divided into 30-day
# segments, so first determine just how far back to look.
days = (_now().floor('d') - day.floor('d')).days
month = (days // 30) % 12 + 1
year = (days // 360) + 1
params = {"function": "TIME_SERIES_INTRADAY_EXTENDED",
"interval": "1min",
"symbol": self.ticker,
"slice": f"year{year}month{month}"}
request_frame = self._api_request(**params)
if request_frame.empty:
return None
# Cache all downloaded data - no point in wasting queries!
grouper = pd.Grouper(freq='D')
for date, group in request_frame.groupby(grouper):
self.cache_handler.store(date, "intraday", group)
# Try again. If there's still no data, there probably isn't any.
frame = self.cache_handler.retrieve(day, "intraday")
return frame
def daily(self, year: pd.Timestamp):
"""
Gets the yearly data for a given +year+.
"""
# First, check if the data is already cached
frame = self.cache_handler.retrieve(year, "daily")
if frame is not None:
return frame
# Update from remote
params = {"function": "TIME_SERIES_DAILY",
"symbol": self.ticker,
"outputsize": "full"}
request_frame = self._api_request(**params)
# Cache all returned data
grouper = pd.Grouper(freq='Y')
for date, group in request_frame.groupby(grouper):
self.cache_handler.store(date, "daily", group)
# Try again. If there's still no data, there probably isn't any.
frame = self.cache_handler.retrieve(year, "daily")
return frame
def weekly(self):
"""
Returns a frame containing all weekly data.
"""
# First, check if the data is already cached
frame = self.cache_handler.retrieve(_now(), "weekly")
if frame is not None:
return frame
# Update from remote
# Set up call parameters
params = {"function": "TIME_SERIES_WEEKLY_ADJUSTED",
"symbol": self.ticker}
request_frame = self._api_request(**params)
# Cache returned data.
self.cache_handler.store(_now(), "weekly", request_frame)
# Try again. If there's still no data, there probably isn't any.
frame = self.cache_handler.retrieve(_now(), "weekly")
return frame
def monthly(self):
"""
Returns a frame containing all monthly data.
"""
# First, check if the data is already cached
frame = self.cache_handler.retrieve(_now(), "monthly")
if frame is not None:
return frame
# Update from remote
# Set up call parameters
params = {"function": "TIME_SERIES_MONTHLY_ADJUSTED",
"symbol": self.ticker}
request_frame = self._api_request(**params)
# Cache returned data.
self.cache_handler.store(_now(), "monthly", request_frame)
# Try again. If there's still no data, there probably isn't any.
frame = self.cache_handler.retrieve(_now(), "monthly")
return frame
def _api_request(self, **kwargs: Dict[str, str]) -> pd.DataFrame:
"""
Performs an API request using the passed parameters. Returns a
DataFrame or None.
"""
# Assemble the query
site = "https://www.alphavantage.co/query?"
params = [f"{key}={val}" for key, val in \
{**kwargs, "apikey": self.api_key, "datatype": "csv"}.items()]
query = "&".join(params)
# Perform call limit bookkeeping, and delay if needed.
if len(self._calls) >= self.reqs_per_minute:
oldest_call = self._calls.pop(0)
to_wait = 60 - (_now() - oldest_call).seconds
if to_wait >= 0:
time.sleep(to_wait + 1)
# Call the API and generate the dataframe
print("Querying: " + site + query)
response = requests.get(site + query)
response.encoding = 'utf-8'
index_label = 'time' if "INTRADAY" in kwargs["function"] \
else 'timestamp'
frame = pd.read_csv(StringIO(response.text), parse_dates=[0],
infer_datetime_format=True,
index_col=index_label)
# Record this call for future checks
self._calls.append(_now())
return frame
def first(self) -> pd.Timestamp:
"""
Returns the earliest date for which all types of data are available.
"""
# Based on the AlphaVantage system, it's reasonable to assume data
# exists for two years back from today. Note that it's entirely
# possible that cached data exists from even earlier, so a future
# extension should search for it.
return _now() - pd.Timedelta(720 - 1, unit='d')
def latest(self) -> pd.Timestamp:
"""
Returns the latest date for which all types of data are available.
"""
# Yesterday is fine
return _now() - pd.Timedelta(1, unit='d')
def access_all(self) -> None:
"""
Simulates accesses of all kinds. Designed to allow caching
implementations to perform all of their caching up front.
"""
# First, handle daily, weekly, and monthly entries for the last 20
# years. As this comes in one immense blob, just access that.
now = _now()
self.monthly()
self.weekly()
self.daily(now)
# Then handle intraday for the last 2 years.
days = pd.date_range(end=now, freq='D', periods=360 * 2 - 1)
for day in days:
if day.weekday() <= 4:
self.intraday(day)
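# Illustrative usage sketch. The constructor of this provider is defined earlier in
# the module; the class name and arguments below are assumptions, mirroring the
# FTXDataProvider subclass that follows:
#
#     provider = AlphaVantageDataProvider("IBM", reqs_per_minute=5)  # hypothetical name
#     frame = provider.intraday(pd.Timestamp("2021-03-01"))
#     provider.access_all()  # warm every cache granularity up front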
class FTXDataProvider(DataProvider):
"""
A subclass of DataProvider which uses the FTX Crypto Exchange API.
"""
def __init__(self, ticker: str, *,
reqs_per_minute: int = 60,
**kwargs: Dict[str, Any]):
"""
Init function.
+reqs_per_minute+ is the number of requests allowed per minute.
+ticker+ provides the ticker symbol for the underlying FD.
+memory_cache_size+ is the total number of entries to keep on-hand to
speed up repeated accesses.
"""
super().__init__(**kwargs)
self.ticker = ticker
self.reqs_per_minute = reqs_per_minute
self._calls = []
def intraday(self, day: pd.Timestamp):
"""
Gets the intraday data for a given day.
"""
# TODO handle today data
# First, make sure there is a timezone associated with the
# data. If not, assume the day starts at 00:00:00 EST.
if not day.tz:
day = day.tz_localize('EST', nonexistent='shift_backward')
else:
day = day.astimezone('EST')
# Next, check if the data is already cached
frame = self.cache_handler.retrieve(day, "intraday")
if frame is not None:
return frame
# Otherwise, download it. Generate midnight start and end times,
# ensuring that all stay exactly within the bounds of the day.
start = day.floor('d')
end = start + pd.Timedelta(days=1) - pd.Timedelta(minutes=1)
# Perform request. We want a 60 second window length.
query = self._gen_query(start, end, 60)
request_frame = self._api_request(query)
if request_frame.empty:
return None
# Cache all downloaded data - no point in wasting queries!
grouper = pd.Grouper(freq='D')
for date, group in request_frame.groupby(grouper):
self.cache_handler.store(date, "intraday", group)
# Try again. If there's still no data, there probably isn't any.
frame = self.cache_handler.retrieve(day, "intraday")
return frame
def daily(self, year: pd.Timestamp):
"""
Gets the daily data for a given +year+.
"""
# First, check if the data is already cached
frame = self.cache_handler.retrieve(year, "daily")
if frame is not None:
return frame
# Perform request. We want a one day window length.
query = self._gen_query(self.first(), self.latest(), 86400)
request_frame = self._api_request(query)
if request_frame.empty:
return None
# Cache all returned data
grouper = pd.Grouper(freq='Y')
for date, group in request_frame.groupby(grouper):
self.cache_handler.store(date, "daily", group)
# Try again. If there's still no data, there probably isn't any.
frame = self.cache_handler.retrieve(year, "daily")
return frame
def weekly(self):
"""
Returns a frame containing all weekly data.
"""
# First, check if the data is already cached
frame = self.cache_handler.retrieve(_now(), "weekly")
if frame is not None:
return frame
# Perform request. We want a seven day window length.
query = self._gen_query(self.first(), self.latest(), 86400 * 7)
request_frame = self._api_request(query)
if request_frame.empty:
return None
# Cache returned data.
self.cache_handler.store(_now(), "weekly", request_frame)
# Try again. If there's still no data, there probably isn't any.
frame = self.cache_handler.retrieve(_now(), "weekly")
return frame
def monthly(self):
"""
Returns a frame containing all monthly data.
"""
# First, check if the data is already cached
frame = self.cache_handler.retrieve(_now(), "monthly")
if frame is not None:
return frame
# Perform request. We want a 30 day window length, per an
# archaic standard set by the AlphaVantage provider.
query = self._gen_query(self.first(), self.latest(), 86400 * 30)
request_frame = self._api_request(query)
if request_frame.empty:
return None
# Cache returned data.
self.cache_handler.store(_now(), "monthly", request_frame)
# Try again. If there's still no data, there probably isn't any.
frame = self.cache_handler.retrieve(_now(), "monthly")
return frame
def _gen_query(self, start: pd.Timestamp, end: pd.Timestamp,
interval: int) -> str:
"""
Generates a query string for historic data starting at +start+ and
ending at +end+, with an +interval+ second interval between
entries.
Note that any timestamps /must/ be timezone aware.
"""
# Set up parameters. Note that timestamps are in epoch time.
params = {
"start_time": start.timestamp(),
"end_time": end.timestamp(),
"resolution": interval
}
# Generate the query
site = f"https://ftx.com/api/markets/{self.ticker}/USD/candles?"
params = [f"{key}={val}" for key, val in params.items()]
return site + "&".join(params)
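# For example, with ticker "BTC" a one-day, 60-second-resolution request looks like:
#   https://ftx.com/api/markets/BTC/USD/candles?start_time=<epoch-start>&end_time=<epoch-end>&resolution=60
# where the two epoch values are start.timestamp() and end.timestamp().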
def _api_request(self, query: str) -> pd.DataFrame:
"""
Performs a passed API +query+, converting the returned JSON into a
DataFrame.
"""
# Perform call limit bookkeeping, and delay if needed.
if len(self._calls) >= self.reqs_per_minute:
oldest_call = self._calls.pop(0)
to_wait = 60 - (_now() - oldest_call).seconds
if to_wait >= 0:
time.sleep(to_wait + 1)
# Call the API and generate the dataframe
print("Querying: " + query)
response = requests.get(query)
response.encoding = 'utf-8'
# Response is JSON
frame = pd.json_normalize(json.load(StringIO(response.text)), 'result',
errors='ignore')
# Convert to the expected frame format
frame.drop(columns=['time'], inplace=True)
frame.rename(columns={'startTime': 'timestamp'}, inplace=True)
frame['timestamp'] = | pd.to_datetime(frame['timestamp']) | pandas.to_datetime |
"""
@author: Heerozh (<NAME>)
@copyright: Copyright 2019, Heerozh. All rights reserved.
@license: Apache 2.0
@email: <EMAIL>
"""
from typing import Optional, Sequence
from .factor import BaseFactor, CustomFactor
from .datafactor import ColumnDataFactor
from ..parallel import Rolling
import pandas as pd
import numpy as np
from multiprocessing import Pool, cpu_count
from multiprocessing.pool import ThreadPool
class CPFCaller:
inputs = None
win = None
callback = None
def split_call(self, splits):
split_inputs = [[]] * len(self.inputs)
for i, data in enumerate(self.inputs):
if isinstance(data, pd.DataFrame):
split_inputs[i] = [data.iloc[beg:end] for beg, end in splits]
else:
split_inputs[i] = [data] * len(splits)
return np.array([self.callback(*params) for params in zip(*split_inputs)])
class CPUParallelFactor(CustomFactor):
"""
Use CPU multi-process/thread instead of GPU to process each window of data.
Useful when your calculations can only be done on the CPU.
The performance of this approach is not ideal; it is definitely not as fast as
using the vectorization library directly.
"""
def __init__(self, win: Optional[int] = None, inputs: Optional[Sequence[BaseFactor]] = None,
multiprocess=False, core=None):
"""
`multiprocess=True` may not work on Windows if your code is written in a notebook cell.
So it is recommended that you write the CPUParallelFactor code in a file.
"""
super().__init__(win, inputs)
for data in inputs:
if isinstance(data, ColumnDataFactor):
raise ValueError('Cannot use ColumnDataFactor in CPUParallelFactor, '
'please use AdjustedColumnDataFactor instead.')
if multiprocess:
self.pool = Pool
else:
self.pool = ThreadPool
if core is None:
self.core = cpu_count()
else:
self.core = core
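# Minimal subclass sketch (an assumption inferred from compute() below: subclasses
# supply an `mp_compute` callable that receives one row-slice per Rolling input, a
# pandas DataFrame of shape (win, n_assets), and returns one value per asset):
#
#     class MyCPUFactor(CPUParallelFactor):
#         @staticmethod
#         def mp_compute(close_window):
#             return close_window.mean().values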
def compute(self, *inputs):
n_cores = self.core
origin_input = None
date_count = 0
converted_inputs = []
for data in inputs:
if isinstance(data, Rolling):
s = self._revert_to_series(data.last())
unstacked = s.unstack(level=1)
converted_inputs.append(unstacked)
if origin_input is None:
origin_input = s
date_count = len(unstacked)
else:
converted_inputs.append(data)
backwards = self.get_total_backwards_()
first_win_beg = backwards - self.win + 1
first_win_end = backwards + 1
windows = date_count - backwards
ranges = list(zip(range(first_win_beg, first_win_beg + windows),
range(first_win_end, date_count + 1)))
caller = CPFCaller()
caller.inputs = converted_inputs
caller.callback = type(self).mp_compute
if len(ranges) < n_cores:
n_cores = len(ranges)
split_range = np.array_split(ranges, n_cores)
with self.pool(n_cores) as p:
pool_ret = p.map(caller.split_call, split_range)
pool_ret = np.concatenate(pool_ret)
ret = | pd.Series(index=origin_input.index) | pandas.Series |
from microsetta_admin._api import APIRequest
from microsetta_admin.metadata_constants import (
HUMAN_SITE_INVARIANTS,
MISSING_VALUE)
from microsetta_admin.metadata_transforms import (
HUMAN_TRANSFORMS,
apply_transforms)
from collections import Counter
import re
import pandas as pd
# the vioscreen survey currently cannot be fetched from the database
TEMPLATES_TO_IGNORE = {10001, }
EBI_REMOVE = ['ABOUT_YOURSELF_TEXT', 'ANTIBIOTIC_CONDITION',
'ANTIBIOTIC_MED', 'PM_NAME', 'PM_EMAIL',
'BIRTH_MONTH', 'CAT_CONTACT', 'CAT_LOCATION',
'CONDITIONS_MEDICATION', 'DIET_RESTRICTIONS_LIST',
'DOG_CONTACT', 'HUMANS_FREE_TEXT', 'NAME',
'DOG_LOCATION', 'GENDER', 'MEDICATION_LIST',
'OTHER_CONDITIONS_LIST', 'PREGNANT_DUE_DATE',
'RACE_OTHER',
'RELATIONSHIPS_WITH_OTHERS_IN_STUDY',
'SPECIAL_RESTRICTIONS',
'SUPPLEMENTS', 'TRAVEL_LOCATIONS_LIST', 'ZIP_CODE',
'WILLING_TO_BE_CONTACTED', 'pets_other_freetext']
def drop_private_columns(df):
"""Remove columns that should not be shared publicly
Parameters
----------
df : pd.DataFrame
The dataframe to operate on
Returns
-------
pd.DataFrame
The filtered dataframe
"""
# The personal microbiome survey contains additional fields that are
# sensitive in nature
pm_remove = {c.lower() for c in df.columns if c.lower().startswith('pm_')}
remove = pm_remove | {c.lower() for c in EBI_REMOVE}
to_drop = [c for c in df.columns if c.lower() in remove]
return df.drop(columns=to_drop, inplace=False)
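# Illustrative behaviour of the filter above:
#
#     df = pd.DataFrame({"PM_NAME": ["x"], "NAME": ["y"], "age_years": [30]})
#     drop_private_columns(df).columns  # Index(['age_years'], dtype='object')
#
# "PM_NAME" falls under the pm_ prefix rule, "NAME" under EBI_REMOVE; matching is
# case-insensitive because both sides are lower-cased.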
def retrieve_metadata(sample_barcodes):
"""Retrieve all sample metadata for the provided barcodes
Parameters
----------
sample_barcodes : Iterable
The barcodes to request
Returns
-------
pd.DataFrame
A DataFrame representation of the sample metadata.
list of dict
A report of the observed errors in the metadata pulldown. The dicts
are composed of {"barcode": list of str | str, "error": str}.
"""
error_report = []
dups, errors = _find_duplicates(sample_barcodes)
if errors is not None:
error_report.append(errors)
fetched = []
for sample_barcode in set(sample_barcodes):
bc_md, errors = _fetch_barcode_metadata(sample_barcode)
if errors is not None:
error_report.append(errors)
continue
fetched.append(bc_md)
df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import csv
import pandas as pd
print('Welcome User!')
while True:
try:
option=int(input('\nMain Menu:\n1 - Tools Inventory\n2 - Personnel Directory\n3 - BorrowReturn Logbook\n4 - Exit\nSelect: '))
if option not in (1,2,3,4):
print("Invalid Selection")
continue
elif option == 1:
print("\nWELCOME TO TOOLS INVENTORY")
df=pd.read_csv('List_Of_Tools.csv')
df.index = df.index+1
print("\nInventory:")
print(df)
print("\nMENU:\n1 - Add Tools\n2 - Delete Tools\n3 - Modify Tools\n4 - Return")
while True:
try:
option = int(input("SELECT: "))
if option not in (1, 2, 3, 4):
print("Invalid Selection")
continue
elif option == 1:
f=open('List_Of_Tools.csv', 'a+',newline='')
writer=csv.writer(f)
NewItem=[]
name =(input(f"\nName of Tool: "))
while True:
try:
count = int(input("Number of Tools: "))
break
except ValueError:
print("Invalid input")
continue
NewItem.append([name,count])
writer.writerows(NewItem)
print(f"\n",name,"is successfully added to the inventory!")
f.close()
print("\nWELCOME TO TOOLS INVENTORY")
print("\nInventory:")
df=pd.read_csv('List_Of_Tools.csv')
df.index = df.index+1
print(df)
print(f"\nMENU:\n1 - Add Tools\n2 - Delete Tools\n3 - Modify Tools\n4 - Return")
elif option == 2:
f=open("List_Of_Tools.csv","r")
reader=csv.reader(f)
found=0
List=[]
tool=input("What tool do you want to delete? ")
for r in reader:
if (r[0]!=tool):
List.append(r)
else:
found=1
f.close()
if found==0:
print("Tool not found.")
else:
f=open("List_Of_Tools.csv","w",newline='')
writer=csv.writer(f)
writer.writerows(List)
print(f"\n",tool,"is successfully deleted!")
f.close()
print("\nWELCOME TO TOOLS INVENTORY")
print("\nInventory:")
df=pd.read_csv('List_Of_Tools.csv')
df.index = df.index+1
print(df)
print(f"\nMENU:\n1 - Add Tools\n2 - Delete Tools\n3 - Modify Tools\n4 - Return")
elif option == 3:
f=open("List_Of_Tools.csv","r")
reader=csv.reader(f)
found=0
List=[]
tool=input("What tool do you want to update? ")
for r in reader:
if (r[0]==tool):
r[1]=input("What is the new count for this tool? ")
found=1
List.append(r)
f.close()
if found==0:
print("Tool not found.")
else:
f=open("List_Of_Tools.csv","w",newline='')
writer=csv.writer(f)
writer.writerows(List)
print(f"\n",tool,"is successfully updated!")
f.close()
print("\nWELCOME TO TOOLS INVENTORY")
print("\nInventory:")
df=pd.read_csv('List_Of_Tools.csv')
df.index = df.index+1
print(df)
print(f"\nMENU:\n1 - Add Tools\n2 - Delete Tools\n3 - Modify Tools\n4 - Return")
else:
break
except ValueError:
print('Invalid Input')
elif option == 2:
print("\nWELCOME TO PERSONNEL DIRECTORY")
print("\nPersonnel:")
df=pd.read_csv('List_Of_Personnel.csv')
df.index = df.index+1
print(df)
print(f"\nMENU:\n1 - Search\n2 - Return")
while True:
try:
option = int(input("SELECT: "))
if option not in (1, 2):
print("Invalid Selection")
continue
elif option == 1:
while True:
option = int(input(f"\nSearch by:\n1 - Name\n2 - Role\n3 - Cancel\nSELECT: "))
if option not in (1, 2, 3):
print("Invalid Selection")
continue
elif option == 1:
f=open("List_Of_Personnel.csv","r")
reader=csv.reader(f)
found=0
name=input("\nEnter complete name of personnel: ")
for r in reader:
if (r[0]==str(name)):
found=1
print('\nName: ',r[0],'\nRole: ',r[1],'\nContact Number: ',r[2],'\nEmail Address: ',r[3])
if found==0:
print("Name is not found.")
else:
break
elif option == 2:
f=open("List_Of_Personnel.csv","r")
reader=csv.reader(f)
found=0
role=input("Enter role of personnel: ")
for r in reader:
if (r[1]==role):
found=1
print('\nName: ',r[0],'\nRole: ',r[1],'\nContact Number: ',r[2],'\nEmail Address: ',r[3])
f.close()
if found==0:
print("Role is not found.")
else:
break
else:
break
else:
break
except ValueError:
print('Invalid Input')
elif option == 3:
while True:
try:
df= | pd.read_csv('Borrow_Return_LogBook.csv') | pandas.read_csv |
import pickle
import numpy as np
import pandas as pd
from sklearn.exceptions import ConvergenceWarning
from sklearn.mixture import BayesianGaussianMixture
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils.testing import ignore_warnings
class DataTransformer(object):
"""Data Transformer.
Model continuous columns with a BayesianGMM and normalized to a scalar
[0, 1] and a vector.
Discrete columns are encoded using a scikit-learn OneHotEncoder.
Args:
n_clusters (int):
Number of modes.
epsilon (float):
Epsilon value.
"""
def __init__(self, n_clusters=10, epsilon=0.005):
self.n_clusters = n_clusters
self.epsilon = epsilon
@ignore_warnings(category=ConvergenceWarning)
def _fit_continuous(self, column, data):
gm = BayesianGaussianMixture(
self.n_clusters,
weight_concentration_prior_type='dirichlet_process',
weight_concentration_prior=0.001,
n_init=1
)
gm.fit(data)
components = gm.weights_ > self.epsilon
num_components = components.sum()
return {
'name': column,
'model': gm,
'components': components,
'output_info': [(1, 'tanh'), (num_components, 'softmax')],
'output_dimensions': 1 + num_components,
}
def _fit_discrete(self, column, data):
ohe = OneHotEncoder(sparse=False)
ohe.fit(data)
categories = len(ohe.categories_[0])
return {
'name': column,
'encoder': ohe,
'output_info': [(categories, 'softmax')],
'output_dimensions': categories
}
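# Illustrative usage (a sketch; the column names are made up):
#
#     transformer = DataTransformer(n_clusters=10)
#     transformer.fit(df, discrete_columns=("gender", "city"))
#
# After fitting, each continuous column contributes a (1, 'tanh') scalar plus a
# softmax block over its retained Gaussian modes, and each discrete column a single
# one-hot softmax block, matching the 'output_info' entries built above.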
def fit(self, data, discrete_columns=tuple()):
self.output_info = []
self.output_dimensions = 0
if not isinstance(data, pd.DataFrame):
self.dataframe = False
data = | pd.DataFrame(data) | pandas.DataFrame |
import os
import random
import re
import pandas
dic=[]
with open('amount_v1v2.dic', 'r', encoding='utf-8') as f:
lines = f.readlines()
dic.extend([l.strip() for l in lines])
print(len(dic))
regdic = []
for word in dic:
if '_' in word:
word = word.replace('_',r'[_\d]?',10**10)
# r = re.compile("%s\s+(\[\s+\])" % word)
# else:
r = re.compile(word + ".+(\[\s+\])")
regdic.append(r)
r = re.compile(word + ".+({.underline})")
regdic.append(r)
r = re.compile(word + ".+([▁__]+)")
regdic.append(r)
def trigger(line):
for i in range(len(dic)):
if dic[i] in line:
return i, dic[i]
return None
def reg_trigger(line):
for i in range(len(regdic)):
match = regdic[i].search(line)
if match:
return i, match.group(1), dic[i//3], match.group(1)
# match = re.compile("(人民币.+元)").search(line)
# if match:
# return 1, match.group(1), "", match.group(1)
# match = re.compile("大写:(.+元)整").search(line)
# if match:
# return 1, match.group(1), "", match.group(1)
match = re.compile("([¥¥]([\u3000\s]+))").search(line)
if match:
return 0, match.group(1), "", match.group(1)
return None
def format2digit(word):
trans = ""
if word.startswith('十'):
trans += '1'
for c in word:
if c in self.FORMAL_DIGIT:
trans += self.math_digit[self.FORMAL_DIGIT.index(c)]
if c == '千' and not word.endswith('千'):
if '百' not in word and '十' not in word:
trans += "0"
if word.endswith(c):
if c == "十":
trans += '0'
if c == "百":
trans += '00'
if c == "千":
trans += '000'
return trans
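# Worked examples of the conversion above (with the corrected digit mapping):
#     format2digit("一千二百")  ->  "1200"
#     format2digit("三千")      ->  "3000"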
FORMAL_DIGIT="零一二三四五六七八九"
LARGE_FORMAL_DIGIT="零壹贰叁肆伍陆柒捌玖"
DIGIT_PAUSE=',\uFF0C'
DIGIT_SPLIT='.'
CARRY = "十百千万亿"
LARGE_CARRY = "拾佰仟萬億"
PREFIX="人民币"
PREFIX_SIGN="¥¥"
COMMON_UNIT='元'
UNIT='元角分'
math_digit="1234567890"
full_math_digit="\uFF10\uFF11\uFF12\uFF13\uFF14\uFF15\uFF16\uFF17\uFF18\uFF19"
def digit2formal(word, prefix_type = 0):
trans = ""
word = word.replace("万","").replace('元','')
natural = True
fraction = 0
for index, c in enumerate(str(word)):
if c == ".":
natural = False
continue
if natural:
if index % 4 == 1:
trans += '拾'
elif index % 4 == 2:
trans += '佰'
elif index % 4 == 3:
trans += '仟'
elif index == 4:
trans += '万'
elif index == 8:
trans += '亿'
if len(trans)> 1 and trans[-1] == '零' and LARGE_FORMAL_DIGIT[int(c)] =='零':
continue
trans += LARGE_FORMAL_DIGIT[int(c)]
else:
if fraction == 0:
trans = trans[::-1]
trans += "元" + LARGE_FORMAL_DIGIT[int(c)] + "角"
elif fraction == 1:
trans += LARGE_FORMAL_DIGIT[int(c)] + '分'
fraction += 1
if natural:
trans = trans[::-1] + "元"
if prefix_type== 1:
trans = "人民币"+ trans
return trans
amount_len = (1, 16)
category_size = 1000
df = | pandas.read_csv('contract.dic',sep=',',names=['word','frequency']) | pandas.read_csv |
# -*- coding:utf-8 -*-
"""
Macro economic data class
Created on 2019/01/09
@author: TabQ
@group : gugu
@contact: <EMAIL>
"""
import pandas as pd
import numpy as np
import re
import json
import time
from gugu.utility import Utility
from gugu.base import Base, cf
import sys
class Macro(Base):
def gdpYear(self, retry=3, pause=0.001):
"""
Get annual gross domestic product (GDP) data
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'year':, 'gdp':, ...}, ...]
year :statistics year
gdp :gross domestic product (100 million CNY)
pc_gdp :GDP per capita (CNY)
gnp :gross national product (100 million CNY)
pi :primary industry (100 million CNY)
si :secondary industry (100 million CNY)
industry :industry (100 million CNY)
cons_industry :construction (100 million CNY)
ti :tertiary industry (100 million CNY)
trans_industry :transport, storage, post and telecommunications (100 million CNY)
lbdy :wholesale, retail trade and catering (100 million CNY)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK4224641560861/MacPage_Service.get_pagedata?cate=nation&event=0&from=0&num=70&condition=&_=4224641560861
datastr = self.__parsePage('nation', 0, 70, retry, pause)
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.GDP_YEAR_COLS)
self._data[self._data==0] = np.NaN
return self._result()
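# Illustrative call (the Base constructor and the DataFrame/list return switch are
# defined elsewhere in the package, so the exact invocation is an assumption):
#     Macro().gdpYear()  # DataFrame or list of dicts with the fields listed above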
def gdpQuarter(self, retry=3, pause=0.001):
"""
Get quarterly gross domestic product (GDP) data
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'quarter':, 'gdp':, ...}, ...]
quarter :quarter
gdp :gross domestic product (100 million CNY)
gdp_yoy :GDP year-on-year growth (%)
pi :value added of the primary industry (100 million CNY)
pi_yoy:value added of the primary industry, year-on-year growth (%)
si :value added of the secondary industry (100 million CNY)
si_yoy :value added of the secondary industry, year-on-year growth (%)
ti :value added of the tertiary industry (100 million CNY)
ti_yoy :value added of the tertiary industry, year-on-year growth (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK3935140379887/MacPage_Service.get_pagedata?cate=nation&event=1&from=0&num=250&condition=&_=3935140379887
datastr = self.__parsePage('nation', 1, 250, retry, pause)
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.GDP_QUARTER_COLS)
self._data['quarter'] = self._data['quarter'].astype(object)
self._data[self._data==0] = np.NaN
return self._result()
def demandsToGdp(self, retry=3, pause=0.001):
"""
Get data on the contribution of the three major demand components to GDP
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'year':, 'cons_to':, ...}, ...]
year :statistics year
cons_to :contribution rate of final consumption expenditure (%)
cons_rate :pull of final consumption expenditure (percentage points)
asset_to :contribution rate of gross capital formation (%)
asset_rate:pull of gross capital formation (percentage points)
goods_to :contribution rate of net exports of goods and services (%)
goods_rate :pull of net exports of goods and services (percentage points)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK3153587567694/MacPage_Service.get_pagedata?cate=nation&event=4&from=0&num=80&condition=&_=3153587567694
datastr = self.__parsePage('nation', 4, 80, retry, pause)
datastr = datastr.replace('"','').replace('null','0')
js = json.loads(datastr)
self._data = pd.DataFrame(js,columns=cf.GDP_FOR_COLS)
self._data[self._data==0] = np.NaN
return self._result()
def idsPullToGdp(self, retry=3, pause=0.001):
"""
Get data on how much each of the three major industries pulls GDP growth
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'year':, 'gdp_yoy':, ...}, ...]
year :statistics year
gdp_yoy :GDP year-on-year growth (%)
pi :pull rate of the primary industry (%)
si :pull rate of the secondary industry (%)
industry:of which, pull of industry (%)
ti :pull rate of the tertiary industry (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK1083239038283/MacPage_Service.get_pagedata?cate=nation&event=5&from=0&num=60&condition=&_=1083239038283
datastr = self.__parsePage('nation', 5, 60, retry, pause)
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.GDP_PULL_COLS)
self._data[self._data==0] = np.NaN
return self._result()
def idsCtbToGdp(self, retry=3, pause=0.001):
"""
Get contribution-rate data for the three major industries
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'year':, 'gdp_yoy':, ...}, ...]
year :statistics year
gdp_yoy :gross domestic product
pi :contribution rate of the primary industry (%)
si :contribution rate of the secondary industry (%)
industry:of which, contribution rate of industry (%)
ti :contribution rate of the tertiary industry (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK4658347026358/MacPage_Service.get_pagedata?cate=nation&event=6&from=0&num=60&condition=&_=4658347026358
datastr = self.__parsePage('nation', 6, 60, retry, pause)
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.GDP_CONTRIB_COLS)
self._data[self._data==0] = np.NaN
return self._result()
def cpi(self, retry=3, pause=0.001):
"""
Get consumer price index (CPI) data
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'month':, 'cpi':,}, ...]
month :statistics month
cpi :price index
"""
self._data = pd.DataFrame()
datastr = self.__parsePage('price', 0, 600, retry, pause)
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.CPI_COLS)
self._data['cpi'] = self._data['cpi'].astype(float)
return self._result()
def ppi(self, retry=3, pause=0.001):
"""
Get producer price index (PPI) data for industrial products
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'month':, 'ppiip':, ...}, ...]
month :statistics month
ppiip :ex-factory price index of industrial products
ppi :price index of means of production
qm:price index of mining and quarrying
rmi:price index of raw materials
pi:price index of the processing industry
cg:price index of means of subsistence (consumer goods)
food:food price index
clothing:clothing price index
roeu:price index of general daily necessities
dcg:price index of durable consumer goods
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK6734345383111/MacPage_Service.get_pagedata?cate=price&event=3&from=0&num=600&condition=&_=6734345383111
datastr = self.__parsePage('price', 3, 600, retry, pause)
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.PPI_COLS)
for i in self._data.columns:
self._data[i] = self._data[i].apply(lambda x:np.where(x is None, np.NaN, x))
if i != 'month':
self._data[i] = self._data[i].astype(float)
return self._result()
def depositRate(self, retry=3, pause=0.001):
"""
Get deposit interest rate data
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'date':, 'deposit_type':, ...}, ...]
date :change date
deposit_type :type of deposit
rate:interest rate (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK1250640915421/MacPage_Service.get_pagedata?cate=fininfo&event=2&from=0&num=600&condition=&_=1250640915421
datastr = self.__parsePage('fininfo', 2, 600, retry, pause)
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.DEPOSIT_COLS)
for i in self._data.columns:
self._data[i] = self._data[i].apply(lambda x:np.where(x is None, '--', x))
return self._result()
def loanRate(self, retry=3, pause=0.001):
"""
Get loan interest rate data
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'date':, 'loan_type':, ...}, ...]
date :effective date
loan_type :type of loan
rate:interest rate (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK7542659823280/MacPage_Service.get_pagedata?cate=fininfo&event=3&from=0&num=800&condition=&_=7542659823280
datastr = self.__parsePage('fininfo', 3, 800, retry, pause)
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.LOAN_COLS)
for i in self._data.columns:
self._data[i] = self._data[i].apply(lambda x:np.where(x is None, '--', x))
return self._result()
def rrr(self, retry=3, pause=0.001):
"""
Get required reserve ratio data
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'date':, 'before':, ...}, ...]
date :change date
before :required reserve ratio before the adjustment (%)
now:required reserve ratio after the adjustment (%)
changed:size of the adjustment (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK8028217046046/MacPage_Service.get_pagedata?cate=fininfo&event=4&from=0&num=100&condition=&_=8028217046046
datastr = self.__parsePage('fininfo', 4, 100, retry, pause)
datastr = datastr if self._PY3 else datastr.decode('gbk')
js = json.loads(datastr)
self._data = pd.DataFrame(js, columns=cf.RRR_COLS)
for i in self._data.columns:
self._data[i] = self._data[i].apply(lambda x:np.where(x is None, '--', x))
return self._result()
def moneySupply(self, retry=3, pause=0.001):
"""
Get money supply data
Parameters
--------
retry : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0.001
seconds to pause between repeated requests, to avoid problems caused by requests being too close together
Return
--------
DataFrame or List: [{'month':, 'm2':, ...}, ...]
month :statistics month
m2 :money and quasi-money (broad money M2) (100 million CNY)
m2_yoy:M2 year-on-year growth (%)
m1:money (narrow money M1) (100 million CNY)
m1_yoy:M1 year-on-year growth (%)
m0:cash in circulation (M0) (100 million CNY)
m0_yoy:M0 year-on-year growth (%)
cd:demand deposits (100 million CNY)
cd_yoy:demand deposits year-on-year growth (%)
qm:quasi-money (100 million CNY)
qm_yoy:quasi-money year-on-year growth (%)
ftd:time deposits (100 million CNY)
ftd_yoy:time deposits year-on-year growth (%)
sd:savings deposits (100 million CNY)
sd_yoy:savings deposits year-on-year growth (%)
rests:other deposits (100 million CNY)
rests_yoy:other deposits year-on-year growth (%)
"""
self._data = pd.DataFrame()
# http://money.finance.sina.com.cn/mac/api/jsonp.php/SINAREMOTECALLCALLBACK9019314616219/MacPage_Service.get_pagedata?cate=fininfo&event=1&from=0&num=600&condition=&_=9019314616219
datastr = self.__parsePage('fininfo', 1, 600, retry, pause)
datastr = datastr if self._PY3 else datastr.decode('gbk')
js = json.loads(datastr)
self._data = | pd.DataFrame(js, columns=cf.MONEY_SUPPLY_COLS) | pandas.DataFrame |
#!/usr/bin/python
# takes a GTF that matches a BED file and adds annotations
import argparse
import os
import re
import subprocess as sp
from time import time
import warnings
from intervaltree import Interval, IntervalTree
import pandas as pd
from tqdm import tqdm
def load_bed(infile_path: str):
"""
Load and parse a bed file. More information on the bedfile format is here:
https://genome.ucsc.edu/FAQ/FAQformat.html#format1
Arguments:
(REQUIRED) infile_path: path to input bed file
"""
return pd.read_csv(infile_path, sep="\t", header=None, index_col=0)
def load_sizes(infile_path: str, header: bool=None):
"""
Load and parse a chromosome sizes file. Each line maps a chromosome name to its
length in bases, as in the example below.
Arguments:
(REQUIRED) infile_path: path to chromosome sizes file
(OPTIONAL) header: header row of the sizes file (DEFAULT: None)
chr1 247249719
chr2 242951149
...
"""
return | pd.read_csv(infile_path, sep="\t", header=None, index_col=0) | pandas.read_csv |
"""
Analytics Vidhya Jobathon
File Description: Model Build for the Health Insurance prediction challenge
Date: 27/02/2021
Author: <EMAIL>
"""
#import required libraries
import pandas as pd
import numpy as np
from datetime import datetime
#import user created libraries
from utils import load_data,prepare_data,save_data,prepare_train_validation_data,\
train_model,train_model_catboost,evaluate,create_submission,test_prediction,explore_algorithms,TARGET,PRIMARY_KEY,\
PROCESSED_TRAIN_DATA_PATH,PROCESSED_TEST_DATA_PATH,SUBMISSION_FILE_PATH,RAW_TRAIN_DATA_PATH,RAW_TEST_DATA_PATH
#set the seed for reproducibility
np.random.seed(100)
#load the datasets
modelling_df = load_data(PROCESSED_TRAIN_DATA_PATH)
test_df = load_data(PROCESSED_TEST_DATA_PATH)
#select feature set
EXCLUDE = ['Blank','Holding_Policy_Type','Holding_Policy_Duration']
FEATURES = [col for col in modelling_df.columns if col not in [TARGET,PRIMARY_KEY] + EXCLUDE]
#prepare train + validation data
train_df , validation_df = prepare_train_validation_data(modelling_df)
#fit the model using cross validation grid search
model = train_model(train_df,FEATURES)
#check model performance on holdout data
score = evaluate(validation_df, TARGET, FEATURES,model)
#driver analysis
importances = | pd.DataFrame({'feature':FEATURES,'importance':model.best_estimator_.feature_importances_}) | pandas.DataFrame |
from __future__ import print_function
from __future__ import division
import numpy as np
import scipy.sparse as spa
from builtins import range
import os
import pandas as pd
# Import subprocess to run matlab script
from subprocess import call
from platform import system
# For importing python modules from string
import importlib
class QPmatrices(object):
"""
QP problem matrices
q_vecs is the matrix containing different linear costs
"""
def __init__(self, P, q_vecs, A, l, u, n, m):
self.P = P
self.q_vecs = q_vecs
self.A = A
self.l = l
self.u = u
self.n = n
self.m = m
def gen_qp_matrices(m, n, gammas):
"""
Generate QP matrices for lasso problem
"""
# Reset random seed for repetibility
np.random.seed(1)
# Problem parameters
dens_lvl = 0.4
# Generate data
Ad = spa.random(m, n, density=dens_lvl, format='csc')
x_true = np.multiply((np.random.rand(n) > 0.5).astype(float),
np.random.randn(n)) / np.sqrt(n)
bd = Ad.dot(x_true) + .5*np.random.randn(m)
# minimize y.T * y + gamma * np.ones(n).T * t
# subject to y = Ax - b
# -t <= x <= t
P = spa.block_diag((spa.csc_matrix((n, n)), spa.eye(m),
spa.csc_matrix((n, n))), format='csc')
# q = np.append(np.zeros(m + n), gamma*np.ones(n))
In = spa.eye(n)
Onm = spa.csc_matrix((n, m))
A = spa.vstack([spa.hstack([Ad, -spa.eye(m), Onm.T]),
spa.hstack([In, Onm, In]),
spa.hstack([-In, Onm, In])]).tocsc()
l = np.hstack([bd, np.zeros(2*n)])
u = np.hstack([bd, np.inf * np.ones(2*n)])
# Create linear cost vectors
q_vecs = np.empty((2*n + m, 0))
for gamma in gammas:
q_vecs = np.column_stack(
(q_vecs, np.append(np.zeros(n+m), gamma*np.ones(n))))
qp_matrices = QPmatrices(P, q_vecs, A, l, u, n, m)
# Return QP matrices
return qp_matrices
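# Dimension check for the smallest instance (n = 10, m = 100): the stacked variable
# (x, y, t) has 2*n + m = 120 entries, so P is 120 x 120, A has m + 2*n = 120 rows,
# and q_vecs holds one 120-entry cost vector per gamma (21 columns here).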
def solve_loop(qp_matrices, solver='emosqp'):
"""
Solve portfolio optimization loop for all gammas
"""
# Shorter name for qp_matrices
qp = qp_matrices
print('\nSolving lasso problem loop for n = %d and solver %s' %
(qp.n, solver))
# Get number of problems to solve
n_prob = qp.q_vecs.shape[1]
# Results list
results = []
if solver == 'emosqp':
# Pass the data to OSQP
m = osqp.OSQP()
m.setup(qp.P, qp.q_vecs[:, 0], qp.A, qp.l, qp.u,
rho=10., verbose=False)
# Get extension name
module_name = 'emosqpn%s' % str(qp.n)
# Generate the code
m.codegen("code", python_ext_name=module_name, force_rewrite=True)
# Import module
emosqp = importlib.import_module(module_name)
for i in range(n_prob):
q = qp.q_vecs[:, i]
# Update linear cost
emosqp.update_lin_cost(q)
# Solve
x, y, status, niter, time = emosqp.solve()
# Check if status correct
if status != 1:
print('OSQP did not solve the problem!')
import ipdb
ipdb.set_trace()
raise ValueError('OSQP did not solve the problem!')
# Solution statistics
solution_dict = {'solver': [solver],
'runtime': [time],
'iter': [niter],
'n': [qp.n]}
results.append(pd.DataFrame(solution_dict))
elif solver == 'qpoases':
n_dim = qp.P.shape[0]
m_dim = qp.A.shape[0]
# Initialize qpoases and set options
qpoases_m = qpoases.PyQProblem(n_dim, m_dim)
options = qpoases.PyOptions()
options.printLevel = qpoases.PyPrintLevel.NONE
qpoases_m.setOptions(options)
# Setup matrix P and A
P = np.ascontiguousarray(qp.P.todense())
A = np.ascontiguousarray(qp.A.todense())
for i in range(n_prob):
# Get linear cost as contiguous array
q = np.ascontiguousarray(qp.q_vecs[:, i])
# Reset cpu time
qpoases_cpu_time = np.array([10.])
# Reset number of of working set recalculations
nWSR = np.array([10000])
if i == 0:
res_qpoases = qpoases_m.init(P, q, A, None, None, qp.l, qp.u,
nWSR, qpoases_cpu_time)
else:
# Solve new hot started problem
res_qpoases = qpoases_m.hotstart(q, None, None, qp.l, qp.u,
nWSR, qpoases_cpu_time)
# Solution statistics
solution_dict = {'solver': [solver],
'runtime': [qpoases_cpu_time[0]],
'iter': [nWSR[0]],
'n': [qp.n]}
results.append(pd.DataFrame(solution_dict))
elif solver == 'gurobi':
n_dim = qp.P.shape[0]
m_dim = qp.A.shape[0]
for i in range(n_prob):
# Get linear cost as contiguous array
q = np.ascontiguousarray(qp.q_vecs[:, i])
# solve with gurobi
prob = mpbpy.QuadprogProblem(qp.P, q, qp.A, qp.l, qp.u)
res = prob.solve(solver=mpbpy.GUROBI, verbose=False)
# Solution statistics
solution_dict = {'solver': [solver],
'runtime': [res.cputime],
'iter': [res.total_iter],
'n': [qp.n]}
results.append(pd.DataFrame(solution_dict))
else:
raise ValueError('Solver not understood')
return pd.concat(results)
'''
Problem parameters
'''
# Generate gamma parameters and cost vectors
n_gamma = 21
gammas = np.logspace(2, -2, n_gamma)
# Number of parameters
n_vec = np.array([10, 20, 30, 50, 80, 100, 150, 200, 250, 300, 350, 400])
# Measurements
m_vec = (10 * n_vec).astype(int)
# Setup if solve with gurobi/qpoases or not
solve_osqp = True
solve_gurobi = True
solve_qpoases = True
# Define statistics for osqp, gurobi and qpoases
if solve_osqp:
import osqp
osqp_stats = []
problem_stats = []
if solve_gurobi:
import mathprogbasepy as mpbpy
gurobi_stats = []
if solve_qpoases:
import qpoases
qpoases_stats = []
# Size of the exe file generated by OSQP
if solve_osqp:
if system() == 'Windows':
cmdsep = '&'
makefile = '"MinGW Makefiles"'
example_fullname = 'example.exe'
else:
cmdsep = ';'
makefile = '"Unix Makefiles"'
example_fullname = 'example'
'''
Solve problems
'''
for i in range(len(n_vec)):
# Generate QP sparse matrices
qp_matrices = gen_qp_matrices(m_vec[i], n_vec[i], gammas)
if solve_osqp:
# Solving loop with emosqp
stats = solve_loop(qp_matrices, 'emosqp')
osqp_stats.append(stats)
# Get size of the generated exe file in KB
call('cd code %s ' % (cmdsep) +
'mkdir build %s ' % (cmdsep) +
'cd build %s ' % (cmdsep) +
'cmake -G %s .. %s ' % (makefile, cmdsep) +
' cmake --build .',
shell=True)
example_path = os.path.join('code', 'build', 'out', example_fullname)
example_size = int(round(os.path.getsize(example_path) / 1024.))
# Problem statistics
N = qp_matrices.P.nnz + qp_matrices.A.nnz
problem_dict = {'n': [qp_matrices.n],
'm': [qp_matrices.m],
'N': [N],
'filesize': example_size}
problem_stats.append(pd.DataFrame(problem_dict))
if solve_qpoases:
# Solving loop with qpoases
stats = solve_loop(qp_matrices, 'qpoases')
qpoases_stats.append(stats)
if solve_gurobi:
# Solve loop with gurobi
stats = solve_loop(qp_matrices, 'gurobi')
gurobi_stats.append(stats)
'''
Store results in CSV files
'''
if solve_osqp:
# Combine OSQP stats and store them in a CSV file
df = pd.concat(osqp_stats)
df.to_csv('osqp_stats.csv', index=False)
# Combine problem stats and store them in a CSV file
df = | pd.concat(problem_stats) | pandas.concat |
import pandas as pd
from decision_tree import DecisionTree
import sys
def benchmark_dataset():
benchmark_dataset = pd.read_csv('data/benchmark_dataset.tsv', sep='\t')
test_dataset = pd.DataFrame(data={
'Tempo': ['Ensolarado', 'Nublado'],
'Temperatura': ['Quente', 'Quente'],
'Umidade': ['Alta', 'Alta'],
'Ventoso': ['Falso', 'Falso']
})
decision_tree = DecisionTree(
classification_attribute='Joga',
attribute_types={
'Tempo': 'discrete',
'Temperatura': 'discrete',
'Umidade': 'discrete',
'Ventoso': 'discrete'
}
)
decision_tree.train(benchmark_dataset)
predictions = decision_tree.predict(test_dataset)
sys.stdout.write('\x1b[1;34m' + 'Benchmark dataset - resultant decision tree:' + '\x1b[0m' + '\n\n')
decision_tree.show()
print('Test dataset:')
print(test_dataset)
print(f'\nPredictions: {predictions}\n\n')
def votes_dataset():
# Votes dataset
train_dataframe = | pd.read_csv('data/house_votes_84.tsv', sep='\t') | pandas.read_csv |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
# either, depending on numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
# with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
# add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
Index([np.nan, 1]),
Index([1, 2, np.nan]),
Index(['a', 'b', np.nan]),
pd.to_datetime(['NaT']),
pd.to_datetime(['NaT', '2000-01-01']),
pd.to_datetime(['2000-01-01', 'NaT', '2000-01-02']),
pd.to_timedelta(['1 day', 'NaT']),
]
for index in examples:
self.assertFalse(index.is_monotonic_increasing)
self.assertFalse(index.is_monotonic_decreasing)
def test_equals(self):
same_values = Index(self.index, dtype=object)
self.assertTrue(self.index.equals(same_values))
self.assertTrue(same_values.equals(self.index))
def test_identical(self):
i = Index(self.index.copy())
self.assertTrue(i.identical(self.index))
same_values_different_type = Index(i, dtype=object)
self.assertFalse(i.identical(same_values_different_type))
i = self.index.copy(dtype=object)
i = i.rename('foo')
same_values = Index(i, dtype=object)
self.assertTrue(same_values.identical(self.index.copy(dtype=object)))
self.assertFalse(i.identical(self.index))
self.assertTrue(Index(same_values, name='foo', dtype=object
).identical(i))
self.assertFalse(
self.index.copy(dtype=object)
.identical(self.index.copy(dtype='int64')))
def test_get_indexer(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target)
expected = np.array([0, -1, 1, -1, 2, -1, 3, -1, 4, -1])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_pad(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='pad')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
self.assert_numpy_array_equal(indexer, expected)
def test_get_indexer_backfill(self):
target = Int64Index(np.arange(10))
indexer = self.index.get_indexer(target, method='backfill')
expected = np.array([0, 1, 1, 2, 2, 3, 3, 4, 4, 5])
self.assert_numpy_array_equal(indexer, expected)
def test_join_outer(self):
other = Int64Index([7, 12, 25, 1, 2, 5])
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
from dtoolkit.util import multi_if_else
if TYPE_CHECKING:
from typing import Iterable
from dtoolkit._typing import OneDimArray
from dtoolkit._typing import SeriesOrFrame
from dtoolkit._typing import TwoDimArray
def get_inf_range(inf: str = "all") -> list[float]:
return multi_if_else(
[
(inf == "all", [np.inf, -np.inf]),
(inf == "pos", [np.inf]),
(inf == "neg", [-np.inf]),
(inf is not None, ValueError(f"invalid inf option: {inf}")),
],
TypeError("must specify inf"),
)
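# Minimal sketch of the mapping above (illustrative, not from the dtoolkit docs):
#   get_inf_range("all") -> [inf, -inf]
#   get_inf_range("pos") -> [inf]
#   get_inf_range("neg") -> [-inf]
# Any other string raises ValueError; passing None falls through to the TypeError.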
def get_mask(how: str, mask: TwoDimArray, axis: int) -> OneDimArray:
return multi_if_else(
[
(how == "any", mask.any(axis=axis)),
(how == "all", mask.all(axis=axis)),
(how is not None, ValueError(f"invalid inf option: {how}")),
],
TypeError("must specify how"),
)
def isin(
df: pd.DataFrame,
values: Iterable | SeriesOrFrame | dict[str, list[str]],
axis: int | str = 0,
) -> pd.DataFrame:
"""
Extend :meth:`~pandas.DataFrame.isin` function. When ``values`` is
:obj:`dict` and ``axis`` is 1, ``values``' key could be index name.
"""
from collections import defaultdict
axis = df._get_axis_number(axis)
if isinstance(values, dict) and axis == 1:
values = defaultdict(list, values)
result = (df.iloc[[r]].isin(values[i]) for r, i in enumerate(df.index))
return pd.concat(result, axis=0)
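# Illustrative sketch only (toy data, not from dtoolkit's test suite): with ``axis=1``
# the dict keys are treated as index labels, so each row is matched against its own
# list of allowed values.
def _isin_rowwise_example() -> pd.DataFrame:
    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])
    # Row "x" checks membership in [1, 3]; row "y" checks membership in [4] only:
    #        a      b
    # x   True   True
    # y  False   True
    return isin(df, {"x": [1, 3], "y": [4]}, axis=1)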
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: Apache Software License 2.0
"""Module containing functionality to plot stacked bar charts."""
import matplotlib
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import seaborn as sns
from nannyml.plots.colors import Colors
def _create_value_counts_table(
feature_table,
feature_column_name,
chunk_column_name,
missing_category_label,
max_number_of_categories,
):
value_counts_table = feature_table[[chunk_column_name, feature_column_name]].copy()
value_counts_table[feature_column_name] = value_counts_table[feature_column_name].fillna(missing_category_label)
if max_number_of_categories:
top_categories = (
value_counts_table[feature_column_name].value_counts().index.tolist()[:max_number_of_categories]
)
if value_counts_table[feature_column_name].nunique() > max_number_of_categories + 1:
value_counts_table.loc[
~value_counts_table[feature_column_name].isin(top_categories), feature_column_name
] = 'Other'
categories_ordered = value_counts_table[feature_column_name].value_counts().index.tolist()
value_counts_table[feature_column_name] = pd.Categorical(
value_counts_table[feature_column_name], categories_ordered
)
value_counts_table = (
value_counts_table.groupby(chunk_column_name)[feature_column_name]
.value_counts()
.to_frame('value_counts')
.reset_index()
.rename(columns={'level_1': feature_column_name})
)
value_counts_table['value_counts_total'] = value_counts_table[chunk_column_name].map(
value_counts_table.groupby(chunk_column_name)['value_counts'].sum()
)
value_counts_table['value_counts_normalised'] = (
value_counts_table['value_counts'] / value_counts_table['value_counts_total']
)
return value_counts_table
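# Illustrative shape of the returned frame (hypothetical feature 'f' and chunk key,
# not actual nannyml output): one row per (chunk, category) pair with the raw count,
# the per-chunk total and the normalised share, e.g.
#
#   chunk   f    value_counts  value_counts_total  value_counts_normalised
#   chunk0  'a'  60            100                 0.60
#   chunk0  'b'  40            100                 0.40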
def _create_stacked_bar_table(
drift_table,
value_counts_table,
start_date_column_name,
end_date_column_name,
chunk_type_column_name,
chunk_column_name,
drift_column_name,
chunk_types,
date_label_hover_format,
):
stacked_bar_table = pd.merge(drift_table, value_counts_table, on=chunk_column_name)
# County median home prices (FHFA HPI & Census)
import pandas as pd
import numpy as np
import array
# House price data based on FHFA House Price Index
hpi_df = pd.read_excel('HPI_AT_BDL_county.xlsx', skiprows=6) # Source: https://www.fhfa.gov/PolicyProgramsResearch/Research/Pages/wp1601.aspx
# Replacing null HPI values w/ avg. of values before and after (if they exist- otherwise, values remain null)
hpi_df.HPI = hpi_df.HPI.replace('.', np.nan).astype(float)
hpi_df.HPI = hpi_df.HPI.fillna((hpi_df.HPI.shift() + hpi_df.HPI.shift(-1))/2) # Source: https://stackoverflow.com/questions/44032771/fill-cell-containing-nan-with-average-of-value-before-and-after
# Filling the remaining missing HPI cells w/ the next most recent HPI value (since the relevant missing HPIs are only from 2013 onwards, the HPI will be the most recent value from the same FIPS)
hpi_df.HPI = hpi_df.HPI.fillna(method='ffill')
# Adding county/state column (to displaying location via interactive hover map)
counties = pd.read_excel('CLF01.xls', usecols=[0,1])[2:] # Source: https://www.census.gov/library/publications/2011/compendia/usa-counties-2011.html
areaname_dict = {}
for fips in counties['STCOU']:
area = counties.Areaname[counties.STCOU == fips].iloc[0]
areaname_dict[fips] = area
hpi_df['Areaname'] = hpi_df['FIPS code'].map(areaname_dict)
hpi_df = hpi_df.rename({'FIPS code': 'FIPS'}, axis=1)
hpi_df = hpi_df[['Year', 'State', 'FIPS', 'HPI', 'Areaname']]
hpi_df = hpi_df.sort_values(['FIPS', 'Year']).reset_index(drop=True)
#--------------------------------------------------------------------------------------------------#
# Converting HPI values to House Prices:
# 2012-2016 median home prices (using this data instead of 2013-2017 data since 25 records were missing 2017 HPIs, whereas 2016 was only missing 13)
home_prices1216 = pd.read_csv('ACS_16_5YR_Home_Prices.csv', skiprows=3, usecols=[4,6,7], encoding='latin') # Source: https://factfinder.census.gov/ (searched for "Median Value of Owner-Occupied" in *advanced* search)
home_prices1216.columns = ['FIPS', 'County', 'Med_Price']
home_prices1216 = home_prices1216[home_prices1216.Med_Price != '-'] # Only two rows contained this filler value
home_prices1216['Med_Price'] = home_prices1216['Med_Price'].astype(int)
# Calculating annual home prices based on 2012-2016 avg. price & 2014 HPI
adj_prices = array.array('f')
for fips in hpi_df.FIPS[hpi_df.Year == 2016].unique():
hpi14 = hpi_df.HPI[(hpi_df.FIPS == fips) & (hpi_df.Year == 2014)].iloc[0]
price1216 = home_prices1216.Med_Price[home_prices1216.FIPS == fips].iloc[0]
df = hpi_df[hpi_df.FIPS == fips]
adj_prices.extend(df['HPI'] / hpi14 * price1216)
hpi_df['Med_House_Price'] = [int(round(x,0)) for x in adj_prices]
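# Worked example of the rescaling above (made-up numbers): if a county's 2012-2016
# median price is $200,000, its 2014 HPI is 180 and its 1995 HPI is 90, the estimated
# 1995 median price is 90 / 180 * 200,000 = $100,000.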
#--------------------------------------------------------------------------------------------------#
# Adding Zillow House Price Data
# (data for counties missing from hpi_df - additional 78 counties)
zillow = pd.read_csv('Zillow County Monthly.csv', usecols=[1,2,4,5] + list(range(15, 280, 12)), encoding='latin') # Source: https://www.zillow.com/research/data/ (Home Values - ZHVI All Homes - County)
zillow['FIPS'] = (zillow.StateCodeFIPS.astype(str) + zillow.MunicipalCodeFIPS.astype(str).apply(lambda x:x.zfill(3))).astype(int)
zillow = zillow.drop(['StateCodeFIPS', 'MunicipalCodeFIPS'], axis=1)
zillow.columns = ['County', 'State'] + [str(yr) for yr in range(1996, 2019)] + ['FIPS']
zillowfips = list(zillow.FIPS.sort_values().values)
zillow_extra = []
for fips in zillowfips:
if fips not in hpi_df.FIPS.unique():
zillow_extra.append(fips)
# Calculating state avg. house price % change (to replace zillow records w/ less than 8 price values recorded, since a larger sample is needed for a more accurate estimate)
pct_chg_dict = {}
for state in hpi_df.State.unique():
st_df = hpi_df[hpi_df.State == state]
st_chgs = []
for fips in st_df.FIPS.unique():
fp_df = st_df[st_df.FIPS == fips]
st_chgs.append(fp_df.HPI.pct_change().mean())
pct_chg_dict[state] = sum(st_chgs) / len(st_chgs)
zillow_new = pd.DataFrame()
for fips in zillow_extra:
new_prices = []
df = zillow[zillow.FIPS == fips].dropna(axis=1) # Only keeping cols of years w/ no nulls
if len(df.columns) > 8: # Excludes 4 FIPS (too few home price values to calculate accurate rate of price change)
cols = df.columns[2:-1] # (96-18 cols)
prices = list(df[cols].iloc[0].astype(int).values)
avg_chg = pd.Series(prices).pct_change().mean()
earliest_price = df.iloc[0][2] # 96 price or earliest yr available after
earliest_yr = int(cols[0])
missing_yrs = range(earliest_yr - 1, 1988, -1) # earliest year down to 1989 (backwards)
state = df.State.iloc[0]
if len(df.columns) < 11: # i.e. 9 or 10
avg_chg = pct_chg_dict[state] # If not previously assigned above
for yr in missing_yrs:
new_earliest = int(round(earliest_price/(1 + avg_chg),0))
new_prices.append(new_earliest)
earliest_price = new_earliest
new_prices.reverse()
new_df = pd.DataFrame({'Year':range(1989, 2019), 'FIPS':list(np.full(30, fips)), 'Med_House_Price':(new_prices + prices)})
zillow_new = pd.concat([zillow_new, new_df])
#--------------------------------------------------------------------------------------------------#
# Combining Zillow & FHFA data
no_HPI = hpi_df.drop('HPI', axis=1)
zillow_new['Areaname'] = zillow_new['FIPS'].map(areaname_dict)
zillow_new['State'] = [x.split(', ')[1] for x in zillow_new['Areaname'].values]
zillow_new = zillow_new[no_HPI.columns]
combined = pd.concat([no_HPI, zillow_new])
import pandas
from openff.evaluator.datasets import PhysicalPropertyDataSet
from openff.evaluator.properties import enthalpy, density
from openff.toolkit.typing.engines.smirnoff import ForceField
import os
import numpy as np
import torch
from LJ_surrogates.surrogates.surrogate import build_surrogate_lightweight, build_surrogates_loo_cv, \
build_surrogate_lightweight_botorch, build_multisurrogate_lightweight_botorch
import matplotlib.pyplot as plt
import tqdm
import copy
import gpytorch
def collate_physical_property_data(directory, smirks, initial_forcefield, properties_filepath, device):
data = []
for i in range(int(len(os.listdir(directory)) / 2)):
if os.path.isfile(os.path.join(directory, 'force_field_' + str(i + 1) + '.offxml')) and os.path.isfile(
os.path.join(directory, 'estimated_data_set_' + str(i + 1) + '.json')):
forcefield = ForceField(os.path.join(directory, 'force_field_' + str(i + 1) + '.offxml'))
results = PhysicalPropertyDataSet.from_json(
os.path.join(directory, 'estimated_data_set_' + str(i + 1) + '.json'))
parameters = get_force_field_parameters(forcefield, smirks)
if len(results.estimated_properties) != 0:
data.append([results.estimated_properties, parameters])
# if len(results) != 0:
# data.append([results, parameters])
print(f'Started with {i + 1} datasets, removed {i + 1 - len(data)} empty dataset(s)')
initial_forcefield = ForceField(initial_forcefield)
initial_parameters = get_force_field_parameters(initial_forcefield, smirks)
properties = PhysicalPropertyDataSet.from_json(properties_filepath)
dataplex = get_training_data_new(data, properties, initial_parameters, device)
# dataplex.plot_properties()
# properties_all = get_training_data(data)
return dataplex
def get_force_field_parameters(forcefield, smirks):
lj_params = forcefield.get_parameter_handler('vdW', allow_cosmetic_attributes=True)
param_dict = {}
for i in range(len(lj_params.parameters)):
if lj_params[i].smirks in smirks:
param_dict[lj_params[i].smirks] = [lj_params[i].epsilon, lj_params[i].rmin_half]
return param_dict
class LJParameter:
def __init__(self, identity, values):
self.smirks = identity
self.epsilon = values[0]
self.rmin_half = values[1]
class ParameterSetData:
def __init__(self, datum):
self.property_measurements = datum[0]
self.parameters = []
for key in datum[1].keys():
self.parameters.append(LJParameter(key, datum[1][key]))
class ParameterSetDataMultiplex:
def __init__(self, ParameterDataSetList, InitialProperties, InitialParameters, device):
self.multi_data = ParameterDataSetList
self.parameters = self.multi_data[0].parameters
self.properties = InitialProperties
self.initial_parameters = InitialParameters
self.device = device
def align_data(self):
pass
def check_parameters(self):
multi_data = []
equality = True
for dataset in self.multi_data:
for i in range(len(self.parameters)):
if dataset.parameters[i].smirks != self.parameters[i].smirks:
equality = False
if equality is True:
multi_data.append(dataset)
self.multi_data = multi_data
def check_properties(self):
multi_data = []
failed_data = []
self.properties = canonicalize_dataset(self.properties)
for dataset in self.multi_data:
dataset.property_measurements = canonicalize_dataset(dataset.property_measurements)
equality = True
if len(self.properties) != len(dataset.property_measurements):
equality = False
else:
for i in range(len(self.properties)):
if type(self.properties.properties[i]) != type(dataset.property_measurements.properties[i]) or \
self.properties.properties[i].substance != dataset.property_measurements.properties[
i].substance \
or self.properties.properties[i].thermodynamic_state != \
dataset.property_measurements.properties[i].thermodynamic_state:
equality = False
if equality == True:
multi_data.append(dataset)
else:
failed_data.append(dataset)
self.multi_data = multi_data
self.failed_data = failed_data
def prune_bad_densities(self):
print('Eliminating Bad Density Measurements...')
before = len(self.multi_data)
self.bad_density_data = []
for i, property in enumerate(self.properties.properties):
if self.property_labels[i].endswith('Density'):
to_pop = []
for j, measurement in enumerate(self.multi_data):
if measurement.property_measurements.properties[i].value.m <= 0.1 * property.value.m:
to_pop.append(j)
for pop in sorted(to_pop, reverse=True):
self.bad_density_data.append(self.multi_data[pop])
del self.multi_data[pop]
after = len(self.multi_data)
print(f"Removed {before - after} datasets due to bad density measurments")
def prune_low_aa_hvaps(self):
print('Removing Low Energy Acetic Acid measurements')
before = len(self.multi_data)
self.bad_hvap_data = []
for i, property in enumerate(self.properties.properties):
if self.property_labels[i] == 'CC(=O)O{solv}{x=1.000000}_EnthalpyOfVaporization':
to_pop = []
for j, measurement in enumerate(self.multi_data):
if measurement.property_measurements.properties[i].value.m <= 60:
to_pop.append(j)
for pop in sorted(to_pop, reverse=True):
self.bad_hvap_data.append(self.multi_data[pop])
del self.multi_data[pop]
after = len(self.multi_data)
print(f"Removed {before - after} datasets due to low acetic acid hvap measurments")
def align_property_data(self):
parameter_labels = []
for parameter in self.parameters:
parameter_labels.append(parameter.smirks + '_epsilon')
parameter_labels.append(parameter.smirks + '_rmin_half')
property_labels = []
for property in self.properties.properties:
property_type = str(type(property)).split(sep='.')[-1].rstrip("'>")
property_labels.append(str(property.substance) + "_" + property_type)
self.parameter_labels = parameter_labels
self.property_labels = property_labels
self.prune_bad_densities()
self.prune_low_aa_hvaps()
property_measurements = []
property_uncertainties = []
for data in self.multi_data:
measurements = []
uncertainties = []
for property in data.property_measurements.properties:
measurements.append(property.value.m)
uncertainties.append(property.uncertainty.m)
property_measurements.append(measurements)
property_uncertainties.append(uncertainties)
all_parameters = []
failed_parameters = []
bad_density_parameters = []
for data in self.multi_data:
parameters = []
for parameter in data.parameters:
parameters.append(parameter.epsilon._value)
parameters.append(parameter.rmin_half._value)
all_parameters.append(parameters)
for data in self.failed_data:
parameters = []
for parameter in data.parameters:
parameters.append(parameter.epsilon._value)
parameters.append(parameter.rmin_half._value)
failed_parameters.append(parameters)
for data in self.bad_density_data:
parameters = []
for parameter in data.parameters:
parameters.append(parameter.epsilon._value)
parameters.append(parameter.rmin_half._value)
bad_density_parameters.append(parameters)
all_parameters = np.asarray(all_parameters)
failed_parameters = np.asarray(failed_parameters)
bad_density_parameters = np.asarray(bad_density_parameters)
property_measurements = np.asarray(property_measurements)
property_uncertainties = np.asarray(property_uncertainties)
if len(failed_parameters) > 0:
self.failed_params_values = pandas.DataFrame(failed_parameters, columns=parameter_labels)
self.parameter_values = pandas.DataFrame(all_parameters, columns=parameter_labels)
if len(bad_density_parameters) > 0:
self.bad_density_param_values = pandas.DataFrame(bad_density_parameters, columns=parameter_labels)
self.plot_parameter_sets()
self.property_measurements = pandas.DataFrame(property_measurements, columns=property_labels)
import pandas as pd
import os
from training.config import Config
class GeneExp:
"""
Class to get RNA-Seq data from given cell type.
Alter it and provide for classification.
"""
def __init__(self, cfg, chr):
self.cfg = cfg
self.gene_info = None
self.pc_data = None
self.nc_data = None
self.rb_data = None
self.chr = str(chr)
self.gene_exp_path = os.path.join(cfg.downstream_dir, "RNA-seq")
self.gene_exp_file = os.path.join(self.gene_exp_path, "Ensembl_v65.Gencode_v10.ENSG.gene_info")
self.pc_file = os.path.join(self.gene_exp_path, "57epigenomes.RPKM.pc.gz")
self.nc_file = os.path.join(self.gene_exp_path, "57epigenomes.RPKM.nc.gz")
self.rb_file = os.path.join(self.gene_exp_path, "57epigenomes.RPKM.rb.gz")
if cfg.cell == "GM12878":
self.cell_column = "E116"
elif cfg.cell == "H1hESC":
self.cell_column = "E003"
elif cfg.cell == "HFFhTERT":
self.cell_column = "E055"
def get_rna_seq(self):
"""
get_rna_seq() -> No return object
Gets RNA-Seq data for PC, NC, and RB modes.
Args:
NA
"""
self.gene_info = pd.read_csv(self.gene_exp_file, sep="\s+", header=None)
# -*- coding: utf-8 -*-
# @Time : 2020/8/10-17:42
# @Author : 贾志凯
# @File : pre.py
# @Software: win10 python3.6 PyCharm
import pysoftNLP.kashgari as kashgari
import re
import time
import os
import pandas as pd
def load_model(model_name = 'ner'):
basis = 'D:\pysoftNLP_resources\entity_recognition'
model_path = os.path.join(basis, model_name)
load_start = time.time()
loaded_model = kashgari.utils.load_model(model_path)
load_end = time.time()
print("模型加载时间:",load_end-load_start)
return loaded_model
#
def cut_text(text, lenth):
textArr = re.findall('.{' + str(lenth) + '}', text)
textArr.append(text[(len(textArr) * lenth):])
return textArr
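# Quick sketch (hypothetical input): cut_text('abcdefg', 3) -> ['abc', 'def', 'g'];
# the regex captures full chunks of `lenth` characters and the remainder is appended.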
def extract_labels(text, ners):
ner_reg_list = []
if ners:
new_ners = []
for ner in ners:
new_ners += ner
for word, tag in zip([char for char in text], new_ners):
if tag != 'O':
ner_reg_list.append((word, tag))
# Output the model's NER recognition results
labels = {}
if ner_reg_list:
for i, item in enumerate(ner_reg_list):
if item[1].startswith('B'):
label = ""
end = i + 1
while end <= len(ner_reg_list) - 1 and ner_reg_list[end][1].startswith('I'):
end += 1
ner_type = item[1].split('-')[1]
if ner_type not in labels.keys():
labels[ner_type] = []
label += ''.join([item[0] for item in ner_reg_list[i:end]])
labels[ner_type].append(label)
return labels
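# Illustrative sketch (hand-written tags, not real model output): B-/I- runs are merged
# into entities keyed by their type, e.g.
#
#   extract_labels('华为发布新品', [['B-ORG', 'I-ORG', 'O', 'O', 'O', 'O']])
#   # -> {'ORG': ['华为']}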
# Split the text into segments
def text_pattern(text):
list = ['集团|公司','。|,|?|!|、|;|;|:']
text = '。。' + text
text = text[::-1]
temp = []
def dfs(text, temp):
if not text:
return temp
pattern_text = re.compile(list[0][::-1]).findall(text)
if pattern_text:
text = pattern_text[0] + text.split(pattern_text[0], 1)[1]
comma = re.compile(list[1]).findall(text)[0]
res_text = text.split(comma, 1)[0]
temp.append(res_text[::-1])
text = text.split(comma, 1)[1]
else:
# res.append(temp[:]) <class 'list'>: ['中广核新能源湖南分公司']
return temp
dfs(text,temp)
dfs(text,temp)
return temp
def final_test(path,model_name):
import pandas as pd
data = pd.read_table(path, header=None, encoding='utf-8', sep='\t')
data = data[:200]
data.columns = ['标题', '内容']
data['nr'] = data['标题'] + data['内容']
data['te'] = ''
for i in range(len(data)):
first_text = data['nr'][i].replace(" ", "")
print("原始文本:",first_text)
last_text = text_pattern(first_text)
if not last_text:
continue
last = []
for text_input in last_text:
texts = cut_text(text_input, 100)
pre_start = time.time()
ners = load_model(model_name).predict([[char for char in text] for text in texts])
pre_end = time.time()
print("切割文章的预测时间:",pre_end - pre_start)
print("切割的文章内容:",text_input)
print("切割文本的BIO结果:",ners)
labels = extract_labels(text_input, ners)
res = []
if labels.__contains__('ORG') and labels.__contains__('LOC'):
entity = labels['ORG'] + labels['LOC']
elif labels.__contains__('ORG'):
entity = labels['ORG']
elif labels.__contains__('LOC'):
entity = labels['LOC']
else:
entity = []
for j in entity:
punc = '~`!#$%^&*()_+-=|\';":/.,?><~·!@#¥%……&*()——+-=“:’;、。,?》《{}'
j = re.sub(r"[%s]+" % punc, "", j)
if re.fullmatch('集团|公司|子公司|本公司|家公司|分公司|上市公司', j):# j == '公司' or j =='集团' or j == '子公司' or j =='本公司' or j == '家公司' or j =='分公司' or j =='上市公司' or j =='母公司': #re.compile('子公司|本公司|家公司|分公司|上市公司').findall(str(j)) or
break
if re.fullmatch('丰田|华为|苹果|微软|爱立信|阿里|三星|中国联通|中国移动|腾讯|联想|台机电|小米|亚马逊|甲骨文|高通|软银|特斯拉|百度|中石化|中石油', j):#j =='华为' or j =='苹果' or j =='微软' or j=='爱立信' or j=='阿里' or j =='三星' or j =='中国联通' or j =='中国移动' or j =='腾讯' or j =='联想':
res.append(j)
elif re.compile('集团|公司|科技|煤炭|医药|工厂|国际|银行|钢铁|机械').findall(str(j[-2:])): #'集团|有限公司|公司|科技|医药|苹果|华为|谷歌|河南863|富士康'
res.append(j)
res = list(set(res))
print("各个类型的实体结果:", entity)
print("集团公司:", res)
if res:
last.append('|'.join(res))
last = list(set(last))
data['te'][i] = '|'.join(last)
print('最后的公司结果:',"|".join(last))
pd.DataFrame(data).to_csv('result/a.csv', index=False)
# Single-sentence prediction
def single_sentence(sentence,model_name):
first_text = sentence.replace(" ", "")
print("原始文本:", first_text)
last_text = text_pattern(first_text)
if last_text:
last = []
for text_input in last_text:
texts = cut_text(text_input, 100)
pre_start = time.time()
ners = load_model(model_name).predict([[char for char in text] for text in texts])
pre_end = time.time()
print("切割文章的预测时间:", pre_end - pre_start)
print("切割的文章内容:", text_input)
print("切割文本的BIO结果:", ners)
labels = extract_labels(text_input, ners)
res = []
if labels.__contains__('ORG') and labels.__contains__('LOC'):
entity = labels['ORG'] + labels['LOC']
elif labels.__contains__('ORG'):
entity = labels['ORG']
elif labels.__contains__('LOC'):
entity = labels['LOC']
else:
entity = []
for j in entity:
punc = '~`!#$%^&*()_+-=|\';":/.,?><~·!@#¥%……&*()——+-=“:’;、。,?》《{}'
j = re.sub(r"[%s]+" % punc, "", j)
if re.fullmatch('集团|公司|子公司|本公司|家公司|分公司|上市公司',
j): # j == '公司' or j =='集团' or j == '子公司' or j =='本公司' or j == '家公司' or j =='分公司' or j =='上市公司' or j =='母公司': #re.compile('子公司|本公司|家公司|分公司|上市公司').findall(str(j)) or
break
if re.fullmatch('丰田|华为|苹果|微软|爱立信|阿里|三星|中国联通|中国移动|腾讯|联想|台机电|小米|亚马逊|甲骨文|高通|软银|特斯拉|百度|中石化|中石油',
j): # j =='华为' or j =='苹果' or j =='微软' or j=='爱立信' or j=='阿里' or j =='三星' or j =='中国联通' or j =='中国移动' or j =='腾讯' or j =='联想':
res.append(j)
elif re.compile('集团|公司|科技|煤炭|医药|工厂|国际|银行|钢铁|机械').findall(
str(j[-2:])): # '集团|有限公司|公司|科技|医药|苹果|华为|谷歌|河南863|富士康'
res.append(j)
res = list(set(res))
print("各个类型的实体结果:", entity)
print("集团公司:", res)
if res:
last.append('|'.join(res))
last = list(set(last))
result = "|".join(last)
print('最后的公司结果:', result)
return result
# List-style (batch) prediction
def multi_sentence(sentencelist,out_path,model_name):
df_data_output = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2018 Twitter, Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
""" This module contains methods for validating the latency predictions of
the Caladrius Heron queueing theory model."""
import argparse
import datetime as dt
import logging
import os
import sys
from typing import Dict, Any
import pandas as pd
from caladrius import loader, logs
from caladrius.graph.gremlin.client import GremlinClient
from caladrius.graph.utils.heron import graph_check, paths_check
from caladrius.metrics.heron.client import HeronMetricsClient
from caladrius.model.topology.heron.queueing_theory import QTTopologyModel
from caladrius.traffic_provider.current_traffic import CurrentTraffic
from gremlin_python.process.graph_traversal import outE, not_
LOG: logging.Logger = logging.getLogger(__name__)
HISTORICAL_METRICS_DURATION = 120
def _create_parser() -> argparse.ArgumentParser:
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description=("This program validates the predictions of the "
"Caladrius queuing theory performance modelling system "
"for Heron Topologies"))
parser.add_argument("--config", required=True,
help=("Path to the config file with data required by "
"all configured models and classes"))
parser.add_argument("-q", "--quiet", required=False, action="store_true",
help=("Optional flag indicating if console log output "
"should be suppressed"))
parser.add_argument("--debug", required=False, action="store_true",
help=("Optional flag indicating if debug level "
"information should be displayed"))
parser.add_argument("-od", "--output_dir", required=True,
help="Output directory to save results DataFrames.")
parser.add_argument("-t", "--topology", required=True)
parser.add_argument("-c", "--cluster", required=True)
parser.add_argument("-e", "--environ", required=True)
return parser
if __name__ == "__main__":
ARGS: argparse.Namespace = _create_parser().parse_args()
try:
CONFIG: Dict[str, Any] = loader.load_config(ARGS.config)
except FileNotFoundError:
print(f"Config file: {ARGS.config} was not found. Aborting...",
file=sys.stderr)
sys.exit(1)
else:
if not ARGS.quiet:
print("\nStarting Caladrius Heron Validation\n")
print(f"Loading configuration from file: {ARGS.config}")
CONFIG: Dict[str, Any] = loader.load_config(ARGS.config)
if not os.path.exists(CONFIG["log.file.dir"]):
os.makedirs(CONFIG["log.file.dir"])
LOG_FILE: str = CONFIG["log.file.dir"] + "/validation_heron.log"
logs.setup(console=(not ARGS.quiet), logfile=LOG_FILE, debug=ARGS.debug)
# GRAPH CLIENT
graph_client: GremlinClient = \
loader.get_class(CONFIG["graph.client"])(CONFIG["graph.client.config"])
# HERON METRICS CLIENT
metrics_client: HeronMetricsClient = \
loader.get_class(CONFIG["heron.metrics.client"])(
CONFIG["heron.metrics.client.config"])
# TOPOLOGY PERFORMANCE MODEL
cluster = ARGS.cluster
environ = ARGS.environ
topology = ARGS.topology
topology_latencies: pd.DataFrame = pd.DataFrame(columns=['topology', 'av_actual_latency', 'std_actual_latency',
'av_calculated_latency', 'std_predicted_latency'])
system_metrics: pd.DataFrame = pd.DataFrame(columns=['topology', 'component', 'av_gc', 'std_gc',
'av_cpu_load', 'std_cpu_load'])
# Make sure we have a current graph representing the physical plan for
# the topology
graph_check(graph_client, CONFIG["heron.topology.models.config"], CONFIG["heron.tracker.url"],
cluster, environ, topology)
# Make sure we have a file containing all paths for the job
paths_check(graph_client, CONFIG["heron.topology.models.config"], cluster, environ, topology)
model_kwargs = dict()
model_kwargs["zk.time.offset"] = CONFIG["heron.topology.models.config"]["zk.time.offset"]
model_kwargs["heron.statemgr.root.path"] = CONFIG["heron.topology.models.config"]["heron.statemgr.root.path"]
model_kwargs["heron.statemgr.connection.string"] = \
CONFIG["heron.topology.models.config"]["heron.statemgr.connection.string"]
now = dt.datetime.now()
start, end = now - dt.timedelta(minutes=HISTORICAL_METRICS_DURATION), now
traffic_provider: CurrentTraffic = CurrentTraffic(metrics_client, graph_client, topology, cluster,
environ, start, end, {}, **model_kwargs)
qt: QTTopologyModel = QTTopologyModel(CONFIG["heron.topology.models.config"], metrics_client, graph_client)
results = pd.DataFrame(qt.find_current_instance_waiting_times(topology_id=topology, cluster=cluster, environ=environ,
traffic_source=traffic_provider, start=start, end=end,
**model_kwargs))
sinks = graph_client.graph_traversal.V().has("topology_id", topology).hasLabel("bolt").\
where(not_(outE("logically_connected"))).properties('component').value().dedup().toList()
actual_latencies: pd.DataFrame = pd.DataFrame()
for sink in sinks:
result = metrics_client.get_end_to_end_latency(topology, cluster, environ, sink, start, end)
result["average_latency"] = result["end_to_end_latency"] / result["tuple_count"]
if actual_latencies.empty:
actual_latencies = result
else:
actual_latencies = actual_latencies.append(result, ignore_index=True)
topology_latencies = topology_latencies.append({'topology': topology,
'av_actual_latency': actual_latencies['average_latency'].mean(),
'std_actual_latency': actual_latencies['average_latency'].std(),
'av_calculated_latency': results['latency'].mean(),
'std_predicted_latency': results['latency'].std()},
ignore_index=True)
CPU_LOAD = metrics_client.get_cpu_load(topology, cluster, environ, start, end)
GC_TIME = metrics_client.get_gc_time(topology, cluster, environ, start, end)
CAPACITY = metrics_client.get_capacity(topology, cluster, environ, start, end)
load = | pd.DataFrame(columns=['topology', 'component', 'av_cpu_load', 'std_cpu_load']) | pandas.DataFrame |
import datetime
import logging
import matplotlib as mpl
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import pandas as pd
import time
from sklearn import datasets
from sklearn.metrics import roc_curve, auc, classification_report, log_loss, accuracy_score
from sklearn.model_selection import train_test_split, RandomizedSearchCV, StratifiedKFold
from sklearn.model_selection._split import _BaseKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, BaggingClassifier
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from scipy.stats import norm
from itertools import cycle
from statsmodels.tsa.stattools import adfuller
def plot_mtum(df):
'''
Creates a figure with two subplots stacked in a column.
The top plot shows the daily evolution of the close, high, low and open prices.
The bottom plot shows the volume traded each day.
@param df The pandas DataFrame the values are taken from.
It is expected to contain complete 'Close', 'High', 'Low', 'Open', 'Volume' and 'Date' series.
'''
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(20,10))
df.plot(kind='line',y='Close', x='Date', color='blue', ax=axes[0])
df.plot(kind='line',y='High', x='Date', color='green', ax=axes[0])
df.plot(kind='line',y='Low', x='Date', color='red', ax=axes[0])
df.plot(kind='line',y='Open', x='Date', color='orange', ax=axes[0])
plt.title('MTUM prices')
df.plot(kind='line',y='Volume', x='Date', color='blue', ax=axes[1])
plt.title('MTUM volume')
plt.show()
def tick_bars(df, price_column, m):
'''
compute tick bars
# args
df: pd.DataFrame()
column: name for price data
m: int(), threshold value for ticks
# returns
idx: list of indices
'''
t = df[price_column]
ts = 0
idx = []
for i, x in enumerate(t):
ts += 1
if ts >= m:
idx.append(i)
ts = 0
continue
return idx
def tick_bar_df(df, price_column, m):
'''
Filters `df` by the tick_bars indices.
'''
idx = tick_bars(df, price_column, m)
return df.iloc[idx].drop_duplicates()
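# Small sketch on synthetic data (not the MTUM frame used elsewhere): with m=3 a bar
# closes on every third tick, so the selected indices are 2, 5 and 8.
def _tick_bar_example():
    df = pd.DataFrame({'Price': np.arange(10.0)})
    assert tick_bars(df, 'Price', 3) == [2, 5, 8]
    return tick_bar_df(df, 'Price', 3)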
def volume_bars(df, volume_column, m):
'''
compute volume bars
# args
df: pd.DataFrame()
volume_column: name for volume data
m: int(), threshold value for volume
# returns
idx: list of indices
'''
t = df[volume_column]
ts = 0
idx = []
for i, x in enumerate(t):
ts += x
if ts >= m:
idx.append(i)
ts = 0
continue
return idx
def volume_bar_df(df, volume_column, m):
idx = volume_bars(df, volume_column, m)
return df.iloc[idx].drop_duplicates()
def create_dollar_volume_series(df, price_col, volume_col):
return df[price_col] * df[volume_col]
def dollar_bars(df, dv_column, m):
'''
compute dollar bars
# args
df: pd.DataFrame()
dv_column: name for dollar volume data
m: int(), threshold value for dollars
# returns
idx: list of indices
'''
t = df[dv_column]
ts = 0
idx = []
for i, x in enumerate(t):
ts += x
if ts >= m:
idx.append(i)
ts = 0
continue
return idx
def dollar_bar_df(df, dv_column, m):
idx = dollar_bars(df, dv_column, m)
return df.iloc[idx].drop_duplicates()
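# Sketch of the dollar-bar pipeline (column names follow the MTUM frame used in this
# module; the threshold is an arbitrary illustrative value):
#
#   df['DollarVolume'] = create_dollar_volume_series(df, 'Close', 'Volume')
#   dollar_df = dollar_bar_df(df, 'DollarVolume', 1_000_000)
#
# Each surviving row closes a bar once at least one million dollars have been traded.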
def tick_direction(prices):
'''
Computes a tick vector in {1, 0, -1} whose sign indicates the sign of the
return between two consecutive samples.
The initial value is set equal to the first computed one.
The returned vector has the same length as @p prices.
@param prices A price series to difference and take the sign of the return of.
@return b_t, a vector of tick signs used to build tick imbalance bars.
'''
tick_directions = prices.diff()
tick_directions[0] = tick_directions[1]
tick_directions = tick_directions.transform(lambda x: np.sign(x))
return tick_directions
def signed_volume(tick_directions, volumes):
    '''
    Computes a signed volume series from the computed tick signs.
    @param tick_directions The series with the sign of the return.
    @param volumes The volume series for each temporal return sample.
    @return A signed volume series, i.e. the element-wise product of
    @p tick_directions and @p volumes.
    '''
return tick_directions.multiply(volumes)
def exponential_weighted_moving_average(arr_in, window):
'''
@see https://stackoverflow.com/a/51392341
Exponentialy weighted moving average specified by a decay ``window``
assuming infinite history via the recursive form:
(2) (i) y[0] = x[0]; and
(ii) y[t] = a*x[t] + (1-a)*y[t-1] for t>0.
    This method is less accurate than ``_ewma`` but
much faster:
In [1]: import numpy as np, bars
...: arr = np.random.random(100000)
...: %timeit bars._ewma(arr, 10)
...: %timeit bars._ewma_infinite_hist(arr, 10)
3.74 ms ± 60.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
262 µs ± 1.54 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
Parameters
----------
arr_in : np.ndarray, float64
A single dimenisional numpy array
window : int64
The decay window, or 'span'
Returns
-------
np.ndarray
The EWMA vector, same length / shape as ``arr_in``
Examples
--------
>>> import pandas as pd
>>> a = np.arange(5, dtype=float)
>>> exp = pd.DataFrame(a).ewm(span=10, adjust=False).mean()
>>> np.array_equal(_ewma_infinite_hist(a, 10), exp.values.ravel())
True
'''
n = arr_in.shape[0]
ewma = np.empty(n, dtype=float)
alpha = 2 / float(window + 1)
ewma[0] = arr_in[0]
for i in range(1, n):
ewma[i] = arr_in[i] * alpha + ewma[i-1] * (1 - alpha)
return ewma
def compute_initial_e_v(signed_volumes):
    '''
    Computes the absolute value of the mean of the signed volumes.
    It serves as an estimate of the initial value of Φ_T for the whole volume series.
    '''
return abs(signed_volumes.mean())
def compute_tick_imbalance(signed_volumes, e_t_0, abs_e_v_0):
    '''
    @param signed_volumes Series of signed volumes.
    @param e_t_0 Initial value of $E(T)$.
    @param abs_e_v_0 Absolute value of the mean (hint) of $Φ_T$.
    @return A tuple {Ts, abs_thetas, thresholds, i_s} where:
        Ts: vector with the values of $T$ taken as the EWMA window length.
        abs_thetas: vector with the value of Φ_T for each volume sample.
        thresholds: vector with the threshold value used for each volume sample.
        i_s: vector with the indices into the volume series where a bar (tick change) was flagged.
    '''
Ts, i_s = [], []
    # Iteration state:
    # i_prev: previous index at which $T$ was fixed.
    # e_t: $E(T)$, updated iteration by iteration.
    # abs_e_v: $|Φ_T|$, updated iteration by iteration.
i_prev, e_t, abs_e_v = 0, e_t_0, abs_e_v_0
n = signed_volumes.shape[0]
signed_volumes_val = signed_volumes.values.astype(np.float64)
abs_thetas, thresholds = np.zeros(n), np.zeros(n)
abs_thetas[0], cur_theta = np.abs(signed_volumes_val[0]), signed_volumes_val[0]
for i in range(1, n):
cur_theta += signed_volumes_val[i]
abs_theta = np.abs(cur_theta)
abs_thetas[i] = abs_theta
threshold = e_t * abs_e_v
thresholds[i] = threshold
if abs_theta >= threshold:
cur_theta = 0
Ts.append(np.float64(i - i_prev))
i_s.append(i)
i_prev = i
e_t = exponential_weighted_moving_average(np.array(Ts), window=np.int64(len(Ts)))[-1]
abs_e_v = np.abs(exponential_weighted_moving_average(signed_volumes_val[:i], window=np.int64(e_t_0 * 3))[-1] ) # window of 3 bars
return Ts, abs_thetas, thresholds, i_s
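def example_volume_imbalance_bars(df, price_col='Close', volume_col='Volume'):
    '''
    Hedged usage sketch (not part of the original module): wires tick_direction,
    signed_volume and compute_tick_imbalance together to sample volume-imbalance
    bars from a price/volume DataFrame. The initial E(T) guess is an assumption.
    '''
    b_t = tick_direction(df[price_col])
    v_t = signed_volume(b_t, df[volume_col])
    e_t_0 = 100                              # assumed initial expected number of ticks per bar
    abs_e_v_0 = compute_initial_e_v(v_t)
    Ts, abs_thetas, thresholds, i_s = compute_tick_imbalance(v_t, e_t_0, abs_e_v_0)
    return df.iloc[i_s]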
def compute_ewma(prices, window_size):
'''
Computes the EWMA of a price series with a certain window size.
@param prices A pandas series.
@param window_size EWMA window size.
@return The EWMA with `window_size` window size of `prices`.
'''
return prices.ewm(window_size).mean()
def get_up_cross(fast_ewma, slow_ewma):
'''
Computes the fast EWMA serie cross over the slow EWMA serie.
@param fast_ewma A fast EWMA pandas series.
@param slow_ewma A slow EWMA pandas series.
@return A filtered version of `fast_ewma` that indicates when the buy trend starts.
'''
crit1 = fast_ewma.shift(1) < slow_ewma.shift(1)
crit2 = fast_ewma > slow_ewma
return fast_ewma[(crit1) & (crit2)]
def get_down_cross(fast_ewma, slow_ewma):
'''
Computes the slow EWMA serie cross over the fast EWMA serie.
@param fast_ewma A fast EWMA pandas series.
@param slow_ewma A slow EWMA pandas series.
@return A filtered version of `fast_ewma` that indicates when the sell trend starts.
'''
crit1 = fast_ewma.shift(1) > slow_ewma.shift(1)
crit2 = fast_ewma < slow_ewma
return fast_ewma[(crit1) & (crit2)]
def create_bet_signal_fast_slow_ewma(df, price_column, fast_window_size, slow_window_size):
'''
Computes the buy / sell events based on the 50-200 EWMA cross.
Appends three series to `df`:
1- FastEWMA : the fast EWMA computed with `fast_window_size`.
2- SlowEWMA : the fast EWMA computed with `slow_window_size`.
3- BetEWMA : an integer series with {1, 0, -1} values meaning {Buy, Do nothing, Sell}.
@param df A pandas data frame to extract the price series from.
@param price_column A string telling the name of the price series.
@param fast_window_size The fast EWMA window size.
@param slow_window_size The slow EWMA window size.
@return `df` with the appended columns.
'''
fast_ewma = compute_ewma(df[price_column], fast_window_size)
slow_ewma = compute_ewma(df[price_column], slow_window_size)
buy_bet = get_up_cross(fast_ewma, slow_ewma)
sell_bet = get_down_cross(fast_ewma, slow_ewma)
df['FastEWMA'] = fast_ewma
df['SlowEWMA'] = slow_ewma
df['BetEWMA'] = 0
df.BetEWMA.iloc[buy_bet.index] = 1
df.BetEWMA.iloc[sell_bet.index] = -1
return df
def plot_ewma_bet_signals(df):
f, ax = plt.subplots(figsize=(20,10))
df.plot(ax=ax, alpha=.5, y='Close', x='Date', color='blue')
df.plot(ax=ax, alpha=.5, y='FastEWMA', x='Date', color='yellow')
df.plot(ax=ax, alpha=.5, y='SlowEWMA', x='Date', color='brown')
df.Close.loc[df.BetEWMA == 1].plot(ax=ax, ls='', marker='^', markersize=7, alpha=0.75, label='Buy', color='green')
df.Close.loc[df.BetEWMA == -1].plot(ax=ax, ls='', marker='v', markersize=7, alpha=0.75, label='Sell', color='red')
ax.grid()
ax.legend()
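# Hedged usage sketch (the 50/200 windows are the classic cross referenced in the
# docstring above; `df` is assumed to contain 'Close' and 'Date' columns):
#
#   df = create_bet_signal_fast_slow_ewma(df, 'Close', fast_window_size=50, slow_window_size=200)
#   plot_ewma_bet_signals(df)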
def getDailyVol(close,span0=100):
'''
Computes the daily volatility of price returns.
It takes a closing price series, applies a diff sample to sample
(assumes each sample is the closing price), computes an EWM with
`span0` samples and then the standard deviation of it.
@param[in] close A series of prices where each value is the closing price of an asset.
@param[in] span0 The sample size of the EWM.
@return A pandas series of daily return volatility.
'''
df0=close.diff()
df0=df0 - 1
df0[0]=0
    df0=df0.ewm(span=span0).std().rename('dailyVol')
df0[0]=df0[1]
return df0
def getTEvents(close, h):
'''
Computes a pandas series of indices of `df[price_col]` that are the output
of a CUSUM positive and negative filter. The threshold of the filter is `h`.
@param[in] close A series of prices where each value is the closing price of an asset.
@param[in] h CUSUM filter threshold.
@return A pandas index series that mark where the CUSUM filter flagged either positive
and negative cumulative sums that are bigger than `h`.
'''
tEvents, sPos, sNeg = [], 0, 0
diff = close.diff()
diff[0] = 0
for i in diff.index:
sPos, sNeg = max(0, sPos+diff.loc[i]), min(0, sNeg + diff.loc[i])
if sNeg < -h:
sNeg = 0
tEvents.append(i)
if sPos > h:
sPos = 0
tEvents.append(i)
return pd.Int64Index(tEvents)
def addVerticalBarrier(tEvents, close, numDays=1):
'''
Returns a filtered pandas series of prices coming from `close` that
belong to the offset price in `numDays` of `tEvents` prices.
@param[in] tEvents A pandas index series that match the same type of `close`'s index.
@param[in] close A series of prices where each value is the closing price of an asset.
@param[in] numDays A delta in samples to apply to all a vertical barrier.
@return A pandas series of prices.
'''
t1=close.index.searchsorted(tEvents + numDays)
t1=t1[t1<close.shape[0]]
t1=(pd.Series(close.index[t1],index=tEvents[:t1.shape[0]]))
return t1
def applyPtSlOnT1(close,events,ptSl,molecule):
'''
Apply stop loss/profit taking, if it takes place before t1 (end of event)
@param[in] close A pandas series of prices.
@param[in] events A pandas dataframe, with columns:
       - `t1`: the timestamp of the vertical barrier. When the value is np.nan, there will not be a vertical barrier.
       - `trgt`: the unit width of the horizontal barriers.
    @param[in] ptSl A list of two non-negative float values:
       - `ptSl[0]`: the factor that multiplies `trgt` to set the width of the upper barrier. If 0, there will not be an upper barrier.
       - `ptSl[1]`: the factor that multiplies `trgt` to set the width of the lower barrier. If 0, there will not be a lower barrier.
@param[in] molecule A list with the subset of event indices that will be processed by a single thread.
'''
events_=events.loc[molecule]
out=events_[['t1']].copy(deep=True)
if ptSl[0]>0: pt=ptSl[0]*events_['trgt']
else: pt=pd.Series(index=events.index) # NaNs
if ptSl[1]>0: sl=-ptSl[1]*events_['trgt']
else: sl=pd.Series(index=events.index) # NaNs
for loc,t1 in events_['t1'].fillna(close.index[-1]).iteritems():
loc = int(loc)
t1 = int(t1)
df0=close[loc:t1] # path prices
df0=(df0/close[loc]-1)*events_.at[loc,'side'] # path returns
out.loc[loc,'sl']=df0[df0<sl[loc]].index.min() # earliest stop loss
out.loc[loc,'pt']=df0[df0>pt[loc]].index.min() # earliest profit taking
return out
def getEvents(close, tEvents, ptSl, trgt, minRet, t1=False, side=None):
#1) get target
trgt=trgt.loc[tEvents]
trgt=trgt[trgt>minRet] # minRet
#2) get t1 (max holding period)
if t1 is False:t1=pd.Series(pd.NaT, index=tEvents)
#3) form events object, apply stop loss on t1
if side is None:side_,ptSl_=pd.Series(1.,index=trgt.index), [ptSl[0],ptSl[0]]
else: side_,ptSl_=side.loc[trgt.index],ptSl[:2]
events=(pd.concat({'t1':t1,'trgt':trgt,'side':side_}, axis=1)
.dropna(subset=['trgt']))
df0=applyPtSlOnT1(close, events, ptSl_, events.index)
events['t1']=df0.dropna(how='all').min(axis=1) # pd.min ignores nan
if side is None:events=events.drop('side',axis=1)
return events
def getBins(events, close):
'''
Compute event's outcome (including side information, if provided).
events is a DataFrame where:
-events.index is event's starttime
-events['t1'] is event's endtime
-events['trgt'] is event's target
-events['side'] (optional) implies the algo's position side
Case 1: ('side' not in events): bin in (-1,1) <-label by price action
Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)
'''
#1) prices aligned with events
events_=events.dropna(subset=['t1'])
px=events_.index.union(events_['t1'].values).drop_duplicates()
px=close.reindex(px,method='bfill')
#2) create out object
out=pd.DataFrame(index=events_.index)
out['ret']=px.loc[events_['t1'].values].values/px.loc[events_.index]-1
if 'side' in events_:out['ret']*=events_['side'] # meta-labeling
out['bin']=np.sign(out['ret'])
if 'side' in events_:out.loc[out['ret']<=0,'bin']=0 # meta-labeling
return out
def getBinsNew(events, close, t1=None):
'''
Compute event's outcome (including side information, if provided).
events is a DataFrame where:
-events.index is event's starttime
-events['t1'] is event's endtime
-events['trgt'] is event's target
-events['side'] (optional) implies the algo's position side
-t1 is original vertical barrier series
Case 1: ('side' not in events): bin in (-1,1) <-label by price action
Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)
'''
#1) prices aligned with events
events_=events.dropna(subset=['t1'])
px=events_.index.union(events_['t1'].values).drop_duplicates()
px=close.reindex(px,method='bfill')
#2) create out object
out=pd.DataFrame(index=events_.index)
out['ret']=px.loc[events_['t1'].values].values/px.loc[events_.index]-1
if 'side' in events_:out['ret']*=events_['side'] # meta-labeling
out['bin']=np.sign(out['ret'])
if 'side' not in events_:
# only applies when not meta-labeling
# to update bin to 0 when vertical barrier is touched, we need the original
# vertical barrier series since the events['t1'] is the time of first
# touch of any barrier and not the vertical barrier specifically.
# The index of the intersection of the vertical barrier values and the
# events['t1'] values indicate which bin labels needs to be turned to 0
vtouch_first_idx = events[events['t1'].isin(t1.values)].index
out.loc[vtouch_first_idx, 'bin'] = 0.
if 'side' in events_:out.loc[out['ret']<=0,'bin']=0 # meta-labeling
return out
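def example_triple_barrier_labels(close, ptSl=(1, 1), minRet=0.01, numDays=5):
    '''
    Hedged end-to-end sketch (assumed wiring, not part of the original module):
    CUSUM-filter events on `close`, add a vertical barrier, apply the
    profit-taking / stop-loss barriers and label the outcomes.
    `close` is assumed to be a pd.Series with an integer RangeIndex, as the
    helpers above expect; the thresholds are illustrative only.
    '''
    vol = getDailyVol(close)
    tEvents = getTEvents(close, h=vol.mean())
    t1 = addVerticalBarrier(tEvents, close, numDays=numDays)
    events = getEvents(close, tEvents, ptSl, trgt=vol, minRet=minRet, t1=t1)
    labels = getBins(events, close)
    return events, labels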
def getRandomForest(n_estimator=150, oob_score=False, max_samples=None):
return RandomForestClassifier(max_depth=2, n_estimators=n_estimator, criterion='entropy', class_weight='balanced_subsample', random_state=RANDOM_STATE, oob_score=oob_score, max_samples=max_samples)
def plotROC(y_test, y_pred_rf):
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
def train_test_samples(events, labels, test_size, binarize = False):
    X = events.side.dropna().values.reshape(-1,1)  # assumption: the original referenced an undefined `events_side`; the bet side is used as the single feature
    y = labels.bin.values
    if binarize: y = label_binarize(y, classes=[-1, 0, 1])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, shuffle=False)
return X, y, X_train, X_test, y_train, y_test
def getSignal(events, stepSize, prob, pred, numClasses, **kargs):
if prob.shape[0] == 0: return pd.Series()
signal0 = (prob - 1. / numClasses) / (prob * (1. - prob)) ** 0.5
signal0 = pred * (2. * norm.cdf(signal0) - 1.)
return signal0
# if 'side' in events: signal0*=events.loc[signal0.index, 'side']
# df0 = signal0.to_frame('signal').join(events[['t1']], how='left')
# df0 = avgActiveSignals(df0)
# signal1 = discreteSignal(signal0=pd.Series(signal0), stepSize=stepSize)
# return signal1
def avgActiveSignals(signals):
tPnts = set(signals['t1'].dropna().values)
tPnts = tPnts.union(signals.index.values)
tPnts = list(tPnts); tPnts.sort()
    out = mpAvgActiveSignals(signals, tPnts)  # single-threaded call: pass the time points directly
return out
def mpAvgActiveSignals(signals, molecule):
out = | pd.Series() | pandas.Series |
# standard libraries
import enum
import io
import unittest
# third-party libraries
import pandas
# library under test
import ccbb_pyutils.alignment_stats as ns_test
class TestFunctions(unittest.TestCase):
def _get_fastqc_test_data_dir(self):
return "test_data/fastqc_data/"
def _get_fastqc_and_star_htseq_data(self):
return "test_data/fastqc_and_star_htseq_data/"
# region _find_total_seqs_from_fastqc
def test__find_total_seqs_from_fastqc_ignore(self):
line = "##FastQC 0.11.3"
input_record = {"not much": "you"}
expected_record = input_record.copy()
real_output = ns_test._find_total_seqs_from_fastqc(line, input_record)
self.assertEqual(expected_record, real_output)
def test__find_total_seqs_from_fastqc_filename(self):
line = "Filename ARH1_S1.fastq.gz"
input_record = {"not much": "you"}
expected_record = {"Sample": "ARH1_S1", "not much": "you"}
real_output = ns_test._find_total_seqs_from_fastqc(line, input_record)
self.assertEqual(expected_record, real_output)
def test__find_total_seqs_from_fastqc_total(self):
line = "Total Sequences 32416013"
input_record = {"not much": "you"}
expected_record = {"Total Reads": 32416013.0, "not much": "you"}
real_output = ns_test._find_total_seqs_from_fastqc(line, input_record)
self.assertEqual(expected_record, real_output)
# end region
# region _find_fastqc_statuses_from_fastqc
def test__find_fastqc_statuses_from_fastqc_ignore_passed_of_interest(self):
line = "PASS Basic Statistics ARH1_S1.fastq.gz"
input_record = {"not much": "you"}
expected_record = {'FASTQC Messages': [], 'Sample': 'ARH1_S1', 'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Basic Statistics"])
self.assertEqual(expected_record, real_output)
def test__find_fastqc_statuses_from_fastqc_ignore_failed_not_of_interest(self):
# still should put in file name
line = "FAIL Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {"not much": "you"}
expected_record = {'FASTQC Messages': [], 'Sample': 'ARH1_S1', 'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Basic Statistics"])
self.assertEqual(expected_record, real_output)
def test__find_fastqc_statuses_from_fastqc_ignore_failed_no_notes_has_name(self):
line = "FAIL Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {'Sample': 'Tester', "not much": "you"}
expected_record = {'FASTQC Messages': ['FAIL: Per tile sequence quality'], 'Sample': 'Tester',
'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Per tile sequence quality"])
self.assertEqual(expected_record, real_output)
def test__find_fastqc_statuses_from_fastqc_ignore_failed_has_notes_has_name(self):
line = "FAIL Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {'FASTQC Messages': ['WARN: Per base sequence content'], 'Sample': 'Tester', 'not much': 'you'}
expected_record = {'FASTQC Messages': ['WARN: Per base sequence content', 'FAIL: Per tile sequence quality'],
'Sample': 'Tester', 'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Per tile sequence quality"])
self.assertEqual(expected_record, real_output)
def test__find_fastqc_statuses_from_fastqc_ignore_failed_no_notes_no_name(self):
line = "FAIL Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {"not much": "you"}
expected_record = {'FASTQC Messages': ['FAIL: Per tile sequence quality'], 'Sample': 'ARH1_S1',
'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Per tile sequence quality"])
self.assertEqual(expected_record, real_output)
def test__find_fastqc_statuses_from_fastqc_ignore_failed_has_notes_no_name(self):
line = "FAIL Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {'FASTQC Messages': ['WARN: Per base sequence content'], 'not much': 'you'}
expected_record = {'FASTQC Messages': ['WARN: Per base sequence content', 'FAIL: Per tile sequence quality'],
'Sample': 'ARH1_S1', 'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Per tile sequence quality"])
self.assertEqual(expected_record, real_output)
# Note: didn't retest all the functionality with WARN, just one representative case based on known
# structure of the code (whitebox, remember? :)
def test__find_fastqc_statuses_from_fastqc_ignore_warned_has_notes_has_name(self):
line = "WARN Per tile sequence quality ARH1_S1.fastq.gz"
input_record = {'FASTQC Messages': ['WARN: Per base sequence content'], 'Sample': 'Tester', 'not much': 'you'}
expected_record = {'FASTQC Messages': ['WARN: Per base sequence content', 'WARN: Per tile sequence quality'],
'Sample': 'Tester', 'not much': 'you'}
real_output = ns_test._find_fastqc_statuses_from_fastqc(line, input_record, ["Per tile sequence quality"])
self.assertEqual(expected_record, real_output)
# end region
# region _loop_over_fastqc_files
def test__loop_over_fastqc_files_w_extra_args(self):
expected_data = [
{'FASTQC Messages': ['FAIL: Per tile sequence quality', 'WARN: Overrepresented sequences'],
'Sample': 'ARH1_S1'},
{'FASTQC Messages': ['FAIL: Per tile sequence quality', 'FAIL: Per sequence quality scores',
'WARN: Overrepresented sequences'], 'Sample': 'ARH3_S3'}]
expected_output = | pandas.DataFrame(expected_data) | pandas.DataFrame |
import numpy as np
import pandas as pd
from functools import lru_cache
# train-test split by a percentage.
# input: dataframe, label column name, split ration, and random state
# returns: x_train, x_test, y_train, y_test
def split_df(user_df, label_name, split_ratio=0.8, random_value=42):
x_train = user_df.sample(frac=split_ratio, random_state=random_value)
x_test = user_df.drop(x_train.index)
return x_train.drop(label_name, axis=1), x_test.drop(label_name, axis=1), pd.DataFrame(
x_train[label_name]), pd.DataFrame(x_test[label_name])
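# Hedged usage sketch (assumes a DataFrame `df` with a 'label' column):
#
#   x_train, x_test, y_train, y_test = split_df(df, 'label', split_ratio=0.8, random_value=42)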
# splits the dataframe into two parts at a given position of a specific feature
# input: dataframe, feature column name, and split position (number of rows, after sorting by the feature)
# returns: left_split - rows with the lowest feature values (before the split position)
#          right_split - the remaining rows with higher feature values
def split_by_feature(user_df, feature_name, split_value):
    user_df = user_df.sort_values(feature_name, axis=0)
    left_split = user_df.iloc[0:split_value]
    right_split = user_df.iloc[split_value:]  # keep the last row as well
    return left_split, right_split
# merge feature with label.
# order feature value from small to large (keep original row number) and generate gap values feature
# input: dataframe (x) and labels list (y)
# returns: DataFrame with new index ordered index old index and value order by size
# the ordered value are generated in between each data point of the given dataframe
def order_features(current_df: pd.DataFrame, feature: str, labels_list: pd.DataFrame):
# create new dataframe with orders values and new index
current_df['label'] = labels_list
current_df = current_df.sort_values(feature, axis=0)
ordered_df = current_df[[feature, 'label']]
ordered_df = ordered_df.reset_index(drop=False)
ordered_df = ordered_df.append(ordered_df.iloc[len(ordered_df) - 1], ignore_index=True)
ordered_df['index'] = ordered_df['index'].astype(int)
ordered_df['label'] = ordered_df['label'].astype(int)
new_values = []
for i in range(0, len(ordered_df)):
if i == 0:
new_values.append(ordered_df[feature].iloc[i] / 2)
elif i == len(ordered_df) - 1:
new_values.append((ordered_df[feature].iloc[i] + ordered_df[feature].iloc[i]) / 2)
else:
new_values.append((ordered_df[feature].iloc[i] + ordered_df[feature].iloc[i - 1]) / 2)
ordered_df['averaged'] = new_values
return ordered_df
# calculate gini index of the entire data frame and returns the position of minimum value
# input: dataframe (x) and labels list (y)
# returns: row number and column name and
def get_split(current_df: pd.DataFrame, labels_list: pd.DataFrame):
# create an initial gini_matrix with 0.5 in each cell
gini_matrix = np.ones((len(current_df) + 1, len(current_df.columns)))
gini_matrix = gini_matrix - 0.5
gini_matrix = pd.DataFrame(gini_matrix, columns=current_df.columns)
# amount of rows in dataframe
total_samples = len(current_df)
# examine the data column be column
for feature in current_df.columns:
# order feature value from small to large (keep original row number)
ordered_features = order_features(current_df, feature, labels_list)
# examine rows in column
for current_position in range(0, len(ordered_features)):
# count the amount of 1 labels from start to current label
counter_before = 0
for i in range(0, current_position):
if ordered_features['label'].iloc[i] == 1:
counter_before += 1
# count the amount of 1 labels from current label to end
counter_after = 0
for i in range(current_position + 1, total_samples):
if ordered_features['label'].iloc[i] == 1:
counter_after += 1
# calculate ratio of 1, 0 and the gini of the data located before the current position
if current_position == 0:
proportion_before_1 = counter_before
else:
proportion_before_1 = counter_before / current_position
proportion_before_0 = 1 - proportion_before_1
gini_before = 1 - (proportion_before_1 ** 2 + proportion_before_0 ** 2)
# calculate ratio of 1, 0 and the gini of the data located after the current position
if total_samples - (current_position + 1) == 0:
proportion_after_1 = counter_after
else:
proportion_after_1 = counter_after / (total_samples - (current_position + 1))
proportion_after_0 = 1 - proportion_after_1
gini_after = 1 - (proportion_after_1 ** 2 + proportion_after_0 ** 2)
# calculate and update the gini matrix cell with the final gini value
gini_matrix.loc[current_position, feature] = abs(gini_before * (
current_position + 1) / total_samples) + abs(
gini_after * (1 - ((current_position + 1) / total_samples)))
row, column = gini_matrix.stack().idxmin()
ordered_feature = order_features(current_df, column, labels_list)
return int(ordered_feature.iloc[row]['index']), column # returns: row number, column name, and gini value
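# Hedged usage sketch (names are assumptions): find the best split point for a small
# feature matrix `x` (pd.DataFrame) and its labels `y` (single-column pd.DataFrame).
# get_split adds a temporary 'label' column to its input, hence the .copy():
#
#   best_row_index, best_feature = get_split(x.copy(), y)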
# Decision tree node
class Node:
left_node: pd.DataFrame
right_node: pd.DataFrame
current_df: pd.DataFrame
feature: int
row: int
depth: int
leaf: int
labels: np.ndarray
def __init__(self, current_df=pd.DataFrame(), depth=0):
self.current_df = current_df
self.feature = 0
self.row = 0
self.depth = depth
self.leaf = 0
self.left_node = pd.DataFrame()
self.right_node = | pd.DataFrame() | pandas.DataFrame |
from PySide2.QtWidgets import QWidget,QTableView,QAbstractItemView, QHeaderView, QPlainTextEdit, \
QVBoxLayout
from PySide2.QtGui import QStandardItemModel, QStandardItem, QBrush, QColor, QPainter, \
QImage, QIcon
from PySide2.QtCore import Qt, QSortFilterProxyModel, QFile, QEvent, QRect, Signal, QRegExp
from config import ASSETS_PATH
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use("Agg")
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
FAILED_ICON = str(ASSETS_PATH.joinpath("failed.png").resolve())
SENT_ICON = str(ASSETS_PATH.joinpath("sent.png").resolve())
INVALID_ICON = str(ASSETS_PATH.joinpath("invalid.png").resolve())
EMOJI = str(ASSETS_PATH.joinpath("emoji.png").resolve())
COLORS = {'Sent': "#87de87",
'Failed': "#ffdd55",
'Invalid': "#de8787",
'Unsent': "#d8d8d8"
}
class SummaryChart(QWidget):
"""docstring for SummaryChart"""
def __init__(self, data_df=pd.DataFrame(),parent=None):
super(SummaryChart, self).__init__(parent)
self._data_df = data_df
self._x,self._y,self._xaxis,self._yaxis =self.get_chart_data("status")
self.create_chart()
self.main_layout = QVBoxLayout(self)
self.main_layout.addWidget(self._canvas)
def create_chart(self):
matplotlib.rcParams['text.color'] = COLORS["Unsent"]
matplotlib.rcParams['axes.labelcolor'] = COLORS["Unsent"]
matplotlib.rcParams['xtick.color'] = COLORS["Unsent"]
matplotlib.rcParams['ytick.color'] = COLORS["Unsent"]
self._figure = Figure()
self._canvas = FigureCanvas(self._figure)
self._canvas.setParent(self)
self._figure.set_facecolor("none")
self.setStyleSheet("background-color:#1a1a1a;")
width = 0.35 # the width of the bars
x = np.arange(len(self._x)) # the label locations
self._axes = self._figure.add_subplot(111)
status_reacts = self._axes.bar(x, self._y, width, label='Status')
self._axes.text(0, self._y[0]+ (0.1*self._y[0]) , "{} {}{} of \nTotal {} contacts".format(self._x[0], self._y[0]/sum(self._y)*100,"%", sum(self._y)), fontsize= 'x-large')
for idx,x_tick in enumerate(self._x):
if x_tick not in ["Sent", "Failed","Invalid"]:
status_reacts[idx].set_color(COLORS["Unsent"])
else:
status_reacts[idx].set_color(COLORS[x_tick])
# Add some text for labels, title and custom x-axis tick labels, etc.
# self._axes.set_ylabel('Jumlah')
self._axes.set_facecolor("#1a1a1a")
self._axes.tick_params(axis='x', colors=COLORS["Unsent"])
self._axes.set_xticks(x, ["Unsent" if xtick =="" else xtick for xtick in self._x ])
        # self._axes.bar_label(status_reacts, padding=3) # for matplotlib version 3.5.1
def autolabel(rects, xpos='center',ax=self._axes):
"""
Attach a text label above each bar in *rects*, displaying its height.
*xpos* indicates which side to place the text w.r.t. the center of
the bar. It can be one of the following {'center', 'right', 'left'}.
"""
xpos = xpos.lower() # normalize the case of the parameter
ha = {'center': 'center', 'right': 'left', 'left': 'right'}
offset = {'center': 0.5, 'right': 0.57, 'left': 0.43} # x_txt = x + w*off
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()*offset[xpos], 1.01*height,
'{}'.format(height), ha=ha[xpos], va='bottom')
autolabel(status_reacts)
self._figure.patch.set_visible(False)
self._axes.yaxis.set_visible(False)
self._axes.yaxis.set_ticks([])
self._axes.xaxis.set_ticks([])
self._axes.spines["right"].set_visible(False)
self._axes.spines["top"].set_visible(False)
self._axes.spines["left"].set_visible(False)
self._axes.spines["bottom"].set_color(COLORS["Unsent"])
def get_chart_data(self,column):
_data_df = self._data_df.copy()
#_data_df[column] = _data_df[column].str.replace("","Unsent")
self.values_count = _data_df[column].value_counts()
self.values_count = self.values_count.reset_index()
new_col = []
for val in self.values_count["index"].tolist():
if val == "Sent":
new_col.append(3)
elif val == "Failed":
new_col.append(2)
elif val == "Invalid":
new_col.append(1)
else:
new_col.append(0)
self.values_count["nilai"] = new_col
self.values_count = self.values_count.sort_values(by = ['nilai'], ascending = False)
x= self.values_count["index"].tolist()
y= self.values_count["status"].tolist()
xlabel = "Status"
ylabel = "Jumlah"
return [x,y,xlabel,ylabel]
class EmptyTable(QPlainTextEdit):
dropped = Signal(bool)
fileDropped = Signal(str)
draggedLeave = Signal(bool)
def __init__(self, parent=None):
super(EmptyTable, self).__init__(parent)
self.wasReadOnly = True
def dropEvent(self, event):
print('event.mimeData().hasFormat("text/uri-list")',event.mimeData().hasFormat("text/uri-list"))
if event.mimeData().hasFormat("text/uri-list"):
event.acceptProposedAction()
local_file = event.mimeData().urls()[0].toLocalFile()
print("local_file", local_file)
self.fileDropped.emit(local_file)
self.dropped.emit(True)
if self.wasReadOnly:
self.setReadOnly(True)
self.hide()
else:
self.fileDropped.emit("")
self.dropped.emit(False)
def dragEnterEvent(self, event):
print("dragEnterEvent")
if self.isReadOnly():
self.setReadOnly(False)
event.acceptProposedAction()
def dragLeaveEvent(self, event):
print("dragLeaveEvent")
if self.wasReadOnly:
self.setReadOnly(True)
self.draggedLeave.emit(True)
def paintEvent(self, event):
super().paintEvent(event)
painter = QPainter(self.viewport())
painter.save()
col = self.palette().placeholderText().color()
painter.setPen(col)
fm = self.fontMetrics()
elided_text = fm.elidedText(
"Belum Ada Ringkasan Laporan", Qt.ElideRight, self.viewport().width()
)
painter.drawText(self.viewport().rect(), Qt.AlignCenter, elided_text)
painter.restore()
class ModelProxy(QSortFilterProxyModel):
def headerData(self, section, orientation, role):
# if display role of vertical headers
if orientation == Qt.Vertical and role == Qt.DisplayRole:
# return the actual row number
return section + 1
# for other cases, rely on the base implementation
return super(ModelProxy, self).headerData(section, orientation, role)
class SummaryModel(QStandardItemModel):
"""
Summary model
"""
def __init__(self, contact_df, parent=None):
super(SummaryModel, self).__init__(parent)
self.contact_df = contact_df
self.displayed_contact_df = contact_df.copy()
if 'token' in self.displayed_contact_df.columns:
self.displayed_contact_df.drop('token',inplace=True, axis=1)
if 'row' in self.displayed_contact_df.columns:
self.displayed_contact_df.drop('row',inplace=True, axis=1)
if 'responses' in self.displayed_contact_df.columns:
self.displayed_contact_df.drop('responses',inplace=True, axis=1)
if 'replied' in self.displayed_contact_df.columns:
self.displayed_contact_df.drop('replied',inplace=True, axis=1)
self.header_data = [str(col).lower().strip().replace("_"," ") for col in self.displayed_contact_df.columns]
self.setHorizontalHeaderLabels(self.header_data)
for idx_row, row in contact_df.iterrows():
for idx_col, col in enumerate(self.displayed_contact_df.columns):
item = QStandardItem(str(row[col]))
item.setIcon(QIcon(EMOJI))
if col in list(self.displayed_contact_df.columns)[-4:]:
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
item.setEnabled(False)
self.setItem(idx_row, idx_col, item)
def data(self, index, role):
if index.isValid():
if role == Qt.ForegroundRole:
if index.row() < len(self.displayed_contact_df.index):
value = self.displayed_contact_df.at[index.row(),'status']
if value == 'Sent':
return QBrush(QColor(0, 85, 0))
elif value == 'Invalid':
return QBrush(QColor(170, 0, 0))
elif value == 'Failed':
return QBrush(QColor(255, 85, 0))
if role == Qt.DecorationRole:
if index.row() < len(self.displayed_contact_df.index):
value = self.displayed_contact_df.at[index.row(),'status']
if index.column() == 0:
if value == 'Sent':
return QIcon(SENT_ICON)
elif value == 'Invalid':
return QIcon(INVALID_ICON)
elif value == 'Failed':
return QIcon(FAILED_ICON)
if role == Qt.DisplayRole or role == Qt.EditRole:
value = ""
if index.row() < len(self.displayed_contact_df.index):
col_name = self.displayed_contact_df.columns[index.column()]
value = self.displayed_contact_df[col_name][index.row()]
if type(value) in [float, np.float64]:
if str(value) in ["nan","NaN"]:
value = ""
return str(value)
# return super(SummaryModel, self).data(index, role)
def setData(self, index, value, role):
if role == Qt.EditRole:
col_name = self.displayed_contact_df.columns[index.column()]
self.displayed_contact_df[col_name][index.row()] = value
if col_name in self.contact_df.columns:
self.contact_df[col_name][index.row()] = value
return True
return False
def getContacts(self):
return self.contact_df
class SummaryTableView(QTableView):
"""
Summary table view
"""
dragged = Signal(bool)
fileDropped = Signal(str)
def __init__(self, model, parent=None):
super(SummaryTableView, self).__init__(parent)
self.update_model(model)
self.setAcceptDrops(True)
self.viewport().setAcceptDrops(True)
self.setDropIndicatorShown(True)
def paintEvent(self, event):
super().paintEvent(event)
if self.model() is not None and self.model().rowCount() > 0:
return
# print("self.viewport()", self.viewport())
painter = QPainter(self.viewport())
painter.save()
col = self.palette().placeholderText().color()
painter.setPen(col)
fm = self.fontMetrics()
elided_text = fm.elidedText(
"Drag file Excel atau CSV untuk mengimpor Contacts", Qt.ElideRight, self.viewport().width()
)
painter.drawText(self.viewport().rect(), Qt.AlignCenter, elided_text)
painter.drawImage(QRect(self.viewport().rect().width()//2, self.viewport().rect().height()//2 + 20, 25, 25), QImage(str(ASSETS_PATH.joinpath("import.png").resolve())))
painter.restore()
def update_model(self, model=None):
if model:
self.proxyModel = ModelProxy()
self.tableModel = model
self.proxyModel.setSourceModel(self.tableModel)
self.setSortingEnabled(True)
self.setModel(self.proxyModel)
if self.model():
self.setWordWrap(True)
self.setTextElideMode(Qt.ElideNone)
self.setSelectionMode(QAbstractItemView.SingleSelection)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
hhdr = self.horizontalHeader()
for col in range(len(self.tableModel.header_data)):
hhdr.setSectionResizeMode(col, QHeaderView.ResizeToContents)
def reset_model(self):
try:
self.proxyModel.deleteLater()
except:
print("Error reset Table View")
def dragEnterEvent(self, event):
# super().dragEnterEvent(event)
if event.mimeData().hasFormat("text/uri-list"):
print("accepted_formats")
event.acceptProposedAction()
if self.model() is None:
self.hide()
self.dragged.emit(True)
else:
self.dragged.emit(False)
def dropEvent(self, event):
if event.mimeData().hasFormat("text/uri-list"):
event.acceptProposedAction()
local_file = event.mimeData().urls()[0].toLocalFile()
self.fileDropped.emit(local_file)
else:
self.fileDropped.emit("")
def removeRow(self, row_indexes=None):
model = self.tableModel
try:
if row_indexes is None:
row_indexes = self.selectionModel().selectedRows()
indexes = []
for index in sorted(row_indexes, reverse=True):
model.removeRow(index.row())
indexes.append(index.row())
model.contact_df = model.contact_df.drop(model.contact_df.index[indexes])
model.displayed_contact_df = model.displayed_contact_df.drop(model.displayed_contact_df.index[indexes])
except Exception as e:
print("Error removing row: ", e)
def addRow(self):
if hasattr(self,'tableModel'):
model = self.tableModel
items = []
col_number=len(self.tableModel.header_data)
if col_number:
for col in range(col_number):
item = QStandardItem(str(''))
if col_number - col <=4:
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
item.setEnabled(False)
items.append(item)
model.appendRow(items)
new_row = pd.DataFrame([["" for col in model.contact_df.iloc[0]]],columns=model.contact_df.columns, dtype=str)
model.contact_df = pd.concat([model.contact_df,new_row], ignore_index=True)
new_displayed_row = pd.DataFrame([["" for col in model.displayed_contact_df.iloc[0]]],columns=model.displayed_contact_df.columns, dtype=str)
model.displayed_contact_df = | pd.concat([model.displayed_contact_df,new_displayed_row], ignore_index=True) | pandas.concat |
# --------------
#Importing the modules
import pandas as pd
import numpy as np
from scipy.stats import mode
#Code for categorical variable
def categorical(df):
categorical_var = pd.Categorical(df)
#categorical_var = df.unique()
return categorical_var
#Code for numerical variable
def numerical(df):
numerical_var = df._get_numeric_data().columns
#numerical_var = df.nunique()
return numerical_var
#code to check distribution of variable
def clear(df,col,val):
count = df[col].value_counts()
return count
#Code to check instances based on the condition
def instances_based_condition(df,col1,val1,col2,val2):
instance = df[(df[col1]>val1) & (df[col2]==val2)]
return instance
# Code to calculate different aggreagted values according to month
#pivot = pd.pivot_table(df,index="Type 1",values="Attack speed points",columns="Generation",aggfunc="mean")
#print(pivot)
def agg_values_ina_month(df,date_col,agg_col,agg):
df[date_col]= pd.to_datetime(df[date_col])
    aggregated_value = pd.pivot_table(df,index=date_col,values=agg_col,aggfunc=agg)
    return aggregated_value
# ********************************************************************************** #
# #
# Project: Data Frame Explorer # # #
# Author: <NAME> #
# Contact: <EMAIL>(a)<EMAIL> #
# #
# License: MIT License #
# Copyright (C) 2021.01.30 <NAME> #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advanced plots, for statistics,
import matplotlib as mpl # to get basic plt functions, helping with plot making
import matplotlib.pyplot as plt # for making plots,
import matplotlib.gridspec
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from IPython.display import display
from PIL import Image, ImageDraw
from matplotlib import colors
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
from pandas.api.types import is_datetime64_any_dtype
from matplotlib.font_manager import FontProperties
# Function, ............................................................................
def find_and_display_patter_in_series(*, series, pattern):
"I used that function when i don't remeber full name of a given column"
res = series.loc[series.str.contains(pattern)]
return res
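# Hedged usage example: list the columns of `df` whose name contains "date"
#   find_and_display_patter_in_series(series=pd.Series(df.columns.astype(str)), pattern="date")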
# Function, ............................................................................
def summarize_df(*, df, nr_of_examples_per_category=3, csv_file_name=None, save_dir=None, verbose=False):
"""
Summary table, with basic information on column in large dataframes,
can be applied to dafarames of all sizes, Used to create summary plots
IMPORTANT: this is one of my oldest function, that I am using a lot,
I will soon update it to something better, but generating the same outputs,
and more resiliant to unknownw datatypes,
Parameters/Input
_________________ _______________________________________________________________________________
. Input .
* df DataFrame to summarize
* nr_of_examples_per_category
how many, top/most frequent records shoudl
be collected in each column and used as examples of data inputs form that column
NaN, are ignored, unless column has only NaN
. Saving . The fgunction return Dataframe, even if file name and path to save_dir are not available
In that case the file are not saved.
* csv_file_name .csv file name that will be used to save all three dataFrames create with that function
* save_dir path
Returns
_________________ _______________________________________________________________________________
* data_examples. DataFrame, summary of df, with the follwing values for each column imn df
. name : column name in df, attribute name
. dtype. : {"nan", if ony NaN werer detected in df, "object", "numeric"}
. NaN_perc : percentage of missing data (np.nan, pd.null) in df in a given attirbute
. summary : shor informtaion on type and number of data we shoudl expectc:
if dtype == "numeric": return min, mean and max values
if dtype == "object" : return number of unique classes
or messsage "all nonnull values are unique"
if dtype == nan : return "Missing data Only"
. examples : str, with reqwuested number of most frequent value examples in a given category
. nr_of_unique_values : numeric, scount of all unique values in a category
. nr_of_non_null_values : numeric, count of all non-null values in a category
* top_val_perc DataFrame with % of the top three or most frequence records in each column
in large dataframe that was summarized with summarize_data_and_give_examples()
* top_val_names DataFrame, same as top_val_perc, but with values, saved as string
"""
assert type(df)==pd.DataFrame, "ERROR, df must be pandas dataframe"
# info
if csv_file_name!="none" and save_dir!="none":
if verbose == True:
            print("\n! CAUTION ! csv_file_name should be provided without the .csv file extension!")
else:
pass
# create df,
col_names =["All_values_are_unique", "Nr_of_unique_values", "Nr_of_non_null_values",
"Examples","dtype", "nr_of_all_rows_in_original_df"]
df_examples = pd.DataFrame(np.zeros([df.shape[1],len(col_names)]), columns=col_names, dtype="object")
# add category names
df_examples["name"] = df.columns
# add NaN percentage,
nan_counts_per_category = df.isnull().sum(axis=0)
my_data = pd.DataFrame(np.round((nan_counts_per_category/df.shape[0])*100, 5), dtype="float64")
df_examples["NaN_perc"] = my_data.reset_index(drop=True)
# add nr of no NaN values
my_data = df.shape[0]-nan_counts_per_category
df_examples["Nr_of_non_null_values"] = my_data.reset_index(drop=True)
# add "nr_of_all_rows_in_original_df"
df_examples["nr_of_all_rows_in_original_df"] = df.shape[0]
# these arr will be filled for future bar plot
arr_example_percentage = np.zeros([df_examples.shape[0],nr_of_examples_per_category])
arr_example_values = np.zeros([df_examples.shape[0],nr_of_examples_per_category],dtype="object")
# add examples and counts
for i, j in enumerate(list(df.columns)):
# add general data ..............................................
# number of unique nonnull values in each column,
df_examples.loc[df_examples.name==j,"Nr_of_unique_values"] = df.loc[:,j].dropna().unique().size
# internal function helpers .....................................
# categorical data, fillin_All_values_are_unique
def fillin_All_values_are_unique(*, df_examples, df):
if (df_examples.loc[df_examples.name==j,"Nr_of_non_null_values"]==0).values[0]:
return "Missing data Only"
elif ((df_examples.loc[df_examples.name==j,"Nr_of_non_null_values"]>0).values[0]) and (df.loc[:,j].dropna().unique().size==df.loc[:,j].dropna().shape[0]):
return "all nonnull values are unique"
elif ((df_examples.loc[df_examples.name==j,"Nr_of_non_null_values"]>0).values[0]) and (df.loc[:,j].dropna().unique().size!=df.loc[:,j].dropna().shape[0]):
return f"{int(df_examples.Nr_of_unique_values[df_examples.name==j].values[0])} classes"
else:
pass
# fill other columns ..............................................
# this is auto-fill in case there is no data
if df[j].isnull().sum()==df.shape[0]:
# (df_examples.loc[df_examples.name==j,"NaN_perc"]==100).values[0]: this value was rounded up/down and was returning false positives!!!!
df_examples.loc[df_examples.name==j,"All_values_are_unique"] = "missing data only"
df_examples.loc[df_examples.name==j,"Nr_of_non_null_values"] = 0 # it should be 0, but i overwrite it just in case
df_examples.loc[df_examples.name==j,"Nr_of_unique_values"] = 0 # it should be 0, but i overwrite it just in case
df_examples.loc[df_examples.name==j,"Examples"] = "missing data only"
df_examples.loc[df_examples.name==j,"dtype"] = "missing data only" # because I dont want to use that in any further reading
# in other cases, we can create data examples, from nonnull values, depending on their type,
else:
if is_string_dtype(df[j]):
# dtype,
df_examples.loc[df_examples.name==j,"dtype"]= "text"
# All_values_are_unique,
# use helper function, to find if there are only unique categorical values eg: url, place example in dct,
df_examples.loc[df_examples.name==j,"All_values_are_unique"] = fillin_All_values_are_unique(df_examples=df_examples, df=df)
# Examples,
count_noNa_values_sorted = df.loc[:,j].dropna().value_counts().sort_values(ascending=False)
perc_noNa_values_sorted = count_noNa_values_sorted/np.sum(count_noNa_values_sorted)*100
s = perc_noNa_values_sorted.iloc[0:nr_of_examples_per_category].round(1)
if len(s.index.values.tolist())>0:
ind = | pd.Series(s.index) | pandas.Series |
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
df.head()
str(df)
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa")
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self):
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Index(['E'], dtype='object')] are in the [index]\""
),
):
dfnu.loc[["E"]]
# ToDo: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": vals})
with pytest.raises(KeyError, match="not in index"):
df.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
# non unique with non unique selector
df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
with pytest.raises(KeyError, match="not in index"):
df.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"])
with pytest.raises(KeyError, match="not in index"):
df.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
# GH 6504, multi-axis indexing
df = DataFrame(
np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"]
)
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ["a", "b"]]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ["a", "b"]]
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import codecademylib3_seaborn
import glob
#for missing values in race
def race_nan(col):
percent = 100
races = ['Hispanic','Black','Pacific','Asian','White','Native']
for race in races:
if race == col:
continue
else:
percent = percent - us_census[race]
return percent
#combine files into a list
files = glob.glob('states[0-9].csv')
#turn files into pandas dataframes and concatenate them
dataset = [ | pd.read_csv(file) | pandas.read_csv |
# coding: utf-8
# # Purchase probabilities per department, aisle, day of week and hour, from each user's purchase history
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import csv
import pickle
import xgboost as xgb
'''
A function that looks at each user's purchase history and computes the probability of each department being purchased.
Used to check which departments each user is most likely to buy from.
@ dep_sum : counts the number of records per department contained in the user's history.
@ dep_prob : table containing the computed per-department probabilities.
'''
def dep_prob():
orders_df = pd.read_csv("orders.csv", usecols=["order_id","user_id"])
prior_df = pd.read_csv("order_products__prior.csv", usecols = ["order_id", "product_id"])
products_df = pd.read_csv("products.csv", usecols = ["product_id", "department_id"])
#merge
order_prior =pd.merge(prior_df, orders_df, how='inner', on=['order_id'])
order_prior_product = | pd.merge(order_prior, products_df, how='inner', on=['product_id']) | pandas.merge |
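    # The original function body is truncated at this point; the steps below are a
    # reconstruction (assumption) based on the docstring: count each user's purchases
    # per department and turn the counts into per-user probabilities.
    dep_sum = order_prior_product.groupby(['user_id', 'department_id']).size().reset_index(name='dep_count')
    dep_sum['dep_prob'] = dep_sum['dep_count'] / dep_sum.groupby('user_id')['dep_count'].transform('sum')
    return dep_sum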
import os, cv2, glob
import numpy as np
import pandas as pd
import torch
import torch.utils.data as data
from torchvision import datasets, models, transforms
from PIL import Image
from sklearn.utils import shuffle
import settings
import settings_retrieval
from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90, RandomBrightnessContrast,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue, Resize, RandomSizedCrop,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, VerticalFlip,
IAASharpen, IAAEmboss, RandomContrast, RandomBrightness, Flip, OneOf, Compose, RandomGamma, ElasticTransform, ChannelShuffle,RGBShift, Rotate
)
class Rotate90(RandomRotate90):
def apply(self, img, factor=1, **params):
return np.ascontiguousarray(np.rot90(img, factor))
DATA_DIR = settings.DATA_DIR
def get_classes(num_classes, start_index=0, other=False):
df = pd.read_csv(os.path.join(DATA_DIR, 'train', 'top203094_classes.csv'))
classes = df.classes.values.tolist()[start_index: start_index+num_classes]
if other:
classes.append(-1)
assert num_classes == len(classes)
stoi = { classes[i]: i for i in range(len(classes))}
return classes, stoi
def get_filename(img_id, img_dir, test_data=False, flat=False, stage2=False):
if stage2:
return os.path.join(img_dir, img_id[0], img_id[1], img_id[2], '{}.jpg'.format(img_id))
if test_data:
for i in range(10):
fn = os.path.join(img_dir, str(i), '{}.jpg'.format(img_id))
if os.path.exists(fn):
return fn
raise AssertionError('image not found: {}'.format(img_id))
elif flat:
return os.path.join(img_dir, '{}.jpg'.format(img_id))
else:
return os.path.join(img_dir, img_id[0], img_id[1], img_id[2], '{}.jpg'.format(img_id))
train_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # open images mean and std
])
def img_augment(p=.8):
return Compose([
HorizontalFlip(.5),
OneOf([
CLAHE(clip_limit=2),
IAASharpen(),
IAAEmboss(),
RandomContrast(),
RandomBrightness(),
], p=0.3),
#
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=20, p=.75 ),
Blur(blur_limit=3, p=.33),
OpticalDistortion(p=.33),
GridDistortion(p=.33),
#HueSaturationValue(p=.33)
], p=p)
def weak_augment(p=.8):
return Compose([
RandomSizedCrop((200, 250), 256, 256, p=0.8),
RandomRotate90(p=0.05),
OneOf([
#CLAHE(clip_limit=2),
IAASharpen(),
IAAEmboss(),
RandomContrast(),
RandomBrightness(),
], p=0.3),
#
ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.1, rotate_limit=15, p=.75 ),
Blur(blur_limit=3, p=.33),
OpticalDistortion(p=.33),
#GridDistortion(p=.33),
#HueSaturationValue(p=.33)
], p=p)
def get_tta_aug_old(tta_index=0):
if tta_index == 0:
return Compose([Resize(256, 256)], p=1.)
else:
return Compose([RandomSizedCrop((200, 250), 256, 256, p=1.)], p=1.0)
def get_tta_aug(tta_index=None):
tta_augs = {
1: [HorizontalFlip(always_apply=True)],
2: [VerticalFlip(always_apply=True)],
3: [HorizontalFlip(always_apply=True),VerticalFlip(always_apply=True)],
4: [Rotate90(always_apply=True)],
5: [Rotate90(always_apply=True), HorizontalFlip(always_apply=True)],
6: [VerticalFlip(always_apply=True), Rotate90(always_apply=True)],
7: [HorizontalFlip(always_apply=True),VerticalFlip(always_apply=True), Rotate90(always_apply=True)],
}
return Compose(tta_augs[tta_index], p=1.0)
class ImageDataset(data.Dataset):
def __init__(self, df, img_dir, train_mode=True, test_data=False, flat=False, input_size=256, tta_index=None, stage2=False):
self.input_size = input_size
self.df = df
self.img_dir = img_dir
self.train_mode = train_mode
self.transforms = train_transforms
self.test_data = test_data
self.flat = flat
self.tta_index = tta_index
self.stage2 = stage2
def get_img(self, fn):
# open with PIL and transform
#img = Image.open(fn, 'r')
#img = img.convert('RGB')
#img = self.transforms(img)
# cv2 and albumentations
img = cv2.imread(fn)
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if self.stage2:
img = cv2.resize(img, (256, 256))
elif self.train_mode:
#aug = img_augment(p=0.8)
aug = weak_augment(p=0.8)
img = aug(image=img)['image']
elif self.tta_index is not None and self.tta_index > 0:
aug = get_tta_aug(self.tta_index)
img = aug(image=img)['image']
#if self.input_size != 256:
# aug = resize_aug(p=1.)
# img = aug(image=img)['image']
img = transforms.functional.to_tensor(img)
img = transforms.functional.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#img = img.transpose((2,0,1))
#img = (img /255).astype(np.float32)
#print(img.shape)
#normalize
#mean=[0.485, 0.456, 0.406]
#std=[0.229, 0.224, 0.225]
#img[0, :,:,] = (img[0, :,:,] - mean[0]) / std[0]
#img[1, :,:,] = (img[1, :,:,] - mean[1]) / std[1]
#img[2, :,:,] = (img[2, :,:,] - mean[2]) / std[2]
#img = torch.tensor(img)
return img
def __getitem__(self, index):
row = self.df.iloc[index]
try:
fn = get_filename(row['id'], self.img_dir, self.test_data, self.flat, self.stage2)
except AssertionError:
if self.flat or self.stage2:
raise
return torch.zeros(3, self.input_size, self.input_size), 0
#print(fn)
img = self.get_img(fn)
if self.flat:
return img
elif self.test_data:
return img, 1
else:
return img, row['label']
def __len__(self):
return len(self.df)
def collate_fn(self, batch):
if self.flat:
return torch.stack(batch)
else:
imgs = torch.stack([x[0] for x in batch])
labels = torch.tensor([x[1] for x in batch])
return imgs, labels
def get_train_val_loaders(num_classes, start_index=0, batch_size=4, dev_mode=False, val_num=6000, val_batch_size=1024, other=False, tta_index=None):
classes, stoi = get_classes(num_classes, start_index=start_index, other=other)
train_df = None
val_df = None
if num_classes == 50000 and start_index == 0:
df = pd.read_csv(os.path.join(DATA_DIR, 'train', 'train_{}.csv'.format(num_classes)))
df['label'] = df.landmark_id.map(lambda x: stoi[x])
elif num_classes == 203094:
df_all = pd.read_csv(os.path.join(DATA_DIR, 'train', 'train.csv'))
df = shuffle(df_all, random_state=1234)
df['label'] = df.landmark_id.map(lambda x: stoi[x])
else:
df_all = pd.read_csv(os.path.join(DATA_DIR, 'train', 'train.csv'))
df_selected = shuffle(df_all[df_all.landmark_id.isin(set(classes))].copy().sort_values(by='id'), random_state=1234)
df_selected['label'] = df_selected.landmark_id.map(lambda x: stoi[x])
split_index = int(len(df_selected) * 0.95)
train_df = df_selected[:split_index]
val_df = df_selected[split_index:]
if val_num is not None:
val_df = val_df[:val_num]
if other:
df_other = df_all[~df_all.landmark_id.isin(set(classes))].sample(1000)
df_other['label'] = num_classes-1 # TODO handle this at prediction
        train_df = pd.concat([train_df, df_other], sort=False)
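    # NOTE: the original function is truncated here; what follows is a hedged
    # sketch of how the loaders could be assembled from the pieces defined above.
    # The image directory name ('train-256'), the dev_mode truncation, and the
    # return signature are assumptions, not the original implementation.
    img_dir = os.path.join(DATA_DIR, 'train-256')
    if dev_mode:
        train_df = train_df[:100]
        val_df = val_df[:100]
    train_set = ImageDataset(train_df, img_dir, train_mode=True, tta_index=tta_index)
    val_set = ImageDataset(val_df, img_dir, train_mode=False, tta_index=tta_index)
    train_loader = data.DataLoader(train_set, batch_size=batch_size, shuffle=True,
                                   num_workers=4, collate_fn=train_set.collate_fn, drop_last=True)
    val_loader = data.DataLoader(val_set, batch_size=val_batch_size, shuffle=False,
                                 num_workers=4, collate_fn=val_set.collate_fn)
    return train_loader, val_loader, classes, stoi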
# Copyright (C) 2018 GuQiangJs.
# Licensed under Apache License 2.0 <see LICENSE file>
import datetime
import json
import pandas as pd
from finance_datareader_py import _AbsDailyReader
__all__ = ['GtimgDailyReader']
class GtimgDailyReader(_AbsDailyReader):
"""从 gtimg 读取每日成交汇总数据(支持获取前复权、后复权的数据)
Args:
symbols: 股票代码。**此参数只接收单一股票代码**。For example:600001,000002
prefix: 股票代码前缀。默认为空。
* 为空表示会自动根据股票代码判断。
* 对于某些特定指数请填写 `sz` 或 `sh`。
suffix: 股票代码后缀。默认为空。
* 为空表示会自动根据股票代码判断。
* 对于某些特定指数请自行填写。
type: {None, 'qfq', 'hfq'}, 默认值 None
* None: 不复权(默认)
* 'qfq': 前复权
* 'hfq': 后复权
start: 开始日期。默认值:2004-10-08
end: 结束日期。默认值:当前日期的 **前一天** 。
retry_count: 重试次数
pause: 重试间隔时间
session:
chunksize:
"""
def __init__(self, symbols=None, prefix='', suffix='', type=None,
start=datetime.date(2004, 10, 8),
end=datetime.date.today() + datetime.timedelta(days=-1),
retry_count=3, pause=1, session=None,
chunksize=25):
"""
Args:
symbols: 股票代码。**此参数只接收单一股票代码**。For example:600001
prefix: 股票代码前缀。默认为空。
* 为空表示会自动根据股票代码判断。
* 对于某些特定指数请填写 `sz` 或 `sh`。
suffix: 股票代码后缀。默认为空。
* 为空表示会自动根据股票代码判断。
* 对于某些特定指数请自行填写。
type: {None, 'qfq', 'hfq'}, 默认值 None
* None: 不复权(默认)
* 'qfq': 前复权
* 'hfq': 后复权
start: 开始日期。默认值:2004-10-08
end: 结束日期。默认值:当前日期的 **前一天** 。
retry_count: 重试次数
pause: 重试间隔时间
session:
chunksize:
"""
super(GtimgDailyReader, self).__init__(symbols, start, end,
retry_count, pause, session,
chunksize)
self._type = type
self._prefix = prefix
self._suffix = suffix
@property
def url(self):
# http://web.ifzq.gtimg.cn/appstock/app/fqkline/get?param=sz000002,day,2010-01-01,2018-12-31,6400,
# http://web.ifzq.gtimg.cn/appstock/app/fqkline/get?param=sz000002,day,2010-01-01,2018-12-31,6400,qfq
# http://web.ifzq.gtimg.cn/appstock/app/fqkline/get?param=sz000002,day,2010-01-01,2018-12-31,6400,hfq
return 'http://web.ifzq.gtimg.cn/appstock/app/fqkline/get'
def _parse_symbol(self):
        # Shenzhen-listed codes get an 'sz' prefix, Shanghai-listed codes get an 'sh' prefix
if self._prefix:
return self._prefix + str(self.symbols) + self._suffix
return ('sh' if str(self.symbols)[0] == '6'
else 'sz' if str(self.symbols)[0] == '0' or str(self.symbols)[
0] == '3' else '') + str(self.symbols) + self._suffix
def _parse_count(self):
return (self.end - self.start).days + 1
def _get_params(self, *args, **kwargs):
f = '%Y-%m-%d'
return {'param': '{symbol},day,{start},{end},{count},{fq}'.format(
symbol=self._parse_symbol(), start=self.start.strftime(f),
end=self.end.strftime(f), fq=self._type if self._type else '',
count=self._parse_count())}
def read(self):
"""读取数据
Returns:
``pandas.DataFrame``:
无数据时返回空白的 ``pandas.DataFrame`` 。参见 ``pandas.DataFrame.empty``。
部分返回列名说明:
* Open:开盘价
* Close: 收盘价
* High: 最高价
* Low: 最低价
* Volume: 交易量(手)
Examples:
.. code-block:: python
>>> from finance_datareader_py.gtimg.daily import GtimgDailyReader
>>> df = GtimgDailyReader(symbols='000002').read()
>>> print(df.tail())
Open Close High Low Volume
Date
2018-08-06 21.18 20.86 21.32 20.52 315702.0
2018-08-07 21.15 21.86 21.86 20.93 451653.0
2018-08-08 21.89 21.50 22.29 21.50 410720.0
2018-08-09 21.50 22.48 22.55 21.40 896200.0
2018-08-10 23.00 23.18 24.07 22.93 1201163.0
"""
try:
return super(GtimgDailyReader, self).read()
finally:
self.close()
def _read_url_as_StringIO(self, url, params=None):
"""读取原始数据"""
response = self._get_response(url, params=params)
txt = GtimgDailyReader._get_split_txt(response.text)
if not txt:
            return pd.DataFrame()
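        # The original file is truncated at this point. As a hedged sketch (the
        # exact layout of the gtimg JSON payload extracted by _get_split_txt is not
        # shown in this excerpt), the remaining step would parse `txt` as JSON and
        # hand the k-line rows back as a DataFrame:
        rows = json.loads(txt)
        return pd.DataFrame(rows)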
import numpy as np
import pandas as pd
from util.helper import ensure_dir
class QL:
@classmethod
def sim(cls, alpha, beta, persv, p0, p1, trials):
Q = np.array([0.0, 0.0])
a_list = []
r_list = []
a = -1
for t in range(trials):
pol0 = np.exp(beta * Q[0] + (a == 0) * persv) / \
(np.exp(beta * Q[0] + (a == 0) * persv) + np.exp((a == 1) * persv + beta * Q[1]))
if np.random.uniform(0, 1) < pol0:
a = 0
if np.random.uniform(0, 1) < p0:
r = 1
else:
r = 0
else:
a = 1
if np.random.uniform(0, 1) < p1:
r = 1
else:
r = 0
a_list.append(a)
r_list.append(r)
Q[a] = (1 - alpha) * Q[a] + alpha * r
return a_list, r_list
@classmethod
def sim_OFF(cls, alpha, beta, persv, rewards, actions):
Q = np.array([0.0, 0.0])
a_list = []
r_list = []
a = -1
pol_list = []
for t in range(len(rewards)):
pol0 = np.exp(beta * Q[0] + (a == 0) * persv) / \
(np.exp(beta * Q[0] + (a == 0) * persv) + np.exp((a == 1) * persv + beta * Q[1]))
pol_list.append(pol0)
a = actions[t]
r = rewards[t]
a_list.append(a)
r_list.append(r)
Q[a] = (1 - alpha) * Q[a] + alpha * r
return pol_list
@classmethod
def generate_off(cls):
rewards = [0] * 10
rewards[4] = 1
# rewards[14] = 1
actions = [0] * 10
ind = 0
for kappa in np.linspace(-1.2, 1.2, num=15):
beta = 3
z_dim = 1
other_dim = 0
path = "../nongit/local/synth/sims/dims/A1/z0/_" + str(ind) + '/'
ensure_dir(path)
pol = np.array(cls.sim_OFF(0.2, beta, kappa, rewards, actions))
polpd = pd.DataFrame({'0': pol, '1': 1 - pol, 'id': 'id1', 'block': 1})
polpd.to_csv(path + "policies-.csv")
train = pd.DataFrame({'reward': rewards, 'action': actions, 'state0':'', 'id': 'id1', 'block': 1})
train.to_csv(path + "train.csv")
np.savetxt(path + "z.csv", np.array([[beta, kappa]]), delimiter=',')
pd.DataFrame({'z_dim': [z_dim], 'other_dim': [other_dim]}).to_csv(path + "z_info.csv")
ind += 1
ind = 0
for beta in np.linspace(0, 9, num=15):
kappa = 0
z_dim = 0
other_dim = 1
path = "../nongit/local/synth/sims/dims/A1/z1/_" + str(ind) + '/'
ensure_dir(path)
pol = np.array(cls.sim_OFF(0.2, beta, kappa, rewards, actions))
polpd = pd.DataFrame({'0': pol, '1': 1 - pol, 'id': 'id1', 'block': 1})
polpd.to_csv(path + "policies-.csv")
            train = pd.DataFrame({'reward': rewards, 'action': actions, 'state0': '', 'id': 'id1', 'block': 1})
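            # The file is truncated here; the lines below simply mirror the end of
            # the kappa-loop above (save the training frame, the latent values and
            # their dimension info, then advance the counter). This is a sketch of
            # the obvious continuation, not the original text.
            train.to_csv(path + "train.csv")
            np.savetxt(path + "z.csv", np.array([[beta, kappa]]), delimiter=',')
            pd.DataFrame({'z_dim': [z_dim], 'other_dim': [other_dim]}).to_csv(path + "z_info.csv")
            ind += 1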
#!/usr/bin/env python
# coding: utf-8
# In[1]:
"""
LICENSE MIT
2020
<NAME>
Website : http://www.covidtracker.fr
Mail : <EMAIL>
README:
This file contains scripts that download data from data.gouv.fr and then process it to build many graphes.
The charts are exported to 'charts/images/france'.
Data is download to/imported from 'data/france'.
Requirements: please see the imports below (use pip3 to install them).
"""
# In[2]:
import pandas as pd
import plotly.express as px
from datetime import timedelta
import france_data_management as data
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from datetime import datetime
import plotly
import cv2
import numpy as np
PATH = "../../"
import locale
locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
# In[3]:
COULEUR_NON_VACCINES = "#C65102"
COULEUR_COMPLETEMENT_VACCINES = "#00308F"
COULEUR_COMPLETEMENT_VACCINES_RAPPEL = "black"
COULEUR_PARTIELLEMENT_VACCINES = "#4777d6"
# In[4]:
df_drees = pd.read_csv("https://data.drees.solidarites-sante.gouv.fr/explore/dataset/covid-19-resultats-issus-des-appariements-entre-si-vic-si-dep-et-vac-si/download/?format=csv&timezone=Europe/Berlin&lang=fr&use_labels_for_header=true&csv_separator=%3B", sep=";")
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by <NAME> (<EMAIL>) 02/08/2021, 12:05. Copyright (c) <NAME>
import os
import pickle
import warnings
from copy import deepcopy
from itertools import product
from typing import Tuple, List, Dict, Union
import numpy as np
import pandas as pd
from astropy import wcs
from astropy.coordinates import SkyCoord
from astropy.cosmology import Planck15
from astropy.cosmology.core import Cosmology
from astropy.units import Quantity, UnitBase, Unit, UnitConversionError, deg
from fitsio import FITS
from numpy import ndarray
from regions import SkyRegion, EllipseSkyRegion, CircleSkyRegion, EllipsePixelRegion, CirclePixelRegion
from regions import read_ds9, PixelRegion
from .. import xga_conf
from ..exceptions import NotAssociatedError, NoValidObservationsError, MultipleMatchError, \
NoProductAvailableError, NoMatchFoundError, ModelNotAssociatedError, ParameterNotAssociatedError
from ..imagetools.misc import pix_deg_scale
from ..imagetools.misc import sky_deg_scale
from ..imagetools.profile import annular_mask
from ..products import PROD_MAP, EventList, BaseProduct, BaseAggregateProduct, Image, Spectrum, ExpMap, \
RateMap, PSFGrid, BaseProfile1D, AnnularSpectra
from ..sourcetools import simple_xmm_match, nh_lookup, ang_to_rad, rad_to_ang
from ..sourcetools.misc import coord_to_name
from ..utils import ALLOWED_PRODUCTS, XMM_INST, dict_search, xmm_det, xmm_sky, OUTPUT, CENSUS
# This disables an annoying astropy warning that pops up all the time with XMM images
# Don't know if I should do this really
warnings.simplefilter('ignore', wcs.FITSFixedWarning)
class BaseSource:
"""
The overlord of all XGA classes, the superclass for all source classes. This contains a huge amount of
functionality upon which the rest of XGA is built, includes selecting observations, reading in data products,
and storing newly created data products.
"""
def __init__(self, ra: float, dec: float, redshift: float = None, name: str = None, cosmology=Planck15,
load_products: bool = True, load_fits: bool = False):
"""
The init method for the BaseSource, the most general type of XGA source which acts as a superclass for all
others. Base functionality is included, but this type of source shouldn't often need to be instantiated by
a user.
:param float ra: The right ascension (in degrees) of the source.
:param float dec: The declination (in degrees) of the source.
:param float redshift: The redshift of the source, default is None. Not supplying a redshift means that
proper distance units such as kpc cannot be used.
:param str name: The name of the source, default is None in which case a name will be assembled from the
coordinates given.
:param cosmology: An astropy cosmology object to use for analysis of this source, default is Planck15.
:param bool load_products: Should existing XGA generated products for this source be loaded in, default
is True.
:param bool load_fits: Should existing XSPEC fits for this source be loaded in, will only work if
load_products is True. Default is False.
"""
self._ra_dec = np.array([ra, dec])
if name is not None:
# We don't be liking spaces in source names, we also don't like underscores
self._name = name.replace(" ", "").replace("_", "-")
else:
self._name = coord_to_name(self.ra_dec)
# This is where profile products generated by XGA will live for this source
if not os.path.exists(OUTPUT + "profiles/{}".format(self.name)):
os.makedirs(OUTPUT + "profiles/{}".format(self.name))
# And create an inventory file for that directory
if not os.path.exists(OUTPUT + "profiles/{}/inventory.csv".format(self.name)):
with open(OUTPUT + "profiles/{}/inventory.csv".format(self.name), 'w') as inven:
inven.writelines(["file_name,obs_ids,insts,info_key,src_name,type"])
# We now create a directory for custom region files for the source to be stored in
if not os.path.exists(OUTPUT + "regions/{0}/{0}_custom.reg".format(self.name)):
os.makedirs(OUTPUT + "regions/{}".format(self.name))
# And a start to the custom file itself, with red (pnt src) as the default colour
with open(OUTPUT + "regions/{0}/{0}_custom.reg".format(self.name), 'w') as reggo:
reggo.write("global color=white\n")
# Only want ObsIDs, not pointing coordinates as well
# Don't know if I'll always use the simple method
matches = simple_xmm_match(ra, dec)
obs = matches["ObsID"].values
instruments = {o: [] for o in obs}
for o in obs:
if matches[matches["ObsID"] == o]["USE_PN"].values[0]:
instruments[o].append("pn")
if matches[matches["ObsID"] == o]["USE_MOS1"].values[0]:
instruments[o].append("mos1")
if matches[matches["ObsID"] == o]["USE_MOS2"].values[0]:
instruments[o].append("mos2")
# This checks that the observations have at least one usable instrument
self._obs = [o for o in obs if len(instruments[o]) > 0]
self._instruments = {o: instruments[o] for o in self._obs if len(instruments[o]) > 0}
# self._obs can be empty after this cleaning step, so do quick check and raise error if so.
if len(self._obs) == 0:
raise NoValidObservationsError("{s} has {n} observations ({a}), none of which have the necessary"
" files.".format(s=self.name, n=len(self._obs), a=", ".join(self._obs)))
# Here I set up the ObsID directories for products generated by XGA to be stored in, they also get an
# inventory file to store information about them - largely because some of the informative file names
# I was using were longer than 256 characters which my OS does not support
for o in self._obs:
if not os.path.exists(OUTPUT + o):
os.mkdir(OUTPUT + o)
if not os.path.exists(OUTPUT + '{}/inventory.csv'.format(o)):
with open(OUTPUT + '{}/inventory.csv'.format(o), 'w') as inven:
inven.writelines(['file_name,obs_id,inst,info_key,src_name,type'])
# Check in a box of half-side 5 arcminutes, should give an idea of which are on-axis
try:
on_axis_match = simple_xmm_match(ra, dec, Quantity(5, 'arcmin'))["ObsID"].values
except NoMatchFoundError:
on_axis_match = np.array([])
self._onaxis = list(np.array(self._obs)[np.isin(self._obs, on_axis_match)])
# nhlookup returns average and weighted average values, so just take the first
self._nH = nh_lookup(self.ra_dec)[0]
self._redshift = redshift
self._products, region_dict, self._att_files = self._initial_products()
# Want to update the ObsIDs associated with this source after seeing if all files are present
self._obs = list(self._products.keys())
self._instruments = {o: instruments[o] for o in self._obs if len(instruments[o]) > 0}
self._cosmo = cosmology
if redshift is not None:
self._lum_dist = self._cosmo.luminosity_distance(self._redshift)
self._ang_diam_dist = self._cosmo.angular_diameter_distance(self._redshift)
else:
self._lum_dist = None
self._ang_diam_dist = None
self._initial_regions, self._initial_region_matches = self._load_regions(region_dict)
# This is a queue for products to be generated for this source, will be a numpy array in practise.
# Items in the same row will all be generated in parallel, whereas items in the same column will
# be combined into a command stack and run in order.
self.queue = None
# Another attribute destined to be an array, will contain the output type of each command submitted to
# the queue array.
self.queue_type = None
# This contains an array of the paths of the final output of each command in the queue
self.queue_path = None
# This contains an array of the extra information needed to instantiate class
# after the SAS command has run
self.queue_extra_info = None
# Defining this here, although it won't be set to a boolean value in this superclass
self._detected = None
# This block defines various dictionaries that are used in the sub source classes, when context allows
# us to find matching source regions.
self._regions = None
self._other_regions = None
self._alt_match_regions = None
self._interloper_regions = []
self._interloper_masks = {}
# Set up an attribute where a default central coordinate will live
self._default_coord = self.ra_dec
# Init the the radius multipliers that define the outer and inner edges of a background annulus
self._back_inn_factor = 1.05
self._back_out_factor = 1.5
# Initialisation of fit result attributes
self._fit_results = {}
self._test_stat = {}
self._dof = {}
self._total_count_rate = {}
self._total_exp = {}
self._luminosities = {}
# Initialisation of attributes related to Extended and GalaxyCluster sources
self._peaks = None
# Initialisation of allowed overdensity radii as None
if not hasattr(self, 'r200'):
self._r200 = None
if not hasattr(self, 'r500'):
self._r500 = None
if not hasattr(self, 'r2500'):
self._r2500 = None
# Also adding a radius dictionary attribute
if not hasattr(self, "_radii"):
self._radii = {}
# Initialisation of cluster observables as None
self._richness = None
self._richness_err = None
self._wl_mass = None
self._wl_mass_err = None
self._peak_lo_en = Quantity(0.5, 'keV')
self._peak_hi_en = Quantity(2.0, 'keV')
# These attributes pertain to the cleaning of observations (as in disassociating them from the source if
# they don't include enough of the object we care about).
self._disassociated = False
self._disassociated_obs = {}
# If there is an existing XGA output directory, then it makes sense to search for products that XGA
# may have already generated and load them in - saves us wasting time making them again.
# The user does have control over whether this happens or not though.
# This goes at the end of init to make sure everything necessary has been declared
if os.path.exists(OUTPUT) and load_products:
self._existing_xga_products(load_fits)
# Now going to save load_fits in an attribute, just because if the observation is cleaned we need to
# run _existing_xga_products again, same for load_products
self._load_fits = load_fits
self._load_products = load_products
@property
def ra_dec(self) -> Quantity:
"""
A getter for the original ra and dec entered by the user.
:return: The ra-dec coordinates entered by the user when the source was first defined
:rtype: Quantity
"""
# Easier for it be internally kep as a numpy array, but I want the user to have astropy coordinates
return Quantity(self._ra_dec, 'deg')
@property
def default_coord(self) -> Quantity:
"""
A getter for the default analysis coordinate of this source.
:return: An Astropy quantity containing the default analysis coordinate.
:rtype: Quantity
"""
return self._default_coord
@default_coord.setter
def default_coord(self, new_coord: Quantity):
"""
Setter for the default analysis coordinate of this source.
:param Quantity new_coord: The new default coordinate.
"""
if not new_coord.unit.is_equivalent('deg'):
raise UnitConversionError("The new coordinate must be in degrees")
else:
new_coord = new_coord.to("deg")
self._default_coord = new_coord
def _initial_products(self) -> Tuple[dict, dict, dict]:
"""
Assembles the initial dictionary structure of existing XMM data products associated with this source.
:return: A dictionary structure detailing the data products available at initialisation, another
dictionary containing paths to region files, and another dictionary containing paths to attitude files.
:rtype: Tuple[dict, dict, dict]
"""
def read_default_products(en_lims: tuple) -> Tuple[str, dict]:
"""
This nested function takes pairs of energy limits defined in the config file and runs
through the default XMM products defined in the config file, filling in the energy limits and
checking if the file paths exist. Those that do exist are read into the relevant product object and
returned.
:param tuple en_lims: A tuple containing a lower and upper energy limit to generate file names for,
the first entry should be the lower limit, the second the upper limit.
:return: A dictionary key based on the energy limits for the file paths to be stored under, and the
dictionary of file paths.
:rtype: tuple[str, dict]
"""
not_these = ["root_xmm_dir", "lo_en", "hi_en", evt_key, "attitude_file"]
# Formats the generic paths given in the config file for this particular obs and energy range
files = {k.split('_')[1]: v.format(lo_en=en_lims[0], hi_en=en_lims[1], obs_id=obs_id)
for k, v in xga_conf["XMM_FILES"].items() if k not in not_these and inst in k}
# It is not necessary to check that the files exist, as this happens when the product classes
# are instantiated. So whether the file exists or not, an object WILL exist, and you can check if
# you should use it for analysis using the .usable attribute
# This looks up the class which corresponds to the key (which is the product
# ID in this case e.g. image), then instantiates an object of that class
lo = Quantity(float(en_lims[0]), 'keV')
hi = Quantity(float(en_lims[1]), 'keV')
prod_objs = {key: PROD_MAP[key](file, obs_id=obs_id, instrument=inst, stdout_str="", stderr_str="",
gen_cmd="", lo_en=lo, hi_en=hi)
for key, file in files.items() if os.path.exists(file)}
# If both an image and an exposure map are present for this energy band, a RateMap object is generated
if "image" in prod_objs and "expmap" in prod_objs:
prod_objs["ratemap"] = RateMap(prod_objs["image"], prod_objs["expmap"])
# Adds in the source name to the products
for prod in prod_objs:
prod_objs[prod].src_name = self._name
# As these files existed already, I don't have any stdout/err strings to pass, also no
# command string.
bound_key = "bound_{l}-{u}".format(l=float(en_lims[0]), u=float(en_lims[1]))
return bound_key, prod_objs
# This dictionary structure will contain paths to all available data products associated with this
# source instance, both pre-generated and made with XGA.
obs_dict = {obs: {} for obs in self._obs}
# Regions will get their own dictionary, I don't care about keeping the reg_file paths as
# an attribute because they get read into memory in the init of this class
reg_dict = {}
# Attitude files also get their own dictionary, they won't be read into memory by XGA
att_dict = {}
# Use itertools to create iterable and avoid messy nested for loop
# product makes iterable of tuples, with all combinations of the events files and ObsIDs
for oi in product(obs_dict, XMM_INST):
# Produces a list of the combinations of upper and lower energy bounds from the config file.
en_comb = zip(xga_conf["XMM_FILES"]["lo_en"], xga_conf["XMM_FILES"]["hi_en"])
# This is purely to make the code easier to read
obs_id = oi[0]
inst = oi[1]
if inst not in self._instruments[obs_id]:
continue
evt_key = "clean_{}_evts".format(inst)
evt_file = xga_conf["XMM_FILES"][evt_key].format(obs_id=obs_id)
reg_file = xga_conf["XMM_FILES"]["region_file"].format(obs_id=obs_id)
# Attitude file is a special case of data product, only SAS should ever need it, so it doesn't
# have a product object
att_file = xga_conf["XMM_FILES"]["attitude_file"].format(obs_id=obs_id)
if os.path.exists(evt_file) and os.path.exists(att_file):
# An instrument subsection of an observation will ONLY be populated if the events file exists
# Otherwise nothing can be done with it.
obs_dict[obs_id][inst] = {"events": EventList(evt_file, obs_id=obs_id, instrument=inst,
stdout_str="", stderr_str="", gen_cmd="")}
att_dict[obs_id] = att_file
# Dictionary updated with derived product names
map_ret = map(read_default_products, en_comb)
obs_dict[obs_id][inst].update({gen_return[0]: gen_return[1] for gen_return in map_ret})
if os.path.exists(reg_file):
# Regions dictionary updated with path to region file, if it exists
reg_dict[obs_id] = reg_file
else:
reg_dict[obs_id] = None
# Cleans any observations that don't have at least one instrument associated with them
obs_dict = {o: v for o, v in obs_dict.items() if len(v) != 0}
if len(obs_dict) == 0:
raise NoValidObservationsError("{s} has {n} observations ({a}), none of which have the necessary"
" files.".format(s=self.name, n=len(self._obs), a=", ".join(self._obs)))
return obs_dict, reg_dict, att_dict
def update_products(self, prod_obj: Union[BaseProduct, BaseAggregateProduct, BaseProfile1D,
List[BaseProduct], List[BaseAggregateProduct], List[BaseProfile1D]]):
"""
Setter method for the products attribute of source objects. Cannot delete existing products,
but will overwrite existing products. Raises errors if the ObsID is not associated
with this source or the instrument is not associated with the ObsID. Lists of products can also be passed
and will be added to the source storage structure, these lists may also contain None values, as typically
XGA will return None if a profile fails to generate (for instance), in which case that entry will simply
be ignored.
:param BaseProduct/BaseAggregateProduct/BaseProfile1D/List[BaseProduct]/List[BaseProfile1D] prod_obj: The
new product object(s) to be added to the source object.
"""
# Aggregate products are things like PSF grids and sets of annular spectra.
if not isinstance(prod_obj, (BaseProduct, BaseAggregateProduct, BaseProfile1D, list)) and prod_obj is not None:
raise TypeError("Only product objects can be assigned to sources.")
elif isinstance(prod_obj, list) and not all([isinstance(p, (BaseProduct, BaseAggregateProduct, BaseProfile1D))
or p is None for p in prod_obj]):
raise TypeError("If a list is passed, only product objects (or None values) may be included.")
elif not isinstance(prod_obj, list):
prod_obj = [prod_obj]
for po in prod_obj:
if po is not None:
if isinstance(po, Image):
extra_key = po.storage_key
en_key = "bound_{l}-{u}".format(l=float(po.energy_bounds[0].value),
u=float(po.energy_bounds[1].value))
elif type(po) == Spectrum or type(po) == AnnularSpectra or isinstance(po, BaseProfile1D):
extra_key = po.storage_key
elif type(po) == PSFGrid:
# The first part of the key is the model used (by default its ELLBETA for example), and
# the second part is the number of bins per side. - Enough to uniquely identify the PSF.
extra_key = po.model + "_" + str(po.num_bins)
else:
extra_key = None
# All information about where to place it in our storage hierarchy can be pulled from the product
# object itself
obs_id = po.obs_id
inst = po.instrument
p_type = po.type
# Previously, merged images/exposure maps were stored in a separate dictionary, but now everything lives
# together - merged products do get a 'combined' prefix on their product type key though
if obs_id == "combined":
p_type = "combined_" + p_type
# 'Combined' will effectively be stored as another ObsID
if "combined" not in self._products:
self._products["combined"] = {}
# The product gets the name of this source object added to it
po.src_name = self.name
# Double check that something is trying to add products from another source to the current one.
if obs_id != "combined" and obs_id not in self._products:
raise NotAssociatedError("{o} is not associated with this X-ray source.".format(o=obs_id))
elif inst != "combined" and inst not in self._products[obs_id]:
raise NotAssociatedError("{i} is not associated with XMM observation {o}".format(i=inst, o=obs_id))
if extra_key is not None and obs_id != "combined":
# If there is no entry for this 'extra key' (energy band for instance) already, we must make one
if extra_key not in self._products[obs_id][inst]:
self._products[obs_id][inst][extra_key] = {}
self._products[obs_id][inst][extra_key][p_type] = po
elif extra_key is None and obs_id != "combined":
self._products[obs_id][inst][p_type] = po
# Here we deal with merged products, they live in the same dictionary, but with no instrument entry
# and ObsID = 'combined'
elif extra_key is not None and obs_id == "combined":
if extra_key not in self._products[obs_id]:
self._products[obs_id][extra_key] = {}
self._products[obs_id][extra_key][p_type] = po
elif extra_key is None and obs_id == "combined":
self._products[obs_id][p_type] = po
# This is for an image being added, so we look for a matching exposure map. If it exists we can
# make a ratemap
if p_type == "image":
# No chance of an expmap being PSF corrected, so we just use the energy key to
# look for one that matches our new image
exs = [prod for prod in self.get_products("expmap", obs_id, inst, just_obj=False) if en_key in prod]
if len(exs) == 1:
new_rt = RateMap(po, exs[0][-1])
new_rt.src_name = self.name
self._products[obs_id][inst][extra_key]["ratemap"] = new_rt
# However, if its an exposure map that's been added, we have to look for matching image(s). There
# could be multiple, because there could be a normal image, and a PSF corrected image
elif p_type == "expmap":
# PSF corrected extra keys are built on top of energy keys, so if the en_key is within the extra
# key string it counts as a match
ims = [prod for prod in self.get_products("image", obs_id, inst, just_obj=False)
if en_key in prod[-2]]
# If there is at least one match, we can go to work
if len(ims) != 0:
for im in ims:
new_rt = RateMap(im[-1], po)
new_rt.src_name = self.name
self._products[obs_id][inst][im[-2]]["ratemap"] = new_rt
# The same behaviours hold for combined_image and combined_expmap, but they get
# stored in slightly different places
elif p_type == "combined_image":
exs = [prod for prod in self.get_products("combined_expmap", just_obj=False) if en_key in prod]
if len(exs) == 1:
new_rt = RateMap(po, exs[0][-1])
new_rt.src_name = self.name
# Remember obs_id for combined products is just 'combined'
self._products[obs_id][extra_key]["combined_ratemap"] = new_rt
elif p_type == "combined_expmap":
ims = [prod for prod in self.get_products("combined_image", just_obj=False) if en_key in prod[-2]]
if len(ims) != 0:
for im in ims:
new_rt = RateMap(im[-1], po)
new_rt.src_name = self.name
self._products[obs_id][im[-2]]["combined_ratemap"] = new_rt
if isinstance(po, BaseProfile1D) and not os.path.exists(po.save_path):
po.save()
# Here we make sure to store a record of the added product in the relevant inventory file
if isinstance(po, BaseProduct) and po.obs_id != 'combined':
inven = pd.read_csv(OUTPUT + "{}/inventory.csv".format(po.obs_id), dtype=str)
# Don't want to store a None value as a string for the info_key
if extra_key is None:
info_key = ''
else:
info_key = extra_key
# I want only the name of the file as it is in the storage directory, I don't want an
# absolute path, so I remove the leading information about the absolute location in
# the .path string
f_name = po.path.split(OUTPUT + "{}/".format(po.obs_id))[-1]
# Images, exposure maps, and other such things are not source specific, so I don't want
# the inventory file to assign them a specific source
if isinstance(po, Image):
s_name = ''
else:
s_name = po.src_name
# Creates new pandas series to be appended to the inventory dataframe
new_line = pd.Series([f_name, po.obs_id, po.instrument, info_key, s_name, po.type],
['file_name', 'obs_id', 'inst', 'info_key', 'src_name', 'type'], dtype=str)
# Appends the series
inven = inven.append(new_line, ignore_index=True)
# Checks for rows that are exact duplicates, this should never happen as far as I can tell, but
# if it did I think it would cause problems so better to be safe and add this.
inven.drop_duplicates(subset=None, keep='first', inplace=True)
# Saves the updated inventory file
inven.to_csv(OUTPUT + "{}/inventory.csv".format(po.obs_id), index=False)
elif isinstance(po, BaseProduct) and po.obs_id == 'combined':
inven = pd.read_csv(OUTPUT + "combined/inventory.csv".format(po.obs_id), dtype=str)
# Don't want to store a None value as a string for the info_key
if extra_key is None:
info_key = ''
else:
info_key = extra_key
# We know that this particular product is a combination of multiple ObsIDs, and those ObsIDs
# are not stored explicitly within the product object. However we are currently within the
# source object that they were generated from, thus we do have that information available
# Using the _instruments attribute also gives us access to inst information
i_str = "/".join([i for o in self._instruments for i in self._instruments[o]])
o_str = "/".join([o for o in self._instruments for i in self._instruments[o]])
# They cannot be stored as lists for a single column entry in a csv though, so I am smushing
# them into strings
f_name = po.path.split(OUTPUT + "combined/")[-1]
if isinstance(po, Image):
s_name = ''
else:
s_name = po.src_name
# Creates new pandas series to be appended to the inventory dataframe
new_line = pd.Series([f_name, o_str, i_str, info_key, s_name, po.type],
['file_name', 'obs_ids', 'insts', 'info_key', 'src_name', 'type'], dtype=str)
inven = inven.append(new_line, ignore_index=True)
inven.drop_duplicates(subset=None, keep='first', inplace=True)
inven.to_csv(OUTPUT + "combined/inventory.csv".format(po.obs_id), index=False)
elif isinstance(po, BaseProfile1D) and po.obs_id != 'combined':
inven = pd.read_csv(OUTPUT + "profiles/{}/inventory.csv".format(self.name), dtype=str)
# Don't want to store a None value as a string for the info_key
if extra_key is None:
info_key = ''
else:
info_key = extra_key
f_name = po.save_path.split(OUTPUT + "profiles/{}/".format(self.name))[-1]
i_str = po.instrument
o_str = po.obs_id
# Creates new pandas series to be appended to the inventory dataframe
new_line = pd.Series([f_name, o_str, i_str, info_key, po.src_name, po.type],
['file_name', 'obs_ids', 'insts', 'info_key', 'src_name', 'type'], dtype=str)
inven = inven.append(new_line, ignore_index=True)
inven.drop_duplicates(subset=None, keep='first', inplace=True)
inven.to_csv(OUTPUT + "profiles/{}/inventory.csv".format(self.name), index=False)
elif isinstance(po, BaseProfile1D) and po.obs_id == 'combined':
inven = pd.read_csv(OUTPUT + "profiles/{}/inventory.csv".format(self.name), dtype=str)
# Don't want to store a None value as a string for the info_key
if extra_key is None:
info_key = ''
else:
info_key = extra_key
f_name = po.save_path.split(OUTPUT + "profiles/{}/".format(self.name))[-1]
i_str = "/".join([i for o in self._instruments for i in self._instruments[o]])
o_str = "/".join([o for o in self._instruments for i in self._instruments[o]])
# Creates new pandas series to be appended to the inventory dataframe
new_line = pd.Series([f_name, o_str, i_str, info_key, po.src_name, po.type],
['file_name', 'obs_ids', 'insts', 'info_key', 'src_name', 'type'], dtype=str)
inven = inven.append(new_line, ignore_index=True)
inven.drop_duplicates(subset=None, keep='first', inplace=True)
inven.to_csv(OUTPUT + "profiles/{}/inventory.csv".format(self.name), index=False)
def _existing_xga_products(self, read_fits: bool):
"""
A method specifically for searching an existing XGA output directory for relevant files and loading
them in as XGA products. This will retrieve images, exposure maps, and spectra; then the source product
structure is updated. The method also finds previous fit results and loads them in.
:param bool read_fits: Boolean flag that controls whether past fits are read back in or not.
"""
def parse_image_like(file_path: str, exact_type: str, merged: bool = False) -> BaseProduct:
"""
Very simple little function that takes the path to an XGA generated image-like product (so either an
image or an exposure map), parses the file path and makes an XGA object of the correct type by using
the exact_type variable.
:param str file_path: Absolute path to an XGA-generated XMM data product.
:param str exact_type: Either 'image' or 'expmap', the type of product that the file_path leads to.
:param bool merged: Whether this is a merged file or not.
:return: An XGA product object.
:rtype: BaseProduct
"""
# Get rid of the absolute part of the path, then split by _ to get the information from the file name
im_info = file_path.split("/")[-1].split("_")
if not merged:
# I know its hard coded but this will always be the case, these are files I generate with XGA.
obs_id = im_info[0]
ins = im_info[1]
else:
ins = "combined"
obs_id = "combined"
en_str = [entry for entry in im_info if "keV" in entry][0]
lo_en, hi_en = en_str.split("keV")[0].split("-")
# Have to be astropy quantities before passing them into the Product declaration
lo_en = Quantity(float(lo_en), "keV")
hi_en = Quantity(float(hi_en), "keV")
# Different types of Product objects, the empty strings are because I don't have the stdout, stderr,
# or original commands for these objects.
if exact_type == "image" and "psfcorr" not in file_path:
final_obj = Image(file_path, obs_id, ins, "", "", "", lo_en, hi_en)
elif exact_type == "image" and "psfcorr" in file_path:
final_obj = Image(file_path, obs_id, ins, "", "", "", lo_en, hi_en)
final_obj.psf_corrected = True
final_obj.psf_bins = int([entry for entry in im_info if "bin" in entry][0].split('bin')[0])
final_obj.psf_iterations = int([entry for entry in im_info if "iter" in
entry][0].split('iter')[0])
final_obj.psf_model = [entry for entry in im_info if "mod" in entry][0].split("mod")[0]
final_obj.psf_algorithm = [entry for entry in im_info if "algo" in entry][0].split("algo")[0]
elif exact_type == "expmap":
final_obj = ExpMap(file_path, obs_id, ins, "", "", "", lo_en, hi_en)
else:
raise TypeError("Only image and expmap are allowed.")
return final_obj
og_dir = os.getcwd()
# This is used for spectra that should be part of an AnnularSpectra object
ann_spec_constituents = {}
# This is to store whether all components could be loaded in successfully
ann_spec_usable = {}
for obs in self._obs:
if os.path.exists(OUTPUT + obs):
os.chdir(OUTPUT + obs)
cur_d = os.getcwd() + '/'
# Loads in the inventory file for this ObsID
inven = pd.read_csv("inventory.csv", dtype=str)
# Here we read in instruments and exposure maps which are relevant to this source
im_lines = inven[(inven['type'] == 'image') | (inven['type'] == 'expmap')]
# Instruments is a dictionary with ObsIDs on the top level and then valid instruments on
# the lower level. As such we can be sure here we're only reading in instruments we decided
# are valid
for i in self.instruments[obs]:
# Fetches lines of the inventory which match the current ObsID and instrument
rel_ims = im_lines[(im_lines['obs_id'] == obs) & (im_lines['inst'] == i)]
for r_ind, r in rel_ims.iterrows():
self.update_products(parse_image_like(cur_d+r['file_name'], r['type']))
# For spectra we search for products that have the name of this object in, as they are for
# specific parts of the observation.
# Have to replace any + characters with x, as that's what we did in evselect_spectrum due to SAS
# having some issues with the + character in file names
named = [os.path.abspath(f) for f in os.listdir(".") if os.path.isfile(f) and
self._name.replace("+", "x") in f and obs in f
and (XMM_INST[0] in f or XMM_INST[1] in f or XMM_INST[2] in f)]
specs = [f for f in named if "spec" in f.split('/')[-1] and "back" not in f.split('/')[-1]]
for sp in specs:
# Filename contains a lot of useful information, so splitting it out to get it
sp_info = sp.split("/")[-1].split("_")
# Reading these out into variables mostly for my own sanity while writing this
obs_id = sp_info[0]
inst = sp_info[1]
# I now store the central coordinate in the file name, and read it out into astropy quantity
# for when I need to define the spectrum object
central_coord = Quantity([float(sp_info[3].strip('ra')), float(sp_info[4].strip('dec'))], 'deg')
# Also read out the inner and outer radii into astropy quantities (I know that
# they will be in degree units).
r_inner = Quantity(np.array(sp_info[5].strip('ri').split('and')).astype(float), 'deg')
r_outer = Quantity(np.array(sp_info[6].strip('ro').split('and')).astype(float), 'deg')
# Check if there is only one r_inner and r_outer value each, if so its a circle
# (otherwise its an ellipse)
if len(r_inner) == 1:
r_inner = r_inner[0]
r_outer = r_outer[0]
# Only check the actual filename, as I have no knowledge of what strings might be in the
# user's path to xga output
if 'grpTrue' in sp.split('/')[-1]:
grp_ind = sp_info.index('grpTrue')
grouped = True
else:
grouped = False
# mincnt or minsn information will only be in the filename if the spectrum is grouped
if grouped and 'mincnt' in sp.split('/')[-1]:
min_counts = int(sp_info[grp_ind+1].split('mincnt')[-1])
min_sn = None
elif grouped and 'minsn' in sp.split('/')[-1]:
min_sn = float(sp_info[grp_ind+1].split('minsn')[-1])
min_counts = None
else:
# We still need to pass the variables to the spectrum definition, even if it isn't
# grouped
min_sn = None
min_counts = None
# Only if oversampling was applied will it appear in the filename
if 'ovsamp' in sp.split('/')[-1]:
over_sample = int(sp_info[-2].split('ovsamp')[-1])
else:
over_sample = None
if "region" in sp.split('/')[-1]:
region = True
else:
region = False
# I split the 'spec' part of the end of the name of the spectrum, and can use the parts of the
# file name preceding it to search for matching arf/rmf files
sp_info_str = sp.split('_spec')[0]
# Fairly self explanatory, need to find all the separate products needed to define an XGA
# spectrum
arf = [f for f in named if "arf" in f and "back" not in f and sp_info_str == f.split('.arf')[0]]
rmf = [f for f in named if "rmf" in f and "back" not in f and sp_info_str == f.split('.rmf')[0]]
# As RMFs can be generated for source and background spectra separately, or one for both,
# we need to check for matching RMFs to the spectrum we found
if len(rmf) == 0:
rmf = [f for f in named if "rmf" in f and "back" not in f and inst in f and "universal" in f]
# Exact same checks for the background spectrum
back = [f for f in named if "backspec" in f and inst in f
and sp_info_str == f.split('_backspec')[0]]
back_arf = [f for f in named if "arf" in f and inst in f
and sp_info_str == f.split('_back.arf')[0] and "back" in f]
back_rmf = [f for f in named if "rmf" in f and "back" in f and inst in f
and sp_info_str == f.split('_back.rmf')[0]]
if len(back_rmf) == 0:
back_rmf = rmf
# If exactly one match has been found for all of the products, we define an XGA spectrum and
# add it the source object.
if len(arf) == 1 and len(rmf) == 1 and len(back) == 1 and len(back_arf) == 1 and len(back_rmf) == 1:
# Defining our XGA spectrum instance
obj = Spectrum(sp, rmf[0], arf[0], back[0], central_coord, r_inner, r_outer, obs_id, inst,
grouped, min_counts, min_sn, over_sample, "", "", "", region, back_rmf[0],
back_arf[0])
if "ident" in sp.split('/')[-1]:
set_id = int(sp.split('ident')[-1].split('_')[0])
ann_id = int(sp.split('ident')[-1].split('_')[1])
obj.annulus_ident = ann_id
obj.set_ident = set_id
if set_id not in ann_spec_constituents:
ann_spec_constituents[set_id] = []
ann_spec_usable[set_id] = True
ann_spec_constituents[set_id].append(obj)
else:
# And adding it to the source storage structure, but only if its not a member
# of an AnnularSpectra
try:
self.update_products(obj)
except NotAssociatedError:
pass
elif len(arf) == 1 and len(rmf) == 1 and len(back) == 1 and len(back_arf) == 0:
# Defining our XGA spectrum instance
obj = Spectrum(sp, rmf[0], arf[0], back[0], central_coord, r_inner, r_outer, obs_id, inst,
grouped, min_counts, min_sn, over_sample, "", "", "", region)
if "ident" in sp.split('/')[-1]:
set_id = int(sp.split('ident')[-1].split('_')[0])
ann_id = int(sp.split('ident')[-1].split('_')[1])
obj.annulus_ident = ann_id
obj.set_ident = set_id
if set_id not in ann_spec_constituents:
ann_spec_constituents[set_id] = []
ann_spec_usable[set_id] = True
ann_spec_constituents[set_id].append(obj)
else:
# And adding it to the source storage structure, but only if its not a member
# of an AnnularSpectra
try:
self.update_products(obj)
except NotAssociatedError:
pass
else:
warnings.warn("{src} spectrum {sp} cannot be loaded in due to a mismatch in available"
" ancillary files".format(src=self.name, sp=sp))
if "ident" in sp.split("/")[-1]:
set_id = int(sp.split('ident')[-1].split('_')[0])
ann_spec_usable[set_id] = False
os.chdir(og_dir)
# Here we will load in existing xga profile objects
os.chdir(OUTPUT + "profiles/{}".format(self.name))
saved_profs = [pf for pf in os.listdir('.') if '.xga' in pf and 'profile' in pf and self.name in pf]
for pf in saved_profs:
with open(pf, 'rb') as reado:
temp_prof = pickle.load(reado)
try:
self.update_products(temp_prof)
except NotAssociatedError:
pass
os.chdir(og_dir)
# If spectra that should be a part of annular spectra object(s) have been found, then I need to create
# those objects and add them to the storage structure
if len(ann_spec_constituents) != 0:
for set_id in ann_spec_constituents:
if ann_spec_usable[set_id]:
ann_spec_obj = AnnularSpectra(ann_spec_constituents[set_id])
if self._redshift is not None:
# If we know the redshift we will add the radii to the annular spectra in proper distance units
ann_spec_obj.proper_radii = self.convert_radius(ann_spec_obj.radii, 'kpc')
self.update_products(ann_spec_obj)
# Here we load in any combined images and exposure maps that may have been generated
os.chdir(OUTPUT + 'combined')
cur_d = os.getcwd() + '/'
# This creates a set of observation-instrument strings that describe the current combinations associated
# with this source, for testing against to make sure we're loading in combined images/expmaps that
# do belong with this source
src_oi_set = set([o+i for o in self._instruments for i in self._instruments[o]])
# Loads in the inventory file for this ObsID
        inven = pd.read_csv("inventory.csv", dtype=str)
import pandas as pd
import numpy as np
class DataSeriesBase(object):
_name = None # 后面必须先设置
_instrument = None
def __init__(self):
self._dict = {}
def __getitem__(self, key):
return self._dict[self._instrument][key][self._name]
def initialize(self, instrument, initial):
self._dict[instrument] = [{'date': 'start', self._name: initial}] # 初始化数据没用,后面调用的时候都会忽略
def set_instrument(self, instrument):
self._instrument = instrument
def add(self, date, val):
self._dict[self._instrument].append({'date': date, self._name: val})
@property
def dict(self):
return self._dict[self._instrument]
@property
def keys(self):
return self._dict.keys()
@property
def date(self):
return [i['date'] for i in self._dict[self._instrument]]
@property
def list(self):
return [i[self._name] for i in self._dict[self._instrument]]
@property
def df(self):
        df = pd.DataFrame(self._dict[self._instrument][1:])
        return df
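# A minimal usage sketch of DataSeriesBase (not part of the original module): a
# concrete subclass only has to pick a _name, after which values can be recorded
# per instrument and read back as a list or a DataFrame. The 'equity' series name
# and the sample values below are made up for illustration.
if __name__ == '__main__':
    class EquitySeries(DataSeriesBase):
        _name = 'equity'

    series = EquitySeries()
    series.initialize('AAPL', 100000.0)   # seed entry; skipped by .df
    series.set_instrument('AAPL')
    series.add('2020-01-02', 100500.0)
    series.add('2020-01-03', 100320.0)
    print(series.list)   # includes the seed value: [100000.0, 100500.0, 100320.0]
    print(series.df)     # DataFrame with 'date' and 'equity' columns for the added rows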
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import glob
import os
import pandas as pd
from ts_datasets.anomaly.base import TSADBaseDataset
class Synthetic(TSADBaseDataset):
"""
Wrapper to load a sythetically generated dataset.
The dataset was generated using three base time series, each of which
was separately injected with shocks, spikes, dips and level shifts, making
a total of 15 time series (including the base time series without anomalies).
Subsets can are defined by the base time series used ("horizontal",
"seasonal", "upward_downward"), or the type of injected anomaly ("shock",
"spike", "dip", "level"). The "anomaly" subset refers to all times series with
injected anomalies (12) while "base" refers to all time series without them (3).
"""
base_ts_subsets = ["horizontal", "seasonal", "upward_downward"]
anomaly_subsets = ["shock", "spike", "dip", "level", "trend"]
valid_subsets = ["anomaly", "all", "base"] + base_ts_subsets + anomaly_subsets
def __init__(self, subset="anomaly", rootdir=None):
super().__init__()
assert subset in self.valid_subsets, f"subset should be in {self.valid_subsets}, but got {subset}"
self.subset = subset
if rootdir is None:
fdir = os.path.dirname(os.path.abspath(__file__))
merlion_root = os.path.abspath(os.path.join(fdir, "..", "..", ".."))
rootdir = os.path.join(merlion_root, "data", "synthetic_anomaly")
csvs = sorted(glob.glob(f"{rootdir}/*.csv"))
if subset == "base":
csvs = [csv for csv in csvs if "anom" not in os.path.basename(csv)]
elif subset != "all":
csvs = [csv for csv in csvs if "anom" in os.path.basename(csv)]
if subset in self.base_ts_subsets + self.anomaly_subsets:
csvs = [csv for csv in csvs if subset in os.path.basename(csv)]
for csv in csvs:
df = pd.read_csv(csv)
df["timestamp"] = | pd.to_datetime(df["timestamp"], unit="s") | pandas.to_datetime |
# Statistical Programming in Python
from matplotlib import pyplot as plt # Visualization with Python
import seaborn as sns # Statistical data visualization
import numpy as np # Fundamental package for scientific computing with Python
import pandas as pd # Python Data Analysis Library
import os # Miscellaneous operating system interfaces
from pathlib import Path # Object-oriented filesystem paths
print("Hello World")
# -------------------------- Installing and Preparation -----------------------
# Check Python Version -> In Terminal
## python --version
# If pip isn’t already installed, try to bootstrap it from the standard library:
## pip --version
# python -m ensurepip --default-pip
# Use pip for Installing -> In Terminal
# pip install seaborn
# -------------------------------- Introduction -------------------------------
# load required libraries
# find helps or documentations
help(sns.boxplot)
df = sns.load_dataset('iris') # load iris dataset as dataframe
type(df) # check the type of the dataframe
# ---------------------------- Draw and Show a Plot ---------------------------
# Make boxplot for one group only
plt.figure(figsize=(8, 8), dpi=100) # figsize: width, height in inches
fig1 = sns.boxplot(y=df["sepal_length"]) # make the fig1 as boxplot
plt.show()
# ---------------------------- Draw and Save a Plot ---------------------------
plt.figure(figsize=(8, 8), dpi=100)
fig1 = sns.boxplot(y=df["sepal_length"])
# plt.savefig(
# os.path.join( # absolute path
# 'address goes here',
# 'fig1.pdf'
# ),
# format = 'pdf'
# )
# -------------------------------- Relative Paths -----------------------------
cwd = Path.cwd() # find the current working directory (cwd)
print(cwd)
data_path = (cwd / './data/').resolve() # determine data path
print(data_path)
figures_path = (cwd / './figures/').resolve() # determine figures path
print(figures_path)
# Draw and Save a Plot using Relative Path
plt.figure(figsize=(8, 8), dpi=100)
fig1 = sns.boxplot(y=df["sepal_length"])
plt.savefig(os.path.join(figures_path, 'fig1.pdf'), format='pdf')
# --------------------------- Assignment and Operations ----------------------
weight = 72.0 # weight in kilograms
height = 1.82 # height in meters
BMI = weight/(height**2) # formula to calculate Body Mass Index (kg/m^2)
BMI
values1 = list(range(1, 101)) # create a list
print(values1) # print the list values
np.mean(values1) # find the mean of the list
# Create an array with missing values (i.e., nan)
values2 = np.array([1, np.nan, 3, 4])
print(values2) # print the list values
np.mean(values2) # find the mean of the array
np.nanmean(values2) # find the mean of the array and remove nan
# ---------------------------- Functions and Conditions -----------------------
def BodyMassIndex(weight, height):
if height > 2.5:
raise ValueError('height is not in meters')
else:
return(weight/(height**2))
BodyMassIndex(72, 1.82)
# ----------------------------- Data Types and Classes ------------------------
# -------------------------------- scalar variables ---------------------------
# a float variable is a number with a fractional part
float_var = 8.4
type(float_var)
isinstance(float_var, float)
# an integer is a positive or negative whole number (without a fractional part)
int_var = 6
type(int_var)
isinstance(int_var, int)
# a string variable is a collection of one or more characters
str_var = "foo"
type(str_var)
isinstance(str_var, str)
# a boolean variable is composed of True/False data
bool_var = True
type(bool_var)
isinstance(bool_var, bool)
# ---------------------------- Dictionaries and Lists -------------------------
# a dictionary is an unordered collection of key:value pairs of data
dict_var = {1: "a", 2: "b", 3: "c", 4: "d"}
type(dict_var)
isinstance(dict_var, dict)
# a list is an ordered collection of data, not necessarily of the same type
list_var = [1, 2, 3, 4]
type(list_var)
isinstance(list_var, list)
# a tuple is an ordered collection of data, not necessarily of the same type
tuple_var = (1, 2, 3, 4)
type(tuple_var)
isinstance(tuple_var, tuple)
# -------------------------------- Numpy Arrays -------------------------------
float_array = np.array([1, 2.5, 4.5])
float_array.dtype.name
isinstance(float_array[0], float)
isinstance(float_array[0], np.float_)
isinstance(float_array[0], np.double)
str_array = np.array(['1', 'a', 4.5]) # notice type coersion
str_array.dtype.name
isinstance(str_array[0], np.str_)
int_array = np.array([1, 6, 10])
int_array.dtype.name
isinstance(int_array[0], np.int_)
log_array = np.array([True, False, True, False])
log_array.dtype.name
# np.bool and np.bool8 are deprecated aliases that newer NumPy releases remove;
# np.bool_ is the portable spelling
isinstance(log_array[0], np.bool_)
len(log_array)
# --------------------------------- DataFrames --------------------------------
df = pd.DataFrame({
'id': list(range(1, 4)),
'gender': pd.Categorical(["m", "f", "m"]),
'fbs': np.array([104, 98, 129])
})
df.shape # dimensions of a dataframe
df.dtypes # data types of variables in a dataframe
df.head() # view just a head of dataframe
df.describe() # describe dataframe
df.columns # shows the column names of the dataframe
df.T # transpose
type(df) # shows the type of a dataframe
# check if an object is of class pandas dataframe
isinstance(df, pd.core.frame.DataFrame)
# ------------------------ Selection/Subsetting DataFrames --------------------
df.columns[0] # shows the name of the first column of a dataframe
df['id'] # selecting a single column
df[:1] # selecting a single row
# select via the label
df.loc[:, ['gender', 'fbs']] # selecting on a multi-axis by label
df.loc[0:1, ['id', 'gender']] # label slicing on the integer index (endpoints are inclusive with .loc)
df.at[0, 'fbs']
# select via the position of the passed integers
df.iloc[2] # select data of row with index 2, i.e., 3rd row
df.iloc[0:2, 0:2] # select by integer slices
df.iloc[1:3, :] # slicing rows explicitly
df.iloc[:, 0:2] # slicing columns explicitly
df.iloc[1, 1] # getting a value explicitly
# select via Boolean indexing i.e. conditions
df[df['fbs'] > 100] # using subsetting approach
df.query('fbs > 100') # using query approach
df2 = df.copy() # make a copy of dataframe for further handlings
df2.loc[3] = [4, 'other', np.nan] # add another row to dataframe
df2[df2['gender'].isin(['m', 'f'])] # using the isin() method for filtering
pd.isna(df2) # To get the boolean mask where values are nan (i.e., missing)
df2.dropna(how='any') # To drop any rows that have missing data.
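# A small illustrative addition (not in the original tutorial): to fill missing
# data instead of dropping it, fillna replaces the nan entries with a value.
df2.fillna(value=5)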
# ---------------------------- Operations on DataFrames ------------------------
df['fbs'].mean() # performing a descriptive statistic:
df['fbs'].median() # median of a variable
df['fbs'].std() # standard deviation of a variable
df['fbs'].min() # minimum of a variable
df['fbs'].sum() # sum of a variable
# applying functions to the data
df[['id', 'fbs']].apply(np.cumsum) # apply to the numeric columns ('gender' is categorical and has no cumsum)
# concat dataframes
df3 = pd.DataFrame(np.random.randn(10, 4))
pieces = [df3[:3], df3[3:7], df3[7:]]
df3_concat = pd.concat(pieces)
df3_concat == df3
# join data frames by SQL style merges
left = pd.DataFrame({'key': ['foo', 'bar'], 'lval': [1, 2]})
right = pd.DataFrame({'key': ['foo', 'bar'], 'rval': [4, 5]})
pd.merge(left, right, on='key')
# grouping variables
df4 = pd.DataFrame({
'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)
})
df4.head()
df4.groupby('A').sum()
df4.groupby(['A', 'B']).mean()
df4.groupby(['A', 'B']).size()
df4.groupby('A').agg({'D': 'mean'}) # aggregate method
# mutating/assigning new variables
df4 = df4.assign(E = df4['D'] - df4['C'])
# renaming column/variable names
df4 = df4.rename(columns = {'E': 'Subtract'})
# pivot tables
pd.pivot_table(df4, values='D', index=['A'], columns=['B'])
#!/usr/bin/env python
"""Tests for `pubchem_api` package."""
import os
import numpy as np
import pandas as pd
import scipy
from scipy.spatial import distance
import unittest
# from click.testing import CliRunner
# from structure_prediction import cli
class TestDataPreprocessing(unittest.TestCase):
"""Tests for `data pre-processing` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_001_adjacency_matrix_ok(self):
"""
Tests to ensure that adjacency matrix prepared correctly
To show that distance.pdist function calculates correctly on a simple test array
"""
print("Test One... To show that distance.pdist function calculates correctly on a simple test array")
test_array_1 = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12]])
a = np.sqrt(((1-1)**2) + ((2-2)**2) + ((3-3)**2))
b = np.sqrt(((1-4)**2) + ((2-5)**2) + ((3-6)**2))
c = np.sqrt(((1-7)**2) + ((2-8)**2) + ((3-9)**2))
d = np.sqrt(((1-10)**2) + ((2-11)**2) + ((3-12)**2))
e = np.sqrt(((4-1)**2) + ((5-2)**2) + ((6-3)**2))
f = np.sqrt(((4-4)**2) + ((5-5)**2) + ((6-6)**2))
g = np.sqrt(((4-7)**2) + ((5-8)**2) + ((6-9)**2))
h = np.sqrt(((4-10)**2) + ((5-11)**2) + ((6-12)**2))
i = np.sqrt(((7-1)**2) + ((8-2)**2) + ((9-3)**2))
j = np.sqrt(((7-4)**2) + ((8-5)**2) + ((9-6)**2))
k = np.sqrt(((7-7)**2) + ((8-8)**2) + ((9-9)**2))
l = np.sqrt(((7-10)**2) + ((8-11)**2) + ((9-12)**2))
m = np.sqrt(((10-1)**2) + ((11-2)**2) + ((12-3)**2))
n = np.sqrt(((10-4)**2) + ((11-5)**2) + ((12-6)**2))
o = np.sqrt(((10-7)**2) + ((11-8)**2) + ((12-9)**2))
p = np.sqrt(((10-10)**2) + ((11-11)**2) + ((12-12)**2))
result_array = np.array([[a, b, c, d],
[e, f, g, h],
[i, j, k, l],
[m, n, o, p]])
print(result_array)
calculate_distances = distance.pdist(test_array_1, 'euclidean')
make_square = distance.squareform(calculate_distances)
print(make_square)
assert np.array_equal(result_array, make_square)
def test_002_adjacency_matrix_ok(self):
"""
Tests to ensure that adjacency matrix prepared correctly
To show that distance.pdist function calculates correctly on a simple test array
"""
print(("Test Two... To show that distance.pdist function calculates correctly on a simple test array"))
test_array_1 = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12]])
a = np.sqrt(((1-1)**2) + ((2-2)**2) + ((3-3)**2))
b = np.sqrt(((1-4)**2) + ((2-5)**2) + ((3-6)**2))
c = np.sqrt(((1-7)**2) + ((2-8)**2) + ((3-9)**2))
d = np.sqrt(((1-10)**2) + ((2-11)**2) + ((3-12)**2))
e = np.sqrt(((4-1)**2) + ((5-2)**2) + ((6-3)**2))
f = np.sqrt(((4-4)**2) + ((5-5)**2) + ((6-6)**2))
g = np.sqrt(((4-7)**2) + ((5-8)**2) + ((6-9)**2))
h = np.sqrt(((4-10)**2) + ((5-11)**2) + ((6-12)**2))
i = np.sqrt(((7-1)**2) + ((8-2)**2) + ((9-3)**2))
j = np.sqrt(((7-4)**2) + ((8-5)**2) + ((9-6)**2))
k = np.sqrt(((7-7)**2) + ((8-8)**2) + ((9-9)**2))
l = np.sqrt(((7-10)**2) + ((8-11)**2) + ((9-12)**2))
m = np.sqrt(((10-1)**2) + ((11-2)**2) + ((12-3)**2))
n = np.sqrt(((10-4)**2) + ((11-5)**2) + ((12-6)**2))
o = np.sqrt(((10-7)**2) + ((11-8)**2) + ((12-9)**2))
p = np.sqrt(((10-10)**2) + ((11-11)**2) + ((12-12)**2))
result_array = np.array([[a, b, c, d],
[e, f, g, h],
[i, j, k, l],
[m, n, o, p]])
calculate_distances = distance.pdist(test_array_1, 'euclidean')
make_square = distance.squareform(calculate_distances)
for i in range(0,result_array.shape[1]):
# print(result_array[i,i])
self.assertEqual(result_array[i,i], 0)
for i in range(0,make_square.shape[1]):
# print(make_square[i,i])
self.assertEqual(make_square[i,i], 0)
def test_003_adjacency_matrix_ok(self):
"""
Tests to ensure that adjacency matrix prepared correctly
To show that distance.pdist function calculates correctly on a pdb.cif file
"""
print("Test Three... To show that distance.pdist function calculates correctly on a pdb.cif file")
with open('./extracted_test_data/1j5a.cif') as infile:
target_list = infile.read().split('\n')
df_1 = pd.DataFrame(data=target_list, columns=["header"]) # Put list in a dataframe m X 1 column
df_1 = df_1[:-1] # Removes additional row that is included
cif_to_df_2 = df_1.header.str.split(expand=True) # Put dataframe to m x 20 columns
critical_info_to_df_3 = cif_to_df_2.drop(columns=[0, 1, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20], axis=1) # df containing aa & coordinate positions
print(critical_info_to_df_3.head())
convert_to_array = critical_info_to_df_3.drop(columns=[5], axis=1).to_numpy() # Removes aa flag & contains only coordinate info
calculate_distances = distance.pdist(convert_to_array, 'euclidean')
make_square = distance.squareform(calculate_distances)
print(make_square)
assert df_1.shape[0] == cif_to_df_2.shape[0]
assert cif_to_df_2.shape[0] == critical_info_to_df_3.shape[0]
def test_004_adjacency_matrix_ok(self):
"""
Tests to ensure that adjacency matrix prepared correctly
To show that distance.pdist function calculates correctly on a pdb.cif file
"""
print("Test Four... To show that distance.pdist function calculates correctly on a pdb.cif file")
with open('./extracted_test_data/1j5a.cif') as infile:
target_list = infile.read().split('\n')
df_1 = pd.DataFrame(data=target_list, columns=["header"]) # Put list in a dataframe m X 1 column
df_1 = df_1[:-1] # Removes additional row that is included
cif_to_df_2 = df_1.header.str.split(expand=True) # Put dataframe to m x 20 columns
critical_info_to_df_3 = cif_to_df_2.drop(columns=[0, 1, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20], axis=1) # df containing aa & coordinate positions
convert_to_array = critical_info_to_df_3.drop(columns=[5], axis=1).to_numpy() # Removes aa flag & contains only coordinate info
calculate_distances = distance.pdist(convert_to_array, 'euclidean')
make_square = distance.squareform(calculate_distances)
for i in range(0,make_square.shape[1]):
print(make_square[i,i])
self.assertEqual(make_square[i,i], 0)
def test_005_adjacency_matrix_ok(self):
"""
Tests to ensure that adjacency matrix prepared correctly
To show that adjacency matrix maintains its form when converted back into a dataframe
"""
print("Test Five...")
with open('./extracted_test_data/1j5a.cif') as infile:
target_list = infile.read().split('\n')
df_1 = pd.DataFrame(data=target_list, columns=["header"])
# -*- coding: utf-8 -*-
"""
Created on 2018-09-13
@author: <NAME>
"""
import numpy as np
import pandas as pd
CURRENT_ROUND = 38
# Load data from all 2018 rounds
# Data from https://github.com/henriquepgomide/caRtola
rounds = []
rounds.append(pd.read_csv('data/rodada-1.csv'))
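# A hedged sketch of loading the remaining 2018 rounds (assumes the files
# data/rodada-2.csv ... data/rodada-38.csv sit alongside rodada-1.csv):
for i in range(2, CURRENT_ROUND + 1):
    rounds.append(pd.read_csv('data/rodada-{}.csv'.format(i)))
df_rounds = pd.concat(rounds, ignore_index=True)  # illustrative name for the combined table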
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
from __future__ import print_function, absolute_import, division
import re
import pandas as pd
from ads.type_discovery import logger
from ads.type_discovery.abstract_detector import AbstractTypeDiscoveryDetector
from ads.type_discovery.typed_feature import GISTypedFeature
class LatLonDetector(AbstractTypeDiscoveryDetector):
_pattern_string = r"^[(]?(\-?\d+\.\d+?),\s*(\-?\d+\.\d+?)[)]?$"
def __init__(self):
self.cc = re.compile(LatLonDetector._pattern_string, re.VERBOSE)
def is_lat_lon(self, name, values):
return all([self.cc.match(str(x)) for x in values])
def discover(self, name, series):
candidates = series.loc[~series.isnull()]
if self.is_lat_lon(name, candidates.head(1000)):
logger.debug("column [{}]/[{}] lat/lon".format(name, series.dtype))
samples = [
tuple([float(x) for x in self.cc.search(v).groups()])
for v in candidates.sample(frac=1).head(500).values
]
return GISTypedFeature.build(name, series, samples)
return False
@staticmethod
def extract_x_y(gis_series):
"""takes a GIS series and parses it into a new dataframe with X (longitude) and Y (latitude) columns."""
cc = re.compile(LatLonDetector._pattern_string, re.VERBOSE)
lats, lons = zip(*gis_series.dropna().apply(lambda v: cc.search(v).groups()))
return pd.DataFrame({"X": lons, "Y": lats}).astype(
{"X": "float64", "Y": "float64"}
)
if __name__ == "__main__":
dd = LatLonDetector()
test_series_1 = [
"-18.2193965, -93.587285",
"-21.0255305, -122.478584",
"85.103913, 19.405744",
"82.913736, 178.225672",
"62.9795085, -66.989705",
"54.5604395, 95.235090",
"33.970775, -140.939679",
"40.9680285, -30.369376",
"51.816119, 175.979008",
"-48.7882365, 84.035621",
]
test_series_2 = [
"69.196241,-125.017615",
"5.2272595,-143.465712",
"-33.9855425,-153.445155",
"43.340610,86.460554",
"24.2811855,-162.380403",
"2.7849025,-7.328156",
"45.033805,157.490179",
"-1.818319,-80.681214",
"-44.510428,-169.269477",
"-56.3344375,-166.407038",
]
test_series_3 = ["(54.3392995,-11.801615)"]
print(dd.discover("test_series_1", pd.Series(test_series_1)))
print(dd.discover("test_series_2", | pd.Series(test_series_2) | pandas.Series |
"""Useful functions for local directory data analysis"""
import os
import os.path
import platform
import numpy as np
import pandas as pd
from collections import OrderedDict
from copy import deepcopy
def _fix_winpath(pathstr):
if platform.system() == 'Windows':
#print("Platform: Windows")
pathstr = '/'.join(pathstr.split('\\'))
return pathstr
fix_winpath = lambda x: list(np.vectorize(_fix_winpath)(x))
def get_device_id(filename):
tags = os.path.basename(filename).split('_')
wafer, chip, device = tags[3], tags[4][-2:], tags[5]
return wafer, chip, device
def pick_datafiles(target, ignore):
"""Filter out non-datafiles from target list."""
# apply ignore list
try:
ignore = fix_winpath(ignore)
except:
ignore = []
try:
target = fix_winpath(target)
except:
target = []
#for ign in ignore:
# del target[target.index(ign)]
target2 = deepcopy(target)
for t in target:
for ign in ignore:
if os.path.split(t)[1] in ign:
del target2[target2.index(t)]
break
return target2
def load_db(dbname):
"""Open csv-like db file and return the dataframe."""
try:
#df = pd.read_table("summary.dat", sep='\s+')
df = pd.read_csv(dbname)
#df["chip"] = df["chip"].astype(str)
print("Loaded existing db:", dbname)
except:
df = pd.DataFrame()
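# Plausible completion of the truncated function (assumption): report the miss
# and hand back the (possibly empty) dataframe
print("No existing db found, starting empty:", dbname)
return df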
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal, assert_series_equal
from evalml.pipelines import BaselineBinaryPipeline, BaselineMulticlassPipeline
from evalml.utils import get_random_state
def test_baseline_binary_random(X_y_binary):
X, y = X_y_binary
values = np.unique(y)
parameters = {
"Baseline Classifier": {
"strategy": "random"
}
}
clf = BaselineBinaryPipeline(parameters=parameters)
clf.fit(X, y)
expected_predictions = pd.Series(get_random_state(0).choice(np.unique(y), len(X)), dtype="Int64")
assert_series_equal(expected_predictions, clf.predict(X).to_series())
predicted_proba = clf.predict_proba(X)
assert predicted_proba.shape == (len(X), 2)
expected_predictions_proba = pd.DataFrame(np.array([[0.5 for i in range(len(values))]] * len(X)))
assert_frame_equal(expected_predictions_proba, predicted_proba.to_dataframe())
np.testing.assert_allclose(clf.feature_importance.iloc[:, 1], np.array([0.0] * X.shape[1]))
def test_baseline_binary_random_weighted(X_y_binary):
X, y = X_y_binary
values, counts = np.unique(y, return_counts=True)
percent_freq = counts.astype(float) / len(y)
assert percent_freq.sum() == 1.0
parameters = {
"Baseline Classifier": {
"strategy": "random_weighted"
}
}
clf = BaselineBinaryPipeline(parameters=parameters)
clf.fit(X, y)
expected_predictions = pd.Series(get_random_state(0).choice(np.unique(y), len(X), p=percent_freq), dtype="Int64")
assert_series_equal(expected_predictions, clf.predict(X).to_series())
expected_predictions_proba = pd.DataFrame(np.array([[percent_freq[i] for i in range(len(values))]] * len(X)))
predicted_proba = clf.predict_proba(X)
assert predicted_proba.shape == (len(X), 2)
assert_frame_equal(expected_predictions_proba, predicted_proba.to_dataframe())
np.testing.assert_allclose(clf.feature_importance.iloc[:, 1], np.array([0.0] * X.shape[1]))
def test_baseline_binary_mode():
X = pd.DataFrame({'one': [1, 2, 3, 4], 'two': [2, 3, 4, 5], 'three': [1, 2, 3, 4]})
#!/usr/bin/env python
# coding: utf-8
# # Hyperparameter Tuning Demo
# ### <NAME>, PhD
# Adapted from:
# * [Sklearn Documentation](https://scikit-learn.org/stable/auto_examples/model_selection/plot_randomized_search.html)
# * [yandexdataschool/mlhep2018 Slides](https://github.com/yandexdataschool/mlhep2018/blob/master/day4-Fri/Black-Box.pdf)
# * [Hyperparameter Optimization in Python Part 1: Scikit-Optimize](https://towardsdatascience.com/hyperparameter-optimization-in-python-part-1-scikit-optimize-754e485d24fe)
# * [An Introductory Example of Bayesian Optimization in Python with Hyperopt](https://towardsdatascience.com/an-introductory-example-of-bayesian-optimization-in-python-with-hyperopt-aae40fff4ff0)
# Install required packages via pip if necessary, only run if you know what you're doing! [Reference](https://jakevdp.github.io/blog/2017/12/05/installing-python-packages-from-jupyter/)
# **Note: This does not use a virtual environment and will pip install directly to your system!**
# In[ ]:
import sys
get_ipython().system('{sys.executable} -m pip install --upgrade pip')
get_ipython().system('{sys.executable} -m pip install -r requirements.txt')
# !{sys.executable} -m pip install -r gentun/requirements.txt
# In[ ]:
# import sys
# !{sys.executable} -m pip uninstall --yes gentun
# In[ ]:
# %%bash
# cd gentun/
# python3 setup.py install
# Check how many cores we have
# In[4]:
import multiprocessing
multiprocessing.cpu_count()
# ### Load packages!
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
########################################################
# python
import pandas as pd
import numpy as np
import warnings
from time import time
# from copy import copy
from collections import OrderedDict
import json
import pickle
from scipy.io import arff
from scipy.stats import randint, uniform
########################################################
# xgboost, sklearn
import xgboost as xgb
warnings.filterwarnings('ignore', message='sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23')
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
# from sklearn.metrics import log_loss
from sklearn.metrics import roc_curve, auc, roc_auc_score
########################################################
# skopt
from skopt import Optimizer
from skopt.learning import GaussianProcessRegressor, RandomForestRegressor, GradientBoostingQuantileRegressor
from skopt.learning.gaussian_process.kernels import RBF, WhiteKernel
from sklearn.ensemble import GradientBoostingRegressor
########################################################
# hyperopt
from hyperopt import hp, tpe, fmin, Trials
########################################################
# plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
########################################################
# set global rnd_seed for reproducibility
rnd_seed = 42
# In[2]:
from utils import * # load some helper functions, but keep main body of code in notebook for easier reading
# In[3]:
from plotting import * # load plotting code
# ### Set number of iterations
# In[4]:
n_iters = {
'RS': 500,
# 'GS': set by the size of the grid
'GP': 300,
'RF': 300,
'GBDT': 300,
'TPE': 300,
# 'GA': 300, # number of generations
}
# all will effectively be multiplied by n_folds
n_folds = 5
# for testing lower iterations and folds
for k,v in n_iters.items():
n_iters[k] = 30
# n_iters['GA'] = 1
n_folds = 2
# Need to implement our own custom scorer to actually use the best number of trees found by early stopping.
# See the [documentation](https://scikit-learn.org/stable/modules/model_evaluation.html#implementing-your-own-scoring-object) for details.
# In[5]:
def xgb_early_stopping_auc_scorer(model, X, y):
# predict_proba may not be thread safe, so copy the object - unfortunately getting crashes so just use the original object
# model = copy.copy(model_in)
y_pred = model.predict_proba(X, ntree_limit=model.best_ntree_limit)
y_pred_sig = y_pred[:,1]
return roc_auc_score(y, y_pred_sig)
# ## Load Polish Companies Bankruptcy Data
# ### [Source and data dictionary](http://archive.ics.uci.edu/ml/datasets/Polish+companies+bankruptcy+data)
# In[6]:
data = arff.loadarff('./data/1year.arff')
df = pd.DataFrame(data[0])
df['class'] = df['class'].apply(int, args=(2,))
# In[7]:
# Real feature names, for reference
with open ('./attrs.json') as json_file:
attrs_dict = json.load(json_file)
# Setup Target and Features
# In[8]:
target='class'
features = sorted(list(set(df.columns)-set([target])))
# Make Train, Validation, and Holdout Sets
# In[9]:
X = df[features].values
y = df[target].values
X_trainCV, X_holdout, y_trainCV, y_holdout = train_test_split(X, y, test_size=0.2, random_state=rnd_seed, stratify=y)
del X; del y;
dm_train = xgb.DMatrix(X_trainCV, label=y_trainCV)
X_train, X_val, y_train, y_val = train_test_split(X_trainCV, y_trainCV, test_size=0.2, random_state=rnd_seed, stratify=y_trainCV)
# Prepare Stratified k-Folds
# In[10]:
skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=rnd_seed+2)
# ## Setup Hyperparameter Search Space
# See the [docs here](https://xgboost.readthedocs.io/en/latest/parameter.html) for XGBoost hyperparameter details.
# In[11]:
all_params = OrderedDict({
'max_depth': {'initial': 6, 'range': (3, 10), 'dist': randint(3, 10), 'grid': [4, 6, 8], 'hp': hp.choice('max_depth', [3,4,5,6,7,8,9,10])},
# default=6, Maximum depth of a tree. Increasing this value will make the model more complex and more likely to overfit.
'learning_rate': {'initial': 0.3, 'range': (0.05, 0.6), 'dist': uniform(0.05, 0.6), 'grid': [0.1, 0.15, 0.3], 'hp': hp.uniform('learning_rate', 0.05, 0.6)},
# NOTE: Optimizing the log of the learning rate would be better, but avoid that complexity for this demo (see the short log-uniform sketch after this dictionary)...
# default=0.3, Step size shrinkage used in update to prevents overfitting. After each boosting step, we can directly get the weights of new features, and eta shrinks the feature weights to make the boosting process more conservative. alias: learning_rate
# 'min_child_weight': {'initial': 1., 'range': (1., 10.), 'dist': uniform(1., 10.), 'grid': [1., 3.], 'hp': hp.uniform('min_child_weight', 1., 10.)},
# default=1, Minimum sum of instance weight (hessian) needed in a child. If the tree partition step results in a leaf node with the sum of instance weight less than min_child_weight, then the building process will give up further partitioning. In linear regression task, this simply corresponds to minimum number of instances needed to be in each node. The larger min_child_weight is, the more conservative the algorithm will be.
'gamma': {'initial': 0., 'range': (0., 5.), 'dist': uniform(0., 5.), 'grid': [0., 0.5, 1.], 'hp': hp.uniform('gamma', 0., 5.)},
# default=0, Minimum loss reduction required to make a further partition on a leaf node of the tree. The larger gamma is, the more conservative the algorithm will be. alias: min_split_loss
'reg_alpha': {'initial': 0., 'range': (0., 5.), 'dist': uniform(0., 5.), 'grid': [0., 1.], 'hp': hp.uniform('reg_alpha', 0., 5.)},
# default=0, L1 regularization term on weights. Increasing this value will make model more conservative.
'reg_lambda': {'initial': 1., 'range': (0., 5.), 'dist': uniform(0., 5.), 'grid': [0., 1.], 'hp': hp.uniform('reg_lambda', 0., 5.)},
# default=1, L2 regularization term on weights. Increasing this value will make model more conservative.
# 'max_delta_step': {'initial': 0., 'range': (0., 5.), 'dist': uniform(0., 5.), 'grid': [0., 1.], 'hp': hp.uniform('max_delta_step', 0., 5.)},
# default=0, Maximum delta step we allow each leaf output to be. If the value is set to 0, it means there is no constraint. If it is set to a positive value, it can help making the update step more conservative. Usually this parameter is not needed, but it might help in logistic regression when class is extremely imbalanced. Set it to value of 1-10 might help control the update.
# TODO debug ranges (0, 1) so they are actually working
# 'colsample_bytree': {'initial': 1., 'range': (0.5, 1.), 'dist': uniform(0.5, 1.), 'grid': [0.5, 1.], 'hp': hp.uniform('colsample_bytree', 0.5, 1.)},
# default=1, Subsample ratio of columns when constructing each tree.
# 'subsample': {'initial': 1., 'range': (0.5, 1.), 'dist': uniform(0.5, 1.), 'grid': [0.5, 1.], 'hp': hp.uniform('subsample', 0.5, 1.)},
# default=1, Subsample ratio of the training instances. Setting it to 0.5 means that XGBoost would randomly sample half of the training data prior to growing trees. and this will prevent overfitting. Subsampling will occur once in every boosting iteration.
})
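# A hedged aside on the learning-rate NOTE above (illustrative only, not used by
# the rest of the demo): a log-uniform prior can be drawn with scipy.stats.loguniform,
# which concentrates samples toward smaller learning rates.
from scipy.stats import loguniform
lr_dist_log = loguniform(0.05, 0.6)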
# In[12]:
# break out the params_to_be_opt, and their ranges (dimensions), and initial values
params_to_be_opt = []
dimensions = []
for k,v in all_params.items():
params_to_be_opt.append(k)
dimensions.append(v['range'])
# break out dictionaries for each optimizer
params_initial = {}
param_dists = {}
param_grids = {}
param_hp_dists = OrderedDict()
for k,v in all_params.items():
params_initial[k] = v['initial']
param_dists[k] = v['dist']
param_grids[k] = v['grid']
param_hp_dists[k] = v['hp']
# make helper param index dict
param_index_dict = {}
for iparam, param in enumerate(params_to_be_opt):
param_index_dict[param] = iparam
# #### Set other fixed hyperparameters
# In[13]:
fixed_setup_params = {
'max_num_boost_rounds': 500, # maximum number of boosting rounds to run / trees to create
'xgb_objective': 'binary:logistic', # objective function for binary classification
'xgb_verbosity': 0, # The degree of verbosity. Valid values are 0 (silent) - 3 (debug).
'xgb_n_jobs': -1, # Number of parallel threads used to run XGBoost. -1 makes use of all cores in your system
}
# search_scoring = 'roc_auc' # need to use custom function to work properly with xgb early stopping, see xgb_early_stopping_auc_scorer
search_n_jobs = -1 # Number of parallel threads used to run hyperparameter searches. -1 makes use of all cores in your system
search_verbosity = 1
# In[14]:
fixed_fit_params = {
'early_stopping_rounds': 10, # must see improvement over last num_early_stopping_rounds or will halt
'eval_set': [(X_val, y_val)], # data sets to use for early stopping evaluation
'eval_metric': 'auc', # evaluation metric for early stopping
'verbose': False, # even more verbosity control
}
# ### Setup XGBClassifier
# In[15]:
xgb_model = xgb.XGBClassifier(n_estimators=fixed_setup_params['max_num_boost_rounds'],
objective=fixed_setup_params['xgb_objective'],
verbosity=fixed_setup_params['xgb_verbosity'],
random_state=rnd_seed+3)
# #### Run with initial hyperparameters as a baseline
# In[16]:
model_initial = xgb.XGBClassifier(n_estimators=fixed_setup_params['max_num_boost_rounds'],
objective=fixed_setup_params['xgb_objective'],
verbosity=fixed_setup_params['xgb_verbosity'],
random_state=rnd_seed+3, **params_initial)
model_initial.fit(X_train, y_train, **fixed_fit_params);
# In[17]:
y_initial = -xgb_early_stopping_auc_scorer(model_initial, X_val, y_val)
# # Random Search
# Randomly test different hyperparameters drawn from `param_dists`
# In[22]:
rs = RandomizedSearchCV(estimator=xgb_model, param_distributions=param_dists, scoring=xgb_early_stopping_auc_scorer,
n_iter=n_iters['RS'], n_jobs=search_n_jobs, cv=skf, verbose=search_verbosity, random_state=rnd_seed+4
)
# In[23]:
rs_start = time()
rs.fit(X_trainCV, y_trainCV, groups=None, **fixed_fit_params)
dump_to_pkl(rs, 'RS')
rs_time = time()-rs_start
print(f"RandomizedSearchCV took {rs_time:.2f} seconds for {n_iters['RS']} candidates parameter settings")
# In[34]:
rs = load_from_pkl('RS')
# In[25]:
report(rs)
# In[26]:
output_sklearn_to_csv(rs, params_to_be_opt, tag='_RS')
# In[27]:
plot_convergence(y_values=np.array([-y for y in rs.cv_results_['mean_test_score']]), ann_text='RS', tag='_RS', y_initial=y_initial)
# # Grid Search
# Try all possible hyperparameter combinations `param_grids`, slow and poor exploration!
# In[28]:
gs = GridSearchCV(estimator=xgb_model, param_grid=param_grids, scoring=xgb_early_stopping_auc_scorer,
n_jobs=search_n_jobs, cv=skf, verbose=search_verbosity # , iid=False
)
# In[29]:
gs_start = time()
gs.fit(X_trainCV, y_trainCV, groups=None, **fixed_fit_params)
dump_to_pkl(gs, 'GS')
gs_time = time()-gs_start
print(f"GridSearchCV took {gs_time:.2f} seconds for {len(gs.cv_results_['params'])} candidates parameter settings")
# In[35]:
gs = load_from_pkl('GS')
# In[31]:
report(gs)
# In[32]:
output_sklearn_to_csv(gs, params_to_be_opt, tag='_GS')
# In[33]:
plot_convergence(y_values=[-y for y in gs.cv_results_['mean_test_score']], ann_text='GS', tag='_GS', y_initial=y_initial)
# # Setup datasets and objective function for custom searches
# setup the function to be optimized - without CV (superseded by the CV version in the next cell; this variant expects separate *_OPT train/validation splits)
def objective_function(params):
model = xgb.XGBClassifier(n_estimators=fixed_setup_params['max_num_boost_rounds'], objective=fixed_setup_params['xgb_objective'], verbosity=fixed_setup_params['xgb_verbosity'], random_state=rnd_seed+6, **params)
model.fit(X_train_OPT, y_train_OPT, early_stopping_rounds=fixed_fit_params['early_stopping_rounds'], eval_set=[(X_val_OPT, y_val_OPT)], eval_metric=fixed_fit_params['eval_metric'], verbose=fixed_fit_params['verbose'])
best_ntree_limit = model.best_ntree_limit
if best_ntree_limit >= fixed_setup_params['max_num_boost_rounds']:
print(f"Hit max_num_boost_rounds = {fixed_setup_params['max_num_boost_rounds']:d}, model.best_ntree_limit = {best_ntree_limit:d}")
# return the negative auc of the trained model, since Optimizer and hyperopt only minimize
return -xgb_early_stopping_auc_scorer(model, X_val, y_val)
# In[19]:
# setup the function to be optimized
def objective_function(params):
cv = xgb.cv(dict({'objective': fixed_setup_params['xgb_objective']}, **params), dm_train,
num_boost_round=fixed_setup_params['max_num_boost_rounds'], early_stopping_rounds=fixed_fit_params['early_stopping_rounds'],
nfold=n_folds, stratified=True, folds=skf,
metrics=fixed_fit_params['eval_metric'],
verbose_eval=fixed_fit_params['verbose'], seed=rnd_seed+6, as_pandas=True)
# return the negative auc of the trained model, since Optimizer and hyperopt only minimize
return -cv[f"test-{fixed_fit_params['eval_metric']}-mean"].iloc[-1]
# # Bayesian Optimization
# Use Bayesian optimization to intelligently decide where to sample the objective function next, based on prior results.
# Can use many different types of surrogate functions: Gaussian Process, Random Forest, Gradient Boosted Trees. Note that the Tree-Structured Parzen Estimator (TPE) approach is a close cousin of Bayesian optimization, similar in operation but arising from a flipped form of Bayes rule.
# In[20]:
frac_initial_points = 0.1
acq_func='gp_hedge' # select the best of EI, PI, LCB per iteration
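# A minimal sketch (assumption - the actual Optimizer construction is not shown in
# this part of the notebook) of how a Gaussian Process surrogate could be wired up
# for the run_bo helper below; `gp_bo_opt` is an illustrative name, and older skopt
# versions take n_random_starts instead of n_initial_points.
gp_bo_opt = Optimizer(
    dimensions=dimensions,
    base_estimator=GaussianProcessRegressor(kernel=RBF() + WhiteKernel()),
    n_initial_points=max(1, int(frac_initial_points * n_iters['GP'])),
    acq_func=acq_func,
    random_state=rnd_seed+5,
)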
# In[21]:
def run_bo(bo_opt, bo_n_iter, ann_text, m_path='output', tag='', params_initial=None, y_initial=None, print_interval=5, debug=False):
iter_results = []
if params_initial is not None and y_initial is not None:
# update bo_opt with the initial point, might as well since we have already computed it!
x_initial = [params_initial[param] for param in params_to_be_opt]
bo_opt.tell(x_initial, y_initial)
initial_row = {'iter':0, 'y':y_initial, 'auc':-y_initial}
for param in params_to_be_opt:
initial_row[param] = params_initial[param]
iter_results.append(initial_row)
# we'll print these warnings ourselves
warnings.filterwarnings('ignore', message='The objective has been evaluated at this point before.')
# run it
for i_iter in range(1,bo_n_iter):
is_last = False
if i_iter+1 == bo_n_iter:
is_last = True
print_this_i = False
if is_last or (print_interval !=0 and (print_interval < 0 or (print_interval > 0 and i_iter % print_interval == 0))):
print_this_i = True
print(f'Starting iteration {i_iter:d}')
# get next test point x, ie a new point beta in parameter space
x = bo_opt.ask()
is_repeat = False
if x in bo_opt.Xi:
is_repeat = True
# we have already evaluated objective_function at this point! Pull old value, give it back and continue.
# not very elegant, might still get a warning from Optimizer, but at least is MUCH faster than recomputing objective_function...
past_i_iter = bo_opt.Xi.index(x)
y = bo_opt.yi[past_i_iter] # get from bo_opt array to be sure it's the right one
if debug:
print('Already evaluated at this x (params below)! Just using the old result for y and continuing!')
print(x)
else:
# run the training and predictions for the test point
params = {}
for param,value in zip(params_to_be_opt,x):
params[param] = value
y = objective_function(params)
# update bo_opt with the result for the test point
bo_opt.tell(x, y)
# save to df
iter_result = {'iter':i_iter, 'y':y, 'auc':-y} # , 'x': str(x)
for param,value in zip(params_to_be_opt,x):
iter_result[param] = value
iter_results.append(iter_result)
# see if it is a min
is_best = False
if i_iter != 0 and y == np.array(bo_opt.yi).min():
is_best = True
# print messages and plots while still running
if print_this_i or is_last or (is_best and not is_repeat):
if is_best:
print('Found a new optimum set of hyper parameters!')
print(x)
print(f'y: {y:.5f}')
df_tmp = pd.DataFrame.from_dict(iter_results)
"""
Download data from original sources if they are not already present in the data dir
"""
import argparse
import os
from pathlib import Path
import pandas as pd
import requests
def delete_file(target_dir, filename):
test_path = Path(os.path.join(target_dir, filename))
if test_path.is_file():
os.remove(test_path)
# Download the Gender pay gap data from UK Gov if it's not already there
def download_file_if_not_exist(url, target_dir='data', extension='', filename=None):
local_filename = filename if filename is not None else url.split('/')[-1] + extension
Path(target_dir).mkdir(parents=True, exist_ok=True)
test_path = Path(os.path.join(target_dir, local_filename))
if test_path.is_file():
print("{} already exists in '{}' folder".format(local_filename, target_dir))
return
print("Downloading {} to {}".format(local_filename, target_dir))
with requests.get(url, stream=True) as r:
with test_path.open('wb') as f:
for chunk in r.iter_content(chunk_size=1024):
f.write(chunk)
return local_filename
def download_data():
for year in (2017, 2018, 2019):
download_file_if_not_exist(
url='https://gender-pay-gap.service.gov.uk/viewing/download-data/{}'.format(year),
target_dir='data',
filename="ukgov-gpg-{}.csv".format(year))
SIC_CODES_CSV='https://datahub.io/core/uk-sic-2007-condensed/r/uk-sic-2007-condensed.csv'
download_file_if_not_exist(
url=SIC_CODES_CSV,
target_dir='data',
filename='sic_codes.csv'
)
def merge_years(df2017, df2018, df2019):
df2017['year'] = 2017
df2018['year'] = 2018
df2019['year'] = 2019
return pd.concat([df2017, df2018, df2019])
def acquire_data(save_file=False, output_filename='data/ukgov-gpg-full.csv'):
download_data()
df_2017 = pd.read_csv('data/ukgov-gpg-2017.csv', dtype={'SicCodes': str})
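# Plausible continuation of the truncated function (assumption), reusing the
# download naming scheme and the merge_years helper defined above:
df_2018 = pd.read_csv('data/ukgov-gpg-2018.csv', dtype={'SicCodes': str})
df_2019 = pd.read_csv('data/ukgov-gpg-2019.csv', dtype={'SicCodes': str})
df_full = merge_years(df_2017, df_2018, df_2019)
if save_file:
    df_full.to_csv(output_filename, index=False)
return df_full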
import pandas as pd
import numpy as np
from .icd_parsing_functions import get_code_categories, charlson_calc
def preprocess_all(static_vars, dynamic_vars, outcome_vars, input_vars):
print(dynamic_vars.head())
top_k_feats = list(dynamic_vars['label'].value_counts()[:5].index)
id_vars = ['subject_id','hadm_id','stay_id']
dynamic_regular = get_regular_timeseries(dynamic_vars, id_vars, top_k_feats)
print(dynamic_regular.head())
dynamic_regular_imputed = impute_dynamic_data(dynamic_regular,id_vars, top_k_feats)
print(dynamic_regular_imputed.head())
static_vars_clean = preprocess_static_vars(static_vars)
return static_vars_clean, dynamic_regular_imputed, outcome_vars, input_vars
def preprocess_static_vars(static_vars_df: pd.DataFrame) -> pd.DataFrame:
static_vars_clean = static_vars_df.copy()
static_vars_clean = pd.get_dummies(static_vars_clean)
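# Plausible completion of the truncated function (assumption):
return static_vars_clean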
#Lib for Streamlit
# Copyright(c) 2021 - AilluminateX LLC
# This is main Sofware... Screening and Tirage
# Customized to general Major Activities
# Make all the School Activities- st.write(DataFrame) ==> (outputs) Commented...
# The reason, since still we need the major calculations.
# Also the Computing is not that expensive.. So, no need to optimize at this point
import streamlit as st
import pandas as pd
#Change website title (set_page_config)
#==============
from PIL import Image
image_favicon=Image.open('Logo_AiX.jpg')
st.set_page_config(page_title='AilluminateX - Covid Platform', page_icon = 'Logo_AiX.jpg') #, layout = 'wide', initial_sidebar_state = 'auto'), # layout = 'wide',)
# favicon being an object of the same kind as the one you should provide st.image() with
#(ie. a PIL array for example) or a string (url or local file path)
#==============
#Hide footer and customize the text
#=========================
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
footer:after {
content:'Copyright(c) 2021 - AilluminateX LLC and Ailysium - Covid19 Bio-Forecasting Platform | https://www.aillumiante.com';
visibility: visible;
display: block;
position: relative;
#background-color: gray;
padding: 5px;
top: 2px;
}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
#==============================
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from yellowbrick.classifier import ClassificationReport
from sklearn.metrics import accuracy_score
#import numpy as np
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import plotly.express as px
import numpy as np
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import altair as alt
import plotly.figure_factory as ff
import matplotlib
from matplotlib import cm
import seaborn as sns; sns.set()
from PIL import Image
import statsmodels.api as sm
import statsmodels.formula.api as smf
#from sklearn import model_selection, preprocessing, metrics, svm,linear_model
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, cross_validate, StratifiedKFold
from sklearn.feature_selection import SelectKBest, chi2
#from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import auc, roc_auc_score, roc_curve, explained_variance_score, precision_recall_curve,average_precision_score,accuracy_score, classification_report
#from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from scipy.stats import boxcox
from matplotlib import pyplot
import pickle
#from sklearn.externals import joblib
import joblib
# Load Image & Logo
#====================
st.image("Logo_AiX.jpg") # Change to MSpace Logo
#st.write("https://www.ailluminate.com")
#st.image("LogoAiX1.jpg") # Change to MSpace Logo
st.markdown("<h1 style='text-align: left; color: turquoise;'>Ailysium: BioForecast Platform</h1>", unsafe_allow_html=True)
#st.markdown("<h1 style='text-align: left; color: turquoise;'>Train AI BioForecast Model (Realtime)</h1>", unsafe_allow_html=True)
#st.markdown("<h1 style='text-align: left; color: turquoise;'>Opening-Economy & Society</h1>", unsafe_allow_html=True)
#df_forecast= pd.read_csv("2021-03-27-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
df_forecast=pd.read_csv("recent-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#Load Data - The last/most recent Forecast and latest Data
#=====================
# The last two, most recent forecast
#df_forecast= pd.read_csv("2021-03-15-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#Forcast_date="2021-03-15"
#Forecasted_dates=["3/20/2021", "3/27/2021", "4/03/2021", "4/10/2021" ]
#df_forecast= pd.read_csv("2021-03-22-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#Forcast_date="2021-03-22"
#Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
#==========================================
df_forecast_previous= pd.read_csv("previous-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-22"
Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
df_forecast_recent=pd.read_csv("recent-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-29"
Forecasted_dates=["4/03/2021", "4/10/2021", "4/17/2021", "4/24/2021" ]
#================
#initialize the data
#=======================
#Models
#====================
#st.success("What Forecast Model Data to Load?")
forecast_model_Options= ['Reference Model',
'Ensemble',
'UGA-CEID',
'Columbia',
'ISU',
'UVA',
'LNQ',
'Facebook',
'JHU-APL',
'UpstateSU',
'JHU-IDD',
'LANL',
'Ensemble']
#st.success("What Date Forecast Data to Load?")
data_dates_options=['2021-01-04', '2021-01-11', '2021-01-18',
'2021-01-25', '2021-02-01', '2021-02-08',
'2021-02-15', '2021-02-22', '2021-03-01',
'2021-03-08', '2021-03-15', '2021-03-22',
'2021-03-29']
data_dates_options=['2021-03-29',
'2021-03-22', '2021-03-15', '2021-03-08',
'2021-03-01', '2021-02-22', '2021-02-15',
'2021-02-08', '2021-02-01', '2021-01-25',
'2021-01-18', '2021-01-11', '2021-01-04']
data_dates_options=['2021-04-14']
load_ai_model_options=['Reference Model',
'AI Model 1',
'AI Model 2 (L)',
'AI Model 3 (Fast)',
'AI Model 4 (Fast) (L)',
'AI Model 5',
'AI Model 6',
'AI Model 7 (VERY Slow- Do Not Use, if You have too!)',
'AI Model 8',
'AI Model 9 (Slow)',
'AI Model 10',
'AI Model 11 (L)',
'AI Model 12',
'AI Model 13',
'AI Model 14 (L)',
'AI Model 15',
'AI Model 16 (L)',
'AI Model (aggregator)']
train_ai_model_options=load_ai_model_options
#===========================
#Selectt Option Section
#============================
select_options=["AiX-ai-Forecast-Platform",
"Load Forecast Data", #Simply Check the Forecast Data
"Load AI Model",
"Train AI Model",
"AiX-Platform"]
select_options=["AiX-ai-Forecast-Platform"]
your_option=select_options
st.sidebar.success("Please Select your Option" )
option_selectbox = st.sidebar.selectbox( "Select your Option:", your_option)
select_Name=option_selectbox
#if option_selectbox=='Load Forecast Data' or option_selectbox!='Load Forecast Data':
#if select_Name=='Load Forecast Data' or select_Name!='Load Forecast Data':
if select_Name=='AiX-ai-Forecast-Platform' or select_Name!='AiX-ai-Forecast-Platform':
#Models
#====================
#st.success("What Forecast Model Data to Load?")
your_option=forecast_model_Options
st.sidebar.info("Please Select Forecast Model" )
option_selectbox = st.sidebar.selectbox( "Select Forecast Model:", your_option)
if option_selectbox =='Reference Model':
option_selectbox='Reference Model'
option_selectbox='Ensemble'
forecast_model_Name=option_selectbox
#if option_selectbox=='Load Forecast Data' or option_selectbox!='Load Forecast Data':
if select_Name=='Load Forecast Data' or select_Name!='Load Forecast Data':
#st.success("What Date Forecast Data to Load?")
your_option=data_dates_options
st.sidebar.warning("Please Select Forecast Date" )
option_selectbox = st.sidebar.selectbox( "Select Forecast Date:", your_option)
#if option_selectbox=='2021-03-22':
# option_selectbox= '2021-03-15'
data_dates_Name=option_selectbox
if option_selectbox==data_dates_Name:
your_option=["One(1) Week Ahead", "Two(2) Weeks Ahead", "Three(3) Weeks Ahead", "Four(4) Weeks Ahead"]
st.sidebar.warning("Please Select Forecast Week" )
option_selectbox = st.sidebar.selectbox( "Select Forecast Weeks Ahead:", your_option)
data_week_Name=option_selectbox
if data_week_Name !="One(1) Week Ahead":
st.write("Two(2), Three(3), and Four(4) Weeks Ahead are being calculated offline currently and are not presented as realtime")
#if option_selectbox=='Load AI Model':
if select_Name=='Load AI Model':
your_option=load_ai_model_options
st.sidebar.error("Please Select AI Model to load" )
option_selectbox = st.sidebar.selectbox( "Select AI-Model to Load:", your_option)
ai_load_Name=option_selectbox
#if option_selectbox=='Train AI Model':
if select_Name=='Train AI Model':
your_option=train_ai_model_options
st.sidebar.success("Please Select AI Model to Train" )
option_selectbox = st.sidebar.selectbox( "Select AI-Model to Train:", your_option)
ai_train_Name=option_selectbox
#load_data_csv=data_dates_Name+"-all-forecasted-cases-model-data.csv"
#st.write("Data to load: ", load_data_csv)
#Load Models and Sidebar Selection
#===================================================================================# Load AI Models
#if option_selectbox=='AiX Platform':
if select_Name=='AiX Platform':
model2load=pd.read_csv('model2load.csv', engine='python', dtype=str) # dtype={"Index": int})
model_index=model2load
model_names_option=model_index.AI_Models.values
st.sidebar.success("Please Select your AI Model!" )
model_selectbox = st.sidebar.selectbox( "Select AI Model", model_names_option)
Model_Name=model_selectbox
Index_model=model2load.Index[model2load.AI_Models==Model_Name].values[0]
Index_model=int(Index_model)
pkl_model_load=model2load.Pkl_Model[model2load.AI_Models==Model_Name].values[0]
#Load Data and Model
Pkl_Filename = pkl_model_load #"Pickle_RForest.pkl"
#st.write(Pkl_Filename)
# Load the Model back from file
#****with open(Pkl_Filename, 'rb') as file: # This line to load the file
#*** Pickle_LoadModel = pickle.load(file) # This line to load the file
# Pickle_RForest = pickle.load(file)
#RForest=Pickle_RForest
load_data_csv=data_dates_Name+"-all-forecasted-cases-model-data.csv"
#st.write('Load CDC Model Data- Data to load:', ' ', load_data_csv)
load_data_csv="recent-all-forecasted-cases-model-data.csv"
#st.write('Load CDC Model Data- Data to load:', ' ', load_data_csv)
#Forecast Data is being loaded and alll sort of sidebars also created.
#===================================================
#import pandas as pd
# Load Reference Model Forecast Ensemble - Only For Visualization Purpose
#=============================================================================
#df_forecast= pd.read_csv("2021-03-15-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#df_forecast= pd.read_csv(load_data_csv, engine='python', dtype={'fips': str})
df_forecast_ref=pd.DataFrame()
df_forecast_ref=pd.read_csv("previous-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-22"
Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
df_forecast=pd.DataFrame()
df_forecast= df_forecast_ref.copy()
df=pd.DataFrame()
df=df_forecast.copy()
# Drop all the States. We are only interested in Counties
df_drop=df[df.location_name!=df.State]
#df_drop1 = df.query("location_name != State")
#df_drop.fips= df_drop.fips.astype(str)
df_forecast=df_drop.copy()
#df_drop.fips= df_drop.fips.astype(str)
#df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
#forecast_model_Name="Ensemble"
df_forecast_Ensemble=pd.DataFrame()
df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
df_forecast_Ensemble=df_forecast_Ensemble[df_forecast_Ensemble.target=="1 wk ahead inc case"]
df_forecast_Ensemble_ref=pd.DataFrame()
df_forecast_Ensemble_ref=df_forecast_Ensemble.copy()
# Load Previous Forecast
#=========================
#df_forecast= pd.read_csv("2021-03-15-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
#df_forecast= pd.read_csv(load_data_csv, engine='python', dtype={'fips': str})
df_forecast_previous=pd.DataFrame()
df_forecast_previous=pd.read_csv("previous-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-22"
Forecasted_dates=["3/27/2021", "4/03/2021", "4/10/2021", "4/17/2021" ]
df_forecast=pd.DataFrame()
df_forecast= df_forecast_previous.copy()
df=pd.DataFrame()
df=df_forecast.copy()
# Drop all the States. We are only interested in Counties
df_drop=df[df.location_name!=df.State]
#df_drop1 = df.query("location_name != State")
#df_drop.fips= df_drop.fips.astype(str)
df_forecast=df_drop.copy()
#df_drop.fips= df_drop.fips.astype(str)
#df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
df_forecast_Ensemble=pd.DataFrame()
df_forecast_Ensemble=df_forecast[df_forecast.model==forecast_model_Name]
df_forecast_Ensemble=df_forecast_Ensemble[df_forecast_Ensemble.target=="1 wk ahead inc case"]
df_forecast_Ensemble_previous=pd.DataFrame()
df_forecast_Ensemble_previous=df_forecast_Ensemble.copy()
#Load Most Recent Forecast
#====================
#df_forecast= pd.read_csv(load_data_csv, engine='python', dtype={'fips': str})
df_forecast_recent=pd.DataFrame()
df_forecast_recent=pd.read_csv("recent-all-forecasted-cases-model-data.csv", engine='python', dtype={'fips': str})
Forcast_date="2021-03-29"
Forecasted_dates=["4/03/2021", "4/10/2021", "4/17/2021", "4/24/2021" ]
df_forecast=pd.DataFrame()
df_forecast= df_forecast_recent.copy()
df=pd.DataFrame()
df=df_forecast.copy()
# Drop all the States. We are only interested in Counties
df_drop=df[df.location_name!=df.State]
#df_drop1 = df.query("location_name != State")
#df_drop.fips= df_drop.fips.astype(str)
#df_drop.fips= df_drop.fips.astype(str)
#df_forecast_Ensemble=df_forecast[df_forecast.model=="Ensemble"]
df_forecast_Ensemble=pd.DataFrame()
df_forecast_Ensemble=df_forecast[df_forecast.model==forecast_model_Name]
df_forecast_Ensemble=df_forecast_Ensemble[df_forecast_Ensemble.target=="1 wk ahead inc case"]
df_forecast_Ensemble_recent=pd.DataFrame()
df_forecast_Ensemble_recent=df_forecast_Ensemble.copy()
#Load Actual Cases
#==========================
df_actual_cases=pd.DataFrame()
df_actual_cases=pd.read_csv("covid_confirmed_usafacts_forecast.csv", engine='python', dtype={'fips': str})
#====================== Visualization of data =======================
# ====================== Compare the Forecast with actual data ================
df_ref_temp=pd.DataFrame(np.array(df_forecast_Ensemble_ref.iloc[:,[6,7]].values), columns=["fips", "Forecast_Reference"]) # 6,7: fips and point
df_model_temp=pd.DataFrame(np.array(df_forecast_Ensemble_previous.iloc[:,[6,7]].values), columns=["fips", "Forecast_Model"]) # 6,7: fips and point
df_actual_temp=pd.DataFrame(np.array(df_actual_cases.iloc[:,[0,-2]].values), columns=["fips", "Actual_Target"]) # 0, -2: fips and most recent actual-target
df_actual_temp=pd.DataFrame(np.array(df_actual_cases.iloc[:,[0,-7,-6,-5,-4,-3, -2]].values),
columns=["fips", "TimeN5", "TimeN4", "TimeN3", "TimeN2", "TimeN1", "Actual_Target"]) # 0, -2: fips and most recent actual-target
#st.write("Last 6 Total Weekly Cases, ", df_actual_temp.head(20))
data_merge= pd.DataFrame() #df_ref_temp.copy()
data_merge= pd.merge(df_ref_temp, df_model_temp, on="fips")
data_merge_left=data_merge.copy()
data_merge= pd.merge(data_merge_left, df_actual_temp, on="fips")
#st.write("df_actual_temp:, ", data_merge.head())
#st.error("Stop for checking how many is loaded")
data_merge.iloc[:,1:] = data_merge.iloc[:,1:].astype(float)
#st.write("Data Merged: ", data_merge.head())
#data_merge = data_merge.iloc[:,[1,2,3]].astype(float)
df_forecast_target=data_merge.copy()
#df_forecast_target_Scaled = df_forecast_target_Scaled.astype(float)
len_data=len(df_forecast_target)
df_population= pd.read_csv("covid_county_population_usafacts.csv", engine='python', dtype={'fips': str, 'fips_1': str})
df_forecast_target_Scaled = df_forecast_target.copy()
i=0
while i <len_data:
fips=df_forecast_target['fips'].iloc[i] # use the i-th row's fips; iloc[0] would reuse the first county's population for every row
population=df_population.population[df_population.fips==fips].values[0]
df_forecast_target_Scaled.iloc[i,1:]=df_forecast_target.iloc[i,1:]/population*1000
i=i+1
df_forecast_target_Scaled.iloc[:,1:] = df_forecast_target_Scaled.iloc[:,1:].astype(float)
#st.write("df_forecast_target_Scaled", df_forecast_target_Scaled.head())
data_viz=df_forecast_target_Scaled.copy()
#Delete All The Data Frames that we do not need!
#=======================Delete all the DataFrame we do not need ==================
df_forecast_target_Scaled=pd.DataFrame()
data_merge=pd.DataFrame()
df_forecast_target=pd.DataFrame()
df_forecast_Ensemble_previous=pd.DataFrame()
import sys
import numpy as np
import random
import pandas as pd
from dec_reprod import de_uniform
from moead import optimize
instance = int(sys.argv[1])
rep = 51
benchmarks = ["hangseng", "dax", "ftse", "sp", "nikkei"]
savedir = "tmp/{}/unif/".format(benchmarks[instance-1])
N, T, gen = 100, 20, 1500
sigma, nr = 0.9, 2
par = [1]
#print(instance, benchmarks[instance-1])
#print(par)
#print("====================================")
for i in range(rep):
np.random.seed(500+i)
random.seed(500+i)
print("Start {}-th experiment.".format(i+1))
res = optimize(instance, N, T, gen, de_uniform, par, sigma, nr, True, 100)
res = pd.DataFrame(res, columns=["return", "risk"])
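# Plausible continuation (assumption): persist this run's front under savedir,
# which is assumed to already exist on disk
res.to_csv("{}run_{:02d}.csv".format(savedir, i + 1), index=False)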
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
# ### Connect to Postgres
# In[2]:
import psycopg2
conn=psycopg2.connect(database="ernie",user="wenxi",host="localhost",password="<PASSWORD>")
conn.set_client_encoding('UTF8')
conn.autocommit=True
curs=conn.cursor()
# In[3]:
curs.execute("SET SEARCH_PATH TO sb_plus;")
# In[4]:
df = pd.read_sql('SELECT * FROM sb_plus_complete_kinetics', conn)
# In[5]:
# Sort dataframe by cited_1, cited_2, co_cited_year and reset index to make sure the table is correct
df = df.sort_values(['cited_1', 'cited_2', 'co_cited_year'])
df = df.reset_index().drop(columns='index')
# In[271]:
df
# ### Filling in missing years between first_possible_year and last_co_cited_year
# In[7]:
# Distinct pairs
pairs = pd.DataFrame(df.groupby(['cited_1','cited_2']).size()).reset_index().drop(columns=0)
pairs
# In[8]:
# number of pairs for which the first year has to be filled in
len(pairs) - len(df[df['first_possible_year'] == df['co_cited_year']].groupby(['cited_1','cited_2']).size())
# In[9]:
x1 = pd.DataFrame(df.groupby(['cited_1','cited_2', 'first_possible_year'])['co_cited_year'].min()).reset_index()
x1
# In[10]:
# add first row for pairs that first_possible_year is not equal to co_cited_year
l = list([x1['cited_1'], x1['cited_2'], x1['co_cited_year'], x1['first_possible_year']])
l_new = [[],[],[],[]]
for i in range(len(l[0])):
if l[2][i] > l[3][i]:
l_new[0].append(l[0][i])
l_new[1].append(l[1][i])
l_new[2].append(l[3][i])
l_new[3].append(l[3][i])
# In[11]:
x2 = pd.DataFrame({'cited_1':l_new[0], 'cited_2':l_new[1], 'co_cited_year':l_new[2], 'first_possible_year':l_new[3]})
x2
# In[12]:
x3 = pd.concat([df, x2], axis=0)
x3
# In[13]:
# Fill in zeros for frequency, next values for all other columns, and sort values by co_cited_year and reset index
x3 = x3.sort_values(['cited_1','cited_2','co_cited_year']).reset_index().drop(columns='index')
x3['frequency'] = x3['frequency'].fillna(0)
x3 = x3.fillna(method='bfill')
# In[14]:
x3
# In[15]:
# Double check the number of pairs is correct
len(x3.groupby(['cited_1','cited_2']).size())
# In[16]:
# Double check all first_possible_year is filled in as the first co_cited_year
len(x3[x3['first_possible_year'] == x3['co_cited_year']])
# In[17]:
l = list([x3['cited_1'], x3['cited_2'], x3['co_cited_year']])
# In[89]:
# Fill in all missing years
import timeit
start = timeit.default_timer()
l_new = [[],[],[]]
for i in range(len(l[0])-1):
if (l[0][i] == l[0][i+1]) & (l[1][i] == l[1][i+1]):
if l[2][i+1] - l[2][i] > 1:
l_new[0].append(l[0][i])
l_new[1].append(l[1][i])
l_new[2].append(l[2][i])
for j in range(1, (l[2][i+1] - l[2][i])):
l_new[0].append(l[0][i])
l_new[1].append(l[1][i])
l_new[2].append(l[2][i]+j)
else:
l_new[0].append(l[0][i])
l_new[1].append(l[1][i])
l_new[2].append(l[2][i])
else:
l_new[0].append(l[0][i])
l_new[1].append(l[1][i])
l_new[2].append(l[2][i])
l_new[0].append(l[0][len(l[0])-1])
l_new[1].append(l[1][len(l[0])-1])
l_new[2].append(l[2][len(l[0])-1])
stop = timeit.default_timer()
# In[90]:
# How long it takes to finish filling in missing years
print(stop-start)
# 227s for 51,613 pairs
# In[91]:
# The number of rows increased because all missing years have been appended
len(l_new[2])
# In[92]:
df_new = pd.DataFrame({'cited_1':l_new[0], 'cited_2':l_new[1], 'co_cited_year':l_new[2]})
# In[93]:
df_new2 = df_new.merge(x3, how='left', on=['cited_1', 'cited_2', 'co_cited_year'])
# In[94]:
# Fill in zeros for frequency
df_new2['frequency'] = df_new2['frequency'].fillna(0)
# In[95]:
# Forward fill in values for all other columns
df_new2 = df_new2.fillna(method='ffill')
# In[96]:
df_new2
# In[97]:
# Recalculate the min_frequency for all pairs since filling missing years will change the min_frequency to be 0
df_new3 = pd.DataFrame(df_new2.groupby(['cited_1', 'cited_2'])['frequency'].min()).reset_index()
x_new = df_new3.merge(df_new2, how='left', on=['cited_1','cited_2']).drop(columns='min_frequency').rename(columns={'frequency_x':'min_frequency','frequency_y':'frequency'})
# In[98]:
x_new
# ### Find Sleeping Beauty Pairs
#
# (1) Minimum sleeping duration = 10 starting from the first_possible_year
#
# (2) During sleeping duration the average co-citation frequency <= 1 and each year co-citation frequency <= 2
#
# (3) Calculate the slope between the first year after sleeping duration and the first peak year
#
# (4) Calculate B_index on pairs
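# For reference, the beauty coefficient computed in the `b_index` cells below is,
# summed over years t from the first possible year t0 up to the peak year t_peak:
#   B = sum_t [ ((c_peak - c_0) / (t_peak - t0)) * (t - t0) + c_0 - c_t ] / max(1, c_t)
# where c_t is the co-citation frequency in year t, c_0 the frequency in the first
# possible year, and c_peak the frequency in the peak year.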
# In[99]:
# Calculate B_index on 51,613 Pairs
x_new
# In[100]:
# Add a column with each pair's frequency in its first_possible_year (pub_year_frequency)
pub_freq = x_new[['cited_1', 'cited_2','frequency']][x_new['co_cited_year'] == x_new['first_possible_year']].rename(columns={'frequency':'pub_year_frequency'})
x_new = x_new.merge(pub_freq, how='left', on=['cited_1', 'cited_2'])
# In[101]:
# add a column of max{1, ct}
x_new['max_1_ct'] = np.where(x_new['frequency'] > 1, x_new['frequency'], 1)
# In[102]:
# Calculate the per-year b_index term on x_new (beauty-coefficient equation)
x_new['b_index'] = (((x_new['max_frequency'] - x_new['pub_year_frequency'])/(x_new['peak_year'] - x_new['first_possible_year'])) * (x_new['co_cited_year'] - x_new['first_possible_year']) + x_new['pub_year_frequency'] - x_new['frequency'])/(x_new['max_1_ct'])
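# In equation form, with t0 = first_possible_year, c0 = pub_year_frequency (co-citations in t0),
# cmax = max_frequency, tmax = peak_year and ct = frequency in year t, the line above computes
#     b_t = [ (cmax - c0) / (tmax - t0) * (t - t0) + c0 - ct ] / max(1, ct)
# i.e. the gap between the straight line joining (t0, c0) to (tmax, cmax) and the observed
# frequency, normalised by max(1, ct); the beauty coefficient is the sum of b_t up to the
# peak year (computed in the next cell).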
# In[103]:
# Sum across years until peak_year
sb_index = pd.DataFrame(x_new[x_new['co_cited_year'] <= x_new['peak_year']].groupby(['cited_1','cited_2'])['b_index'].sum())
sb_index = sb_index.sort_values('b_index', ascending=False).reset_index()
# In[104]:
sb_index
# In[105]:
# Statistical summary of b_index for all pairs
sb_index['b_index'].describe()
# In[106]:
# Extract sb pairs by applying van Raan's conditions
import warnings
warnings.filterwarnings('ignore')
start = timeit.default_timer()
z = pd.DataFrame(columns=x_new.columns)
for i in range(len(pairs)):
g = x_new[(x_new['cited_1'] == pairs['cited_1'][i]) & (x_new['cited_2'] == pairs['cited_2'][i])]
g = g.reset_index().drop(columns='index')
g['awake_year_index'] = ''
if g.index[g['frequency'] > 2].min() >= 10:
g['awake_year_index'][g.index[g['frequency'] > 2].min()] = 1
if g[0:g.index[g['frequency'] > 2].min()]['frequency'].mean() <= 1:
z = pd.concat([z,g], ignore_index=True)
stop = timeit.default_timer()
# In[107]:
# How long it takes to find sb pairs
print(stop-start)
# 341s for 51,613 pairs
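# Sketch only: with the is_sleeping_beauty helper sketched above, the same selection (minus the
# awake_year_index annotation added in the loop) could be expressed as a groupby filter:
# z_alt = x_new.groupby(['cited_1', 'cited_2']).filter(is_sleeping_beauty)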
# In[108]:
z1 = z.copy()
# In[109]:
z1
# In[110]:
# Number of pairs that satisfy our stringent conditions for being identified as sleeping beauty co-citation pairs
pairs1 = pd.DataFrame(z1.groupby(['cited_1','cited_2']).size()).reset_index().drop(columns=0)
pairs1
# In[111]:
#zz1 = pd.DataFrame(z1.groupby(['cited_1','cited_2'])['frequency'].sum()).reset_index()
#zz1.to_csv('1196_pairs_frequency.csv')
# In[112]:
# Statistical summary of sb pairs extracted
ss = pairs1.merge(sb_index, how='left', on=['cited_1','cited_2']).sort_values('b_index', ascending=False).reset_index().drop(columns='index')
ss['b_index'].describe()
# In[113]:
z2 = pd.DataFrame(columns=z1.columns)
for i in range(len(pairs1)):
g = z1[(z1['cited_1'] == pairs1['cited_1'][i]) & (z1['cited_2'] == pairs1['cited_2'][i])]
g = g.reset_index().drop(columns='index')
g['awake_year'] = ''
g['awake_frequency'] = ''
tmp1 = g.loc[g['awake_year_index'] == 1, 'co_cited_year'].iloc[0]
tmp2 = g.loc[g['awake_year_index'] == 1, 'frequency'].iloc[0]
g['awake_year'] = tmp1
g['awake_frequency'] = tmp2
z2 = pd.concat([z2,g], ignore_index=True)
# In[114]:
z2 = z2.drop(columns='awake_year_index')
# In[115]:
z2['awake_duration'] = z2['peak_year'] - z2['awake_year']
z2['sleep_duration'] = z2['awake_year'] - z2['first_possible_year']
# In[116]:
# Calculate slope for sb pairs
z2['slope'] = ''
for i in range(len(z2)):
if z2['awake_duration'][i] == 0:
z2['slope'][i] = np.nan
else:
z2['slope'][i] = (z2['max_frequency'][i] - z2['awake_frequency'][i])/z2['awake_duration'][i]
# In[117]:
z2
# In[118]:
# Statistical summary of the slope column
zz = pd.DataFrame(z2.groupby(['cited_1','cited_2','slope'])['frequency'].sum()).sort_values('slope', ascending=False).reset_index()
zz
# In[119]:
zz['slope'].describe()
# In[120]:
#zz.to_csv('slope_frequency.csv')
# In[259]:
zz2 = pd.DataFrame(ss.merge(z2, how='outer', on=['cited_1','cited_2']).groupby(['cited_1','cited_2','max_frequency','sleep_duration'])['slope'].max()).reset_index()
zz2
# In[260]:
zz2 = zz2.merge(ss, how='left', on=['cited_1','cited_2'])
zz2.to_csv('1196_pairs_all_values.csv')
# In[137]:
intersect_sb = sb_index.merge(zz2, how='right', on=['cited_1','cited_2'])
intersect_sb
# In[143]:
len(intersect_sb[intersect_sb['b_index'] >= intersect_sb['b_index'].quantile(0.50)])
# In[150]:
#plt.plot(intersect_sb['b_index'])
import matplotlib.pyplot as plt
plt.hist(intersect_sb['b_index'], density=True)
plt.xlabel('beauty coefficient')
# In[146]:
out = pd.DataFrame(z2.groupby(['cited_1','cited_2'])['frequency'].sum()).sort_values('frequency').reset_index()
out
# In[267]:
from sqlalchemy import create_engine
engine = create_engine('postgresql://wenxi:temp_ERNIE_1234@localhost:5432/ernie')
# In[268]:
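# head(0).to_sql(...) only creates/replaces the table with the right schema (no rows are
# inserted); the COPY below then streams the full DataFrame into it.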
zz2.head(0).to_sql('sb_1196_pairs_all_values', engine, if_exists='replace',index=False)
# In[270]:
import io
conn = engine.raw_connection()
cur = conn.cursor()
#cur.execute("SET SEARCH_PATH TO sb_plus;")
output = io.StringIO()
zz2.to_csv(output, sep='\t', header=False, index=False)
output.seek(0)
cur.copy_from(output, 'sb_1196_pairs_all_values', null="") # null values become ''
conn.commit()
# In[127]:
#zz2 = zz2.merge(ss, how='left', on=['cited_1','cited_2'])
#zz2.to_csv('1196_pairs_all_values.csv')
# In[128]:
# Pairs with slope = NA:
z2[z2['slope'].isna()].groupby(['cited_1', 'cited_2']).size()
# 10 pairs:
# 2580 5944246702 44
# 970456 2364893 33
# 1686592 2364893 31
# 2364893 33744566767 34
# 15231889 17138283 43
# 16262898 18934508 37
# 17769362 18485995 40
# 18039087 18513290 40
# 20909036 84944438568 34
# 41906953 58149417364 39
# In[129]:
# frequency of those pairs with slope = NA
z2[z2['slope'].isna()].groupby(['cited_1', 'cited_2'])['frequency'].sum()
# In[449]:
import matplotlib.pyplot as plt
for i in range(len(pairs1)):
g = z2[(z2['cited_1'] == pairs1['cited_1'][i]) & (z2['cited_2'] == pairs1['cited_2'][i])]
g = g.reset_index().drop(columns='index')
plt.title([(pairs1['cited_1'][i],pairs1['cited_2'][i]), i+1])
plt.axvline(g['awake_year'][0])
plt.xlabel('co_cited_year')
plt.ylabel('frequency')
plt.xlim(1970, 2019)
plt.ylim(0, 50)
plt.plot(g['co_cited_year'], g['frequency'], color='green')
plt.axhline(y=2,linestyle='-',color='orange')
plt.legend(['Awake Year', 'Kinetics', 'Frequency = 2'])
plt.show()
# ### Generate Plot: 3 Sleeping Beauty Pairs Based on Slope
# In[450]:
na = pd.DataFrame(z2[z2['slope'].isna()].groupby(['cited_1', 'cited_2']).size()).drop(columns=0).reset_index()
na
# In[451]:
# Plot for sb pairs with slope = NA
for i in range(len(na)):
g = z2[(z2['cited_1'] == na['cited_1'][i]) & (z2['cited_2'] == na['cited_2'][i])]
g = g.reset_index().drop(columns='index')
plt.title([(na['cited_1'][i],na['cited_2'][i])])
plt.axvline(g['awake_year'][0])
plt.xlabel('co_cited_year')
plt.ylabel('frequency')
plt.xlim(1984, 2019)
#plt.ylim(0, 50)
plt.plot(g['co_cited_year'], g['frequency'], color='green')
plt.axhline(y=2,linestyle='-',color='orange')
plt.legend(['Awake Year', 'Kinetics', 'Frequency = 2'])
plt.show()
# In[452]:
# Plot for sb pairs with min slope
g = z2[z2['slope'] == z2['slope'].min()]
g = g.reset_index().drop(columns='index')
plt.title([(g['cited_1'][0],g['cited_2'][0])])
plt.axvline(g['awake_year'][0])
plt.xlabel('co_cited_year')
plt.ylabel('frequency')
plt.xlim(1984, 2019)
#plt.ylim(0, 50)
plt.plot(g['co_cited_year'], g['frequency'], color='green')
plt.axhline(y=2,linestyle='-',color='orange')
plt.legend(['Awake Year', 'Kinetics', 'Frequency = 2'])
plt.show()
# In[453]:
mean = pd.DataFrame(z2[(z2['slope'] <= 2.38) & (z2['slope'] >= 2.35)].groupby(['cited_1', 'cited_2']).size()).drop(columns=0).reset_index()
mean
# In[454]:
# Plot for sb pairs with mean slope
for i in range(len(mean)):
g = z2[(z2['cited_1'] == mean['cited_1'][i]) & (z2['cited_2'] == mean['cited_2'][i])]
g = g.reset_index().drop(columns='index')
plt.title([(mean['cited_1'][i],mean['cited_2'][i])])
plt.axvline(g['awake_year'][0])
plt.xlabel('co_cited_year')
plt.ylabel('frequency')
plt.xlim(1984, 2019)
#plt.ylim(0, 50)
plt.plot(g['co_cited_year'], g['frequency'], color='green')
plt.axhline(y=2,linestyle='-',color='orange')
plt.legend(['Awake Year', 'Kinetics', 'Frequency = 2'])
plt.show()
# In[258]:
ax_1 = z2[(z2['cited_1'] == 1686592) & (z2['cited_2'] == 2364893)].reset_index().drop(columns='index')
ax_2 = z2[(z2['cited_1'] == 4465903) & (z2['cited_2'] == 6073669)].reset_index().drop(columns='index')
ax_3 = z2[(z2['cited_1'] == 22465686) & (z2['cited_2'] == 22638979)].reset_index().drop(columns='index')
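# Naming note for the variables below: *_awake_x is the awake year, *_awake_y the peak year,
# *_peak_x the frequency in the awake year and *_peak_y the frequency in the peak year, so each
# dotted line drawn further down joins (awake_year, awake frequency) to (peak_year, peak frequency).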
ax_1_awake_x = ax_1['awake_year'][0]
ax_1_awake_y = ax_1['peak_year'][0]
ax_1_peak_x = ax_1['frequency'][ax_1['awake_year'] == ax_1['co_cited_year']].item()
ax_1_peak_y = ax_1['frequency'][ax_1['peak_year'] == ax_1['co_cited_year']].item()
ax_2_awake_x = ax_2['awake_year'][0]
ax_2_awake_y = ax_2['peak_year'][0]
ax_2_peak_x = ax_2['frequency'][ax_2['awake_year'] == ax_2['co_cited_year']].item()
ax_2_peak_y = ax_2['frequency'][ax_2['peak_year'] == ax_2['co_cited_year']].item()
ax_3_awake_x = ax_3['awake_year'][0]
ax_3_awake_y = ax_3['peak_year'][0]
ax_3_peak_x = ax_3['frequency'][ax_3['awake_year'] == ax_3['co_cited_year']].item()
ax_3_peak_y = ax_3['frequency'][ax_3['peak_year'] == ax_3['co_cited_year']].item()
fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(12,7), sharex=True, sharey=True)
#ax1.set_title('Sleeping Beauty Pair with Largest Slope', fontsize=14)
ax1.plot(ax_1['co_cited_year'], ax_1['frequency'], color='black')
ax1.plot(ax_1['awake_year'][0], 0, marker='^', color='red')
ax1.plot([ax_1_awake_x, ax_1_awake_y], [ax_1_peak_x, ax_1_peak_y], 'blue', linestyle=':',marker='*')
ax1.axhline(y=2, linestyle='-',color='grey')
#ax1.set_xlabel('co_cited_year', fontsize=13)
#ax1.set_ylabel('frequency', fontsize=13)
#ax2.set_title('Sleeping Beauty Pair with Mean Slope', fontsize=14)
ax2.plot(ax_2['co_cited_year'], ax_2['frequency'], color='black')
ax2.plot(ax_2['awake_year'][0], 0, marker='^', color='red')
ax2.plot([ax_2_awake_x, ax_2_awake_y], [ax_2_peak_x, ax_2_peak_y], 'blue', linestyle=':',marker='*', linewidth=4.0)
ax2.axhline(y=2, linestyle='-',color='grey')
#ax2.set_xlabel('co_cited_year', fontsize=13)
#ax2.set_ylabel('frequency', fontsize=13)
#ax3.set_title('Sleeping Beauty Pair with Small Slope', fontsize=14)
ax3.plot(ax_3['co_cited_year'], ax_3['frequency'], color='black')
ax3.plot(ax_3['awake_year'][0], 0, marker='^', color='red')
ax3.plot([ax_3_awake_x, ax_3_awake_y], [ax_3_peak_x, ax_3_peak_y], 'blue', linestyle=':',marker='*', linewidth=4.0)
ax3.axhline(y=2, linestyle='-',color='grey')
#ax3.set_xlabel('co_cited_year', fontsize=13)
#ax3.set_ylabel('frequency', fontsize=13)
fig.text(0.5, 0.06, 'co_cited_year', ha='center', fontsize=15)
fig.text(0.08, 0.5, 'frequency', va='center', rotation='vertical', fontsize=15)
#fig.tight_layout()
fig.savefig('output.png', dpi=300)
fig.savefig('output.pdf', dpi=300)
fig.savefig('output.tiff', dpi=300)
# In[456]:
#output = pd.concat([ax_1,ax_2,ax_3])
#output.to_csv('sb_plot.csv')
# ### Find a Non-Sleeping Beauty Pair
# In[457]:
kk = x_new.merge(pairs1, how='right', on=['cited_1','cited_2'])
# In[458]:
kk1 = pd.concat([kk, x_new]).drop_duplicates(keep=False)
# In[459]:
kk1_pairs = pd.DataFrame(kk1.groupby(['cited_1','cited_2']).size()).drop(columns=0).reset_index()
kk1_pairs
# In[460]:
# Select one non-sleeping-beauty pair with a comparable scale that plots nicely
g = kk1[(kk1['cited_1'] == 20896) & (kk1['cited_2'] == 33845282341)]
g = g.reset_index().drop(columns='index')
plt.title([(g['cited_1'][0], g['cited_2'][0])])
plt.xlabel('co_cited_year')
plt.ylabel('frequency')
plt.xlim(1984, 2019)
plt.ylim(0, 25)
plt.plot(g['co_cited_year'], g['frequency'], color='green')
plt.show()
# In[461]:
#g.to_csv('non_sb_plot.csv')
# ### Generate Plot: 3 Sleeping Beauty Pairs and 3 Non-Sleeping Beauty Pairs Based on Frequency
# In[462]:
# Statistical summary of total frequency for sb pairs
z2.groupby(['cited_1','cited_2'])['frequency'].sum().describe()
# In[463]:
tt = pd.DataFrame(z2.groupby(['cited_1','cited_2'])['frequency'].sum())
tt
# In[464]:
# Max total frequency
tt[tt['frequency'] == tt['frequency'].max()]
# In[465]:
max_freq = z2[(z2['cited_1'] == 16823810) & (z2['cited_2'] == 84965520932)].reset_index().drop(columns='index')
# In[466]:
# Mean total frequency
# tt['frequency'].mean() = 285.947
tt[(tt['frequency'] <= 286) & (tt['frequency'] >= 285)]
# In[467]:
mean_freq = z2[(z2['cited_1'] == 14905513) & (z2['cited_2'] == 21344602)].reset_index().drop(columns='index')
# In[468]:
# Min total frequency
tt[tt['frequency'] == tt['frequency'].min()]
# In[469]:
min_freq = z2[(z2['cited_1'] == 23020183) & (z2['cited_2'] == 25752384)].reset_index().drop(columns='index')
# In[470]:
#sb_total_frequency = pd.concat([max_freq, mean_freq, min_freq])
#sb_total_frequency.to_csv('sb_total_frequency.csv')
# In[471]:
# Statistical summary of total frequency for non-sb pairs
kk1.groupby(['cited_1','cited_2'])['frequency'].sum().describe()
# In[472]:
tt1 = pd.DataFrame(kk1.groupby(['cited_1','cited_2'])['frequency'].sum())
tt1
# In[473]:
# Max total frequency
tt1[tt1['frequency'] == tt1['frequency'].max()]
# In[474]:
max_freq1 = kk1[(kk1['cited_1'] == 189651) & (kk1['cited_2'] == 345491105)].reset_index().drop(columns='index')
# In[475]:
# Mean total frequency
# tt1['frequency'].mean() = 265.128
tt1[(tt1['frequency'] <= 265.13) & (tt1['frequency'] >= 265)]
# In[476]:
mean_freq1 = kk1[(kk1['cited_1'] == 194808) & (kk1['cited_2'] == 33744541191)].reset_index().drop(columns='index')
# In[477]:
# Min total frequency
tt1[tt1['frequency'] == tt1['frequency'].min()]
# In[478]:
min_freq1 = kk1[(kk1['cited_1'] == 24444465579) & (kk1['cited_2'] == 33744749965)].reset_index().drop(columns='index')
# In[479]:
#non_sb_total_frequency = pd.concat([max_freq1, mean_freq1, min_freq1])
#non_sb_total_frequency.to_csv('non_sb_total_frequency.csv')
# In[480]:
# g = kk1[(kk1['cited_1'] == 45149145322) & (kk1['cited_2'] == 85011817347)]
# g = g.reset_index().drop(columns='index')
# plt.xlabel('co_cited_year')
# plt.ylabel('frequency')
# plt.xlim(1975, 2019)
# #plt.ylim(0, 25)
# plt.plot(g['co_cited_year'], g['frequency'], color='green')
# plt.show()
# ### Get Individual Publications of Sb Pairs
# In[481]:
z2
# In[482]:
single_pub = pd.DataFrame(set(z2['cited_1'].unique().tolist() + z2['cited_2'].unique().tolist()))
single_pub.columns = ['cited_paper']
# In[483]:
len(single_pub)
# In[484]:
# Calculate kinetics of individual publications by Neo4j: ERNIE-Neo4j-sb-plus-kinetics-single-pub
single_pub.to_csv('single_pub.csv', index=False)
# ### Read in Kinetics of Individual Publications of Sb Pairs Calculated by Neo4j and Do the Pre-processing Step
# In[21]:
sp = pd.read_csv('single_pub_kinetics.csv')
sp
# In[22]:
len(sp.cited_paper.unique()) # Double check that the number of single publications is correct
# #### Filling in missing years between first year and last year
# In[23]:
# Distinct cited publications
p = pd.DataFrame(sp.groupby(['cited_paper']).size()).reset_index().drop(columns=0)
# In[24]:
p
# In[25]:
# Number of single publications whose first year has to be filled in
len(p) - len(sp[sp['year'] == sp['pub_year']])
# In[26]:
k = pd.DataFrame(sp.groupby(['cited_paper', 'pub_year'])['year'].min()).reset_index()
k
# In[27]:
k1 = list([k['cited_paper'], k['year'], k['pub_year']])
# In[28]:
# Add a first row for publications whose earliest co-cited year is greater than pub_year
start = timeit.default_timer()
k_new = [[],[],[]]
for i in range(len(k1[0])):
if k1[1][i] > k1[2][i]:
k_new[0].append(k1[0][i])
k_new[1].append(k1[2][i])
k_new[2].append(k1[2][i])
stop = timeit.default_timer()
# In[29]:
# how long it takes to fill in missing years
stop - start
# 0.04s for 1267 publications
# In[30]:
k_new2 = pd.DataFrame({'cited_paper':k_new[0], 'year':k_new[1], 'pub_year':k_new[2]})
# In[31]:
k2 = | pd.concat([sp, k_new2], axis=0) | pandas.concat |
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.transforms import BlendedGenericTransform
import matplotlib.gridspec as gridspec
import pickle as pic
import sys, os
import numpy as np
import math as math
import pandas as pd
import seaborn as sns
HYP = [1.1, 1.2, 1.4, 1.6, 1.8, 1.9]
BETA = 0.0
it = 1000
rand = 100
S = 2
M = [2, 4]
init = "circ"
datas = pd.DataFrame(columns=['alpha', 'M', 'error_name', 'score'])
EST_DIR_A = "/home/mafontai/Documents/project/git_project/speech_separation/alpha_SpatialMNMF/results_" + str(S) + "toy/dev/"
sns.set_style("whitegrid")
# cste = 12
# params = {
# 'backend': 'ps',
# 'axes.labelsize': cste,
# 'font.size': cste,
# 'legend.fontsize': cste,
# 'xtick.labelsize': cste,
# 'ytick.labelsize': cste,
# 'text.usetex': True,
# 'font.family': 'serif',
# 'font.serif': 'ptmrr8re',
# }
#
# sns.set_style("whitegrid", {
# 'pgf.texsystem': 'xelatex', # pdflatex, xelatex, lualatex
# 'text.usetex': True,
# 'font.family': 'serif',
# 'axes.labelsize': cste,
# 'legend.labelspacing':0,
# 'legend.borderpad':0,
# 'font.size': cste,
# 'legend.fontsize': cste,
# 'xtick.labelsize': cste,
# 'ytick.labelsize': cste,
# 'font.serif': [],
# })
# plt.rcParams.update(params)
fig_width_pt = 400.6937 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
golden_mean = (math.sqrt(5) - 1.0) / 2.0 # Aesthetic ratio
fig_width = fig_width_pt * inches_per_pt # width in inches
fig_height = fig_width * golden_mean # height in inches
fig_size = np.array([2. * fig_width, 2. * fig_height])
fig_size2 = np.array([fig_width, fig_height + 0.042 * fig_height])
height = 5
capsize = 0.8
errwidth = 0
aspect = 1.
#
# for n_al, alpha in enumerate(HYP):
# for n_m, mic in enumerate(M):
# for id in range(2, rand - 10):
# SAVE_PATH_A = os.path.join(EST_DIR_A, "alpha=%s" % str(alpha), "beta=%s" % str(BETA))
# file_path = os.path.join(SAVE_PATH_A, "alpha_SpatialMNMF-parameters-M={}-S={}-it={}-init={}-rand={}-ID=test.npz").format(
# str(mic), str(S), str(it), init, str(id))
# file = np.load(file_path)
# print('{}'.format(file_path))
# lambda_NT = file['lambda_NT']
# lambda_true_NT = file['lambda_true_NT']
# SM_NP = file['SM_NP']
# SM_true_NP = file['SM_true_NP']
# Y_true_NTM = file['Y_true_NTM']
# Y_NTM = file['Y_NTM']
#
# SM_error = 100. * (np.abs(SM_true_NP-SM_NP).sum(axis=0).mean() / np.abs(SM_true_NP+ 1e-14).sum(axis=0).mean())
# PSD_error = 100. * (np.abs(lambda_true_NT - lambda_NT).sum(axis=0).mean() / np.abs(lambda_true_NT + 1e-14).sum(axis=0).mean())
# DATA_error = 100. * (np.abs(Y_true_NTM-Y_NTM).sum(axis=0).mean() / np.abs(Y_true_NTM + 1e-14).sum(axis=0).mean())
#
# dict_SM_error = {'alpha':alpha,
# 'M': mic,
# 'error_name': "SM_error",
# 'score': SM_error
# }
#
# dict_PSD_error = {'alpha':alpha,
# 'M': mic,
# 'error_name': "PSD_error",
# 'score': PSD_error
# }
#
# dict_DATA_error = {'alpha':alpha,
# 'M': mic,
# 'error_name': "DATA_error",
# 'score': DATA_error
# }
# datas = datas.append(dict_SM_error,
# ignore_index=True)
# datas = datas.append(dict_PSD_error,
# ignore_index=True)
# datas = datas.append(dict_DATA_error,
# ignore_index=True)
#
# datas.to_pickle('./results_alphaSpatialtoy.pic')
datas = | pd.read_pickle("./results_alphaSpatialtoy.pic") | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""
Test data
"""
# Imports
import pandas as pd
from edbo.feature_utils import build_experiment_index
# Build data sets from indices
def aryl_amination(aryl_halide='ohe', additive='ohe', base='ohe', ligand='ohe', subset=1):
"""
Load aryl amination data with different features.
"""
# SMILES index
index = pd.read_csv('data/aryl_amination/experiment_index.csv')
# Choose subset:
ar123 = ['FC(F)(F)c1ccc(Cl)cc1','FC(F)(F)c1ccc(Br)cc1','FC(F)(F)c1ccc(I)cc1']
ar456 = ['COc1ccc(Cl)cc1','COc1ccc(Br)cc1','COc1ccc(I)cc1']
ar789 = ['CCc1ccc(Cl)cc1','CCc1ccc(Br)cc1','CCc1ccc(I)cc1']
ar101112 = ['Clc1ccccn1','Brc1ccccn1','Ic1ccccn1']
ar131415 = ['Clc1cccnc1','Brc1cccnc1','Ic1cccnc1']
def get_subset(ar):
a = index[index['Aryl_halide_SMILES'] == ar[0]]
b = index[index['Aryl_halide_SMILES'] == ar[1]]
c = index[index['Aryl_halide_SMILES'] == ar[2]]
return pd.concat([a,b,c])
if subset == 1:
index = get_subset(ar123)
elif subset == 2:
index = get_subset(ar456)
elif subset == 3:
index = get_subset(ar789)
elif subset == 4:
index = get_subset(ar101112)
elif subset == 5:
index = get_subset(ar131415)
# Aryl halide features
if aryl_halide == 'dft':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_dft.csv')
elif aryl_halide == 'mordred':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_mordred.csv')
elif aryl_halide == 'ohe':
aryl_features = pd.read_csv('data/aryl_amination/aryl_halide_ohe.csv')
# Additive features
if additive == 'dft':
add_features = pd.read_csv('data/aryl_amination/additive_dft.csv')
elif additive == 'mordred':
add_features = pd.read_csv('data/aryl_amination/additive_mordred.csv')
elif additive == 'ohe':
add_features = pd.read_csv('data/aryl_amination/additive_ohe.csv')
# Base features
if base == 'dft':
base_features = pd.read_csv('data/aryl_amination/base_dft.csv')
elif base == 'mordred':
base_features = pd.read_csv('data/aryl_amination/base_mordred.csv')
elif base == 'ohe':
base_features = pd.read_csv('data/aryl_amination/base_ohe.csv')
# Ligand features
if ligand == 'Pd(0)-dft':
ligand_features = pd.read_csv('data/aryl_amination/ligand-Pd(0)_dft.csv')
elif ligand == 'mordred':
ligand_features = pd.read_csv('data/aryl_amination/ligand_mordred.csv')
elif ligand == 'ohe':
ligand_features = pd.read_csv('data/aryl_amination/ligand_ohe.csv')
# Build the descriptor set
index_list = [index['Aryl_halide_SMILES'],
index['Additive_SMILES'],
index['Base_SMILES'],
index['Ligand_SMILES']]
lookup_table_list = [aryl_features,
add_features,
base_features,
ligand_features]
lookup_list = ['aryl_halide_SMILES',
'additive_SMILES',
'base_SMILES',
'ligand_SMILES']
experiment_index = build_experiment_index(index['entry'],
index_list,
lookup_table_list,
lookup_list)
experiment_index['yield'] = index['yield'].values
return experiment_index
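# Example usage (hypothetical call; it assumes the CSV files under data/aryl_amination/ are
# available, and the same pattern applies to suzuki() and direct_arylation() below):
# aa = aryl_amination(aryl_halide='dft', additive='mordred', base='ohe', ligand='ohe', subset=1)
# aa.head()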
def suzuki(electrophile='ohe', nucleophile='ohe', base='ohe', ligand='ohe', solvent='ohe'):
"""
Load Suzuki data with different features.
"""
# SMILES index
index = pd.read_csv('data/suzuki/experiment_index.csv')
# Electrophile features
if electrophile == 'dft':
elec_features = pd.read_csv('data/suzuki/electrophile_dft.csv')
elif electrophile == 'mordred':
elec_features = pd.read_csv('data/suzuki/electrophile_mordred.csv')
elif electrophile == 'ohe':
elec_features = pd.read_csv('data/suzuki/electrophile_ohe.csv')
# Nucleophile features
if nucleophile == 'dft':
nuc_features = pd.read_csv('data/suzuki/nucleophile_dft.csv')
elif nucleophile == 'mordred':
nuc_features = pd.read_csv('data/suzuki/nucleophile_mordred.csv')
elif nucleophile == 'ohe':
nuc_features = pd.read_csv('data/suzuki/nucleophile_ohe.csv')
# Base features
if base == 'dft':
base_features = pd.read_csv('data/suzuki/base_dft.csv')
elif base == 'mordred':
base_features = pd.read_csv('data/suzuki/base_mordred.csv')
elif base == 'ohe':
base_features = pd.read_csv('data/suzuki/base_ohe.csv')
# Ligand features
if ligand == 'random-dft':
ligand_features = pd.read_csv('data/suzuki/ligand-random_dft.csv')
elif ligand == 'boltzmann-dft':
ligand_features = pd.read_csv('data/suzuki/ligand-boltzmann_dft.csv')
elif ligand == 'mordred':
ligand_features = pd.read_csv('data/suzuki/ligand_mordred.csv')
elif ligand == 'ohe':
ligand_features = pd.read_csv('data/suzuki/ligand_ohe.csv')
# Solvent features
if solvent == 'dft':
solvent_features = pd.read_csv('data/suzuki/solvent_dft.csv')
elif solvent == 'mordred':
solvent_features = pd.read_csv('data/suzuki/solvent_mordred.csv')
elif solvent == 'ohe':
solvent_features = pd.read_csv('data/suzuki/solvent_ohe.csv')
# Build the descriptor set
index_list = [index['Electrophile_SMILES'],
index['Nucleophile_SMILES'],
index['Base_SMILES'],
index['Ligand_SMILES'],
index['Solvent_SMILES']]
lookup_table_list = [elec_features,
nuc_features,
base_features,
ligand_features,
solvent_features]
lookup_list = ['electrophile_SMILES',
'nucleophile_SMILES',
'base_SMILES',
'ligand_SMILES',
'solvent_SMILES']
experiment_index = build_experiment_index(index['entry'],
index_list,
lookup_table_list,
lookup_list)
experiment_index['yield'] = index['yield']
return experiment_index
def direct_arylation(base='ohe', ligand='ohe', solvent='ohe'):
"""
Load direct arylation data with different features.
"""
# SMILES index
index = pd.read_csv('data/direct_arylation/experiment_index.csv')
# Base features
if base == 'dft':
base_features = pd.read_csv('data/direct_arylation/base_dft.csv')
elif base == 'mordred':
base_features = pd.read_csv('data/direct_arylation/base_mordred.csv')
elif base == 'ohe':
base_features = pd.read_csv('data/direct_arylation/base_ohe.csv')
# Ligand features
if ligand == 'random-dft':
ligand_features = pd.read_csv('data/direct_arylation/ligand-random_dft.csv')
elif ligand == 'boltzmann-dft':
ligand_features = pd.read_csv('data/direct_arylation/ligand-boltzmann_dft.csv')
elif ligand == 'mordred':
ligand_features = pd.read_csv('data/direct_arylation/ligand_mordred.csv')
elif ligand == 'ohe':
ligand_features = | pd.read_csv('data/direct_arylation/ligand_ohe.csv') | pandas.read_csv |
import collections
from datetime import timedelta
from io import StringIO
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import needs_i8_conversion
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Interval,
IntervalIndex,
Series,
Timedelta,
TimedeltaIndex,
)
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops
def test_value_counts(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
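    # e.g. for obj = ["a", "b", "c"], np.repeat(obj, [1, 2, 3]) gives
    # ["a", "b", "b", "c", "c", "c"], so each distinct value's expected count is its 1-based position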
result = obj.value_counts()
counter = collections.Counter(obj)
expected = Series(dict(counter.most_common()), dtype=np.int64, name=obj.name)
expected.index = expected.index.astype(obj.dtype)
if isinstance(obj, pd.MultiIndex):
expected.index = Index(expected.index)
# TODO: Order of entries with the same count is inconsistent on CI (gh-32449)
if obj.duplicated().any():
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_null(null_obj, index_or_series_obj):
orig = index_or_series_obj
obj = orig.copy()
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif len(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif isinstance(orig, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated, whereas None wouldn't
counter = collections.Counter(obj.dropna())
expected = Series(dict(counter.most_common()), dtype=np.int64)
expected.index = expected.index.astype(obj.dtype)
result = obj.value_counts()
if obj.duplicated().any():
# TODO:
# Order of entries with the same count is inconsistent on CI (gh-32449)
expected = expected.sort_index()
result = result.sort_index()
tm.assert_series_equal(result, expected)
# can't use expected[null_obj] = 3 as
# IntervalIndex doesn't allow assignment
new_entry = Series({np.nan: 3}, dtype=np.int64)
expected = expected.append(new_entry)
result = obj.value_counts(dropna=False)
if obj.duplicated().any():
# TODO:
# Order of entries with the same count is inconsistent on CI (gh-32449)
expected = expected.sort_index()
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_value_counts_inferred(index_or_series):
klass = index_or_series
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = | Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"]) | pandas.Series |