import datetime
import os.path
import numpy as np
import pandas as pd
import pandas.tseries.offsets as offsets
import seaborn as sns
class StatusTypes:
backlog = "backlog"
accepted = "accepted"
complete = "complete"
def extend_dict(d, e):
r = d.copy()
r.update(e)
return r
def to_json_string(value):
if isinstance(value, pd.Timestamp):
return value.strftime("%Y-%m-%d")
if value in (None, np.NaN, pd.NaT):
return ""
try:
return str(value)
except TypeError:
return value
def get_extension(filename):
return os.path.splitext(filename)[1].lower()
def to_days_since_epoch(d):
return (d - datetime.date(1970, 1, 1)).days
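# Illustrative usage of the helpers above; the dictionary keys, filename, and
# dates are arbitrary example values.
def _example_helpers():
    defaults = {"window": 6, "chart": None}
    options = extend_dict(defaults, {"chart": "cfd.png"})    # {'window': 6, 'chart': 'cfd.png'}
    as_text = to_json_string(pd.Timestamp("2024-03-01"))     # '2024-03-01'
    epoch_days = to_days_since_epoch(datetime.date(2024, 3, 1))
    return options, as_text, epoch_days, get_extension("CFD.PNG")  # extension is lower-cased: '.png'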
class Chart:
_current_palette = None
@classmethod
def set_style(cls, context=None, style=None, palette=None, despine=True):
"""Defines chart style to use. By default, it is optimized for display
and printer, the `despine` value is used to remove the contour.
"""
if context is None:
context = "paper"
if style is None:
style = "darkgrid"
if palette is not None and len(palette) == 1:
palette = palette[0]
cls._current_palette = palette
sns.set(context=context, style=style, palette=cls._current_palette)
if despine:
sns.despine()
@classmethod
def use_palette(cls, palette=None, n_colors=None):
"""Defines the color palette to use and the number of colors in the
palette and return it to use with `with`.
"""
if palette is None:
palette = cls._current_palette
elif len(palette) == 1:
palette = palette[0]
return sns.color_palette(palette=palette, n_colors=n_colors)
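# Illustrative usage sketch of the Chart helpers above. The context, style,
# and palette names are arbitrary seaborn values; any valid ones work.
def _example_chart_style():
    import matplotlib.pyplot as plt
    Chart.set_style(context="talk", style="whitegrid", palette=["muted"], despine=False)
    with Chart.use_palette(n_colors=4):
        sns.lineplot(x=[0, 1, 2, 3], y=[1, 3, 2, 4])
    plt.close("all")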
def filter_by_columns(df, output_columns):
"""To restrict (and order) the value columns, pass a list of valid values
as `output_columns`.
"""
if output_columns:
return df[[s for s in output_columns if s in df.columns]]
return df
def filter_by_threshold(df, threshold):
"""To restrict (and order) the value columns, pass a threshold in percent
to filter. All columns under the threshold will be put in `Others` column.
"""
if threshold:
total = df.sum(axis=1)
threshold_mask = (df * 100.0 / total[1] < threshold).all()
df["Others"] = df.loc[:, threshold_mask].sum(axis=1)
threshold_mask["Others"] = False
return df.loc[:, ~threshold_mask]
return df
def filter_by_window(df, window):
"""To restrict to last N rows."""
if window:
return df[-window:]
return df
def sort_colums_by_last_row(df, ascending=False):
"""Reorder columns based on values of last row."""
if len(df.index) > 0 and len(df.columns) > 0:
return df.sort_values(
by=df.last_valid_index(), axis=1, ascending=ascending
)
return df
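# Illustrative usage of the filter helpers above on a toy dataframe; the
# column names and values are arbitrary.
def _example_filters():
    df = pd.DataFrame(
        {"To Do": [5, 4, 3], "In Progress": [1, 2, 2], "Done": [0, 1, 3]},
        index=pd.date_range("2024-01-01", periods=3, freq="D"),
    )
    df = filter_by_columns(df, ["Done", "In Progress"])  # restrict and reorder the columns
    df = filter_by_window(df, 2)                         # keep only the last two rows
    return sort_colums_by_last_row(df)                   # order columns by the values in the last row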
def breakdown_by_month(
df, start_column, end_column, key_column, value_column, aggfunc="count"
):
"""If `df` is a DataFrame of items that are valid/active between the
timestamps stored in `start_column` and `end_column`, and where each item
is uniquely identified by `key_column` and has a categorical value in
`value_column`, return a new DataFrame counting the number of items in
each month broken down by each unique value in `value_column`.
"""
def build_df(t):
start_date = getattr(t, start_column)
end_date = getattr(t, end_column)
key = getattr(t, key_column)
value = getattr(t, value_column)
if end_date is pd.NaT:
end_date = pd.Timestamp.today()
first_month = (
start_date.normalize().to_period("M").to_timestamp("D", "S")
)
last_month = end_date.normalize().to_period("M").to_timestamp("D", "S")
index = pd.date_range(first_month, last_month, freq="MS")
return pd.DataFrame(index=index, data=[[key]], columns=[value])
return (
pd.concat([build_df(t) for t in df.itertuples()], sort=True)
.resample("MS")
.agg(aggfunc)
)
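# Illustrative usage of breakdown_by_month with a toy backlog; items that span
# several months contribute a row to every month in which they are active.
# The item keys, statuses, and dates are arbitrary.
def _example_breakdown_by_month():
    items = pd.DataFrame({
        "key": ["ITEM-1", "ITEM-2", "ITEM-3"],
        "status": ["accepted", "complete", "complete"],
        "start": pd.to_datetime(["2024-01-05", "2024-02-03", "2024-01-10"]),
        "end": pd.to_datetime(["2024-01-28", "2024-02-20", "2024-01-31"]),
    })
    # Count, per calendar month, how many items carry each status
    return breakdown_by_month(items, "start", "end", "key", "status")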
def breakdown_by_month_sum_days(
df, start_column, end_column, value_column, aggfunc="sum"
):
"""If `df` is a DataFrame of items that are valid/active between the
timestamps stored in `start_column` and `end_column`, and where each has a
categorical value in `value_column`, return a new DataFrame summing the
overlapping days of items in each month broken down by each unique value in
`value_column`.
"""
def build_df(t):
start_date = getattr(t, start_column)
end_date = getattr(t, end_column)
value = getattr(t, value_column)
if end_date is pd.NaT:
end_date = pd.Timestamp.today()
days_range = pd.date_range(start_date, end_date, freq="D")
first_month = (
start_date.normalize().to_period("M").to_timestamp("D", "S")
)
last_month = end_date.normalize().to_period("M").to_timestamp("D", "S")
        index = pd.date_range(first_month, last_month, freq="MS")
import pandas as pd
from scripts.python.routines.manifest import get_manifest
import numpy as np
import os
from scripts.python.pheno.datasets.filter import filter_pheno, get_passed_fields
from scipy.stats import spearmanr
import matplotlib.pyplot as plt
from scripts.python.pheno.datasets.features import get_column_name, get_status_dict, get_sex_dict
from matplotlib import colors
from scipy.stats import mannwhitneyu
import plotly.graph_objects as go
from scripts.python.routines.plot.save import save_figure
from scripts.python.routines.plot.violin import add_violin_trace
from scripts.python.routines.plot.box import add_box_trace
from scripts.python.routines.plot.layout import add_layout
import pathlib
import seaborn as sns
from sklearn.preprocessing import StandardScaler
import plotly.express as px
from functools import reduce
import plotly
from sklearn.decomposition import PCA
from scripts.python.routines.plot.histogram import add_histogram_trace
dataset = "GSEUNN"
path = f"E:/YandexDisk/Work/pydnameth/datasets"
datasets_info = pd.read_excel(f"{path}/datasets.xlsx", index_col='dataset')
platform = datasets_info.loc[dataset, 'platform']
manifest = get_manifest(platform)
status_col = get_column_name(dataset, 'Status').replace(' ','_')
age_col = get_column_name(dataset, 'Age').replace(' ','_')
sex_col = get_column_name(dataset, 'Sex').replace(' ','_')
status_dict = get_status_dict(dataset)
status_passed_fields = status_dict['Control'] + status_dict['Case']
sex_dict = get_sex_dict(dataset)
path_save = f"{path}/{platform}/{dataset}/special/010_immuno_part3_and_part4_merge_with_age_and_sex"
pathlib.Path(f"{path_save}/figs").mkdir(parents=True, exist_ok=True)
continuous_vars = {}
categorical_vars = {status_col: [x.column for x in status_passed_fields], sex_col: list(sex_dict.values())}
pheno = pd.read_pickle(f"{path}/{platform}/{dataset}/pheno_xtd.pkl")
pheno = filter_pheno(dataset, pheno, continuous_vars, categorical_vars)
immuno3 = pd.read_excel(f"{path}/{platform}/{dataset}/data/immuno/part3.xlsx", index_col='Sample')
immuno4 = pd.read_excel(f"{path}/{platform}/{dataset}/data/immuno/part4.xlsx", index_col='Sample')
# Keep only samples whose IDs begin with one of the expected prefix letters;
# `str.match` anchors at the start and `+` requires at least one such letter.
immuno4 = immuno4.loc[immuno4.index.str.match(r'(L|F|I|A|S)+', na=False), :]
common_samples = set(immuno3.index.values).intersection(set(immuno4.index.values))
if len(common_samples) > 0:
    print("Subjects with common IDs:")
    print(common_samples)
immuno = pd.concat([immuno3, immuno4])
ages_sexes = pd.read_excel(f"{path}/{platform}/{dataset}/data/age_sex_L_H_A_Q_I_S_T.xlsx", index_col='Code')
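# Illustrative sketch of the pattern used above: before concatenating two
# measurement tables, check whether any sample IDs overlap, then join the
# combined table with age/sex metadata on the shared sample index. The frames,
# marker name, and IDs below are toy stand-ins, not the project's real files.
def _example_merge_with_metadata():
    part_a = pd.DataFrame({'CXCL9': [1200.0, 950.0]}, index=['L1', 'L2'])
    part_b = pd.DataFrame({'CXCL9': [1010.0]}, index=['I1'])
    overlap = set(part_a.index).intersection(part_b.index)
    if overlap:
        print(f"Subjects with common IDs: {overlap}")
    measurements = pd.concat([part_a, part_b])
    metadata = pd.DataFrame({'Age': [34, 56, 41], 'Sex': ['F', 'M', 'F']},
                            index=['L1', 'L2', 'I1'])
    return measurements.join(metadata, how='left')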
# This source code file is a part of SigProfilerTopography
# SigProfilerTopography is a tool included as part of the SigProfiler
# computational framework for comprehensive analysis of mutational
# signatures from next-generation sequencing of cancer genomes.
# SigProfilerTopography provides the downstream data analysis of
# mutations and extracted mutational signatures w.r.t.
# nucleosome occupancy, replication time, strand bias and processivity.
# Copyright (C) 2018-2020 <NAME>
import os
import numpy as np
import statsmodels.stats.multitest
# import matplotlib
# BACKEND = 'Agg'
# if matplotlib.get_backend().lower() != BACKEND.lower():
# # If backend is not set properly a call to describe will hang
# matplotlib.use(BACKEND)
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import gridspec
import pandas as pd
from SigProfilerTopography.source.commons.TopographyCommons import natural_key
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_STRAND
from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_STRAND
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING
from SigProfilerTopography.source.commons.TopographyCommons import LEADING
from SigProfilerTopography.source.commons.TopographyCommons import six_mutation_types
from SigProfilerTopography.source.commons.TopographyCommons import STRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import DATA
from SigProfilerTopography.source.commons.TopographyCommons import FIGURE
from SigProfilerTopography.source.commons.TopographyCommons import SCATTER_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import BAR_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import CIRCLE_BAR_PLOTS
from SigProfilerTopography.source.commons.TopographyCommons import SAMPLES
from SigProfilerTopography.source.commons.TopographyCommons import TABLES
from SigProfilerTopography.source.commons.TopographyCommons import SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIPTIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_P_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_P_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_P_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING_Q_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC_Q_VALUE
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LEADING_REAL_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import LEADING_SIMULATIONS_MEAN_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import GENIC
from SigProfilerTopography.source.commons.TopographyCommons import INTERGENIC
from SigProfilerTopography.source.commons.TopographyCommons import percentage_numbers
from SigProfilerTopography.source.commons.TopographyCommons import percentage_strings
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_10_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_20_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_30_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_50_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_75_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import AT_LEAST_100_PERCENT_DIFF
from SigProfilerTopography.source.commons.TopographyCommons import ID
from SigProfilerTopography.source.commons.TopographyCommons import DBS
from SigProfilerTopography.source.commons.TopographyCommons import SBS_CONTEXTS
from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL
from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT
from SigProfilerTopography.source.commons.TopographyCommons import EXCEL_FILES
from SigProfilerTopography.source.commons.TopographyCommons import write_excel_file
from SigProfilerTopography.source.commons.TopographyCommons import NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT
SIGNATURE = 'signature'
CANCER_TYPE = 'cancer_type'
MUTATION_TYPE = 'mutation_type'
TYPE = 'type'
SIGNIFICANT_STRAND = 'significant_strand'
SIGNIFICANCE_LEVEL = 0.05
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofSubsDict
from SigProfilerTopography.source.commons.TopographyCommons import getSample2NumberofIndelsDict
from SigProfilerTopography.source.commons.TopographyCommons import Sample2NumberofDinucsDictFilename
from SigProfilerTopography.source.commons.TopographyCommons import getSample2SubsSignature2NumberofMutationsDict
from SigProfilerTopography.source.commons.TopographyCommons import getSample2IndelsSignature2NumberofMutationsDict
from SigProfilerTopography.source.commons.TopographyCommons import Sample2DinucsSignature2NumberofMutationsDictFilename
transcriptionStrands = [TRANSCRIBED_STRAND, UNTRANSCRIBED_STRAND]
genicVersusIntergenicStrands=[GENIC, INTERGENIC]
replicationStrands = [LAGGING, LEADING]
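# Illustrative sketch (not necessarily this module's own pipeline): strand-bias
# p-values are converted to BH/FDR-adjusted q-values, and q-values below
# SIGNIFICANCE_LEVEL are treated as significant. The p-values are arbitrary.
def _example_bh_adjustment(p_values=(0.001, 0.02, 0.2, 0.04)):
    rejected, q_values, _, _ = statsmodels.stats.multitest.multipletests(
        p_values, alpha=SIGNIFICANCE_LEVEL, method='fdr_bh')
    return list(zip(p_values, q_values, rejected))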
########################################################################
#New way
#For Mutation Types
def plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(sample,numberofMutations,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
outputDir, jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)',fontstyle='normal', fontsize=12, fontweight='bold')
    # Add some extra space via xlim if necessary
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
# type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type',
# 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'transcribed_versus_untranscribed_p_value','transcribed_versus_untranscribed_q_value',
# 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
# 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list' ]]
#
# type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type',
# 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',
# 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list',
# 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]]
########################################################################
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for mutationType in six_mutation_types:
##################################################################
transcribed_real_count=0
untranscribed_real_count=0
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==mutationType]['Transcribed_real_count'].values.size>0):
transcribed_real_count= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == mutationType]['Transcribed_real_count'].values[0]
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==mutationType]['UnTranscribed_real_count'].values.size>0):
untranscribed_real_count= type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == mutationType]['UnTranscribed_real_count'].values[0]
if (transcribed_real_count>0 and untranscribed_real_count>0):
transcriptionRatiosDict[mutationType] = np.log10(transcribed_real_count/untranscribed_real_count)
##################################################################
##################################################################
lagging_real_count = 0
leading_real_count = 0
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Lagging_real_count'].values.size > 0):
lagging_real_count = type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Lagging_real_count'].values[0]
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Leading_real_count'].values.size > 0):
leading_real_count = type_lagging_versus_leading_df[type_lagging_versus_leading_df['type'] == mutationType]['Leading_real_count'].values[0]
if (lagging_real_count>0 and leading_real_count>0):
replicationRatiosDict[mutationType] = np.log10(lagging_real_count/leading_real_count)
##################################################################
##################################################################
if (mutationType in replicationRatiosDict) and (mutationType in transcriptionRatiosDict):
plt.scatter(replicationRatiosDict[mutationType], transcriptionRatiosDict[mutationType], label=mutationType)
##################################################################
########################################################################
legend = plt.legend(loc='upper left', frameon=True, fancybox =False,labels=six_mutation_types, bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_mutation_types_%s_scatter_plot.png' %(STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_mutation_types_%s_%d_%s_scatter_plot.png' %(sample,numberofMutations,STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
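# Illustrative sketch: calling the scatter-plot function above with toy count
# dataframes. Only the columns the function reads ('type' and the real-count
# columns) are provided; the counts and the output locations are arbitrary.
def _example_mutation_type_scatter_plot(outputDir, jobname):
    toy_types = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G']
    type_transcribed_versus_untranscribed_df = pd.DataFrame({
        'type': toy_types,
        'Transcribed_real_count': [120, 80, 300, 60, 150, 40],
        'UnTranscribed_real_count': [100, 90, 250, 70, 140, 45]})
    type_lagging_versus_leading_df = pd.DataFrame({
        'type': toy_types,
        'Lagging_real_count': [110, 85, 280, 65, 160, 42],
        'Leading_real_count': [105, 88, 270, 62, 145, 44]})
    # The figure is saved under outputDir/jobname/FIGURE/STRANDBIAS/SCATTER_PLOTS,
    # so that directory must already exist when sample is None.
    os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, SCATTER_PLOTS), exist_ok=True)
    plot_mutation_types_transcription_log10_ratio_replication_log_10_ratio_using_dataframes(
        None, 0, type_transcribed_versus_untranscribed_df, type_lagging_versus_leading_df, outputDir, jobname)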
########################################################################
#Old way
#For Mutation Types
def plot_ncomms11383_Supp_FigG_AllMutationTypes_TranscriptionLog10Ratio_ReplicationLog10Ratio(sample,numberofMutations,type2TranscriptionStrand2CountDict,type2ReplicationStrand2CountDict,outputDir,jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)',fontstyle='normal', fontsize=12, fontweight='bold')
    # Add some extra space via xlim if necessary
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
# plt.tick_params(
# axis='y', # changes apply to the x-axis
# which='both', # both major and minor ticks are affected
# left='off' # ticks along the bottom edge are off
# )
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
########################################################################
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for mutationType in six_mutation_types:
if (mutationType in type2TranscriptionStrand2CountDict) and (mutationType in type2ReplicationStrand2CountDict):
if ((TRANSCRIBED_STRAND in type2TranscriptionStrand2CountDict[mutationType]) and (UNTRANSCRIBED_STRAND in type2TranscriptionStrand2CountDict[mutationType])):
transcriptionRatiosDict[mutationType]= np.log10(type2TranscriptionStrand2CountDict[mutationType][TRANSCRIBED_STRAND]/type2TranscriptionStrand2CountDict[mutationType][UNTRANSCRIBED_STRAND])
if ((LAGGING in type2ReplicationStrand2CountDict[mutationType]) and (LEADING in type2ReplicationStrand2CountDict[mutationType])):
replicationRatiosDict[mutationType] = np.log10(type2ReplicationStrand2CountDict[mutationType][LAGGING]/type2ReplicationStrand2CountDict[mutationType][LEADING])
if (mutationType in replicationRatiosDict) and (mutationType in transcriptionRatiosDict):
plt.scatter(replicationRatiosDict[mutationType],transcriptionRatiosDict[mutationType], label=mutationType)
########################################################################
legend = plt.legend(loc='upper left', frameon=True, fancybox =False,labels=six_mutation_types, bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_mutation_types_%s_scatter_plot.png' %(STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_mutation_types_%s_%d_%s_scatter_plot.png' %(sample,numberofMutations,STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
########################################################################
#July 7, 2020
def plot_types_transcription_log10_ratio_replication_log10_ratio_using_dataframes(signatureType,
sample,
numberofMutations,
type_transcribed_versus_untranscribed_df,
type_lagging_versus_leading_df,
signature_cutoff_numberofmutations_averageprobability_df,
outputDir,
jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
    # Add some extra space via xlim if necessary
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
#################################################################################################
#First check whether we have this signature or not
# type_transcribed_versus_untranscribed_df=type_transcribed_versus_untranscribed_df[['cancer_type', 'type',
# 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count', 'transcribed_versus_untranscribed_p_value','transcribed_versus_untranscribed_q_value',
# 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
# 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list' ]]
transcribed_real_count=0
untranscribed_real_count=0
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==signature]['Transcribed_real_count'].values.size>0):
transcribed_real_count=type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == signature]['Transcribed_real_count'].values[0]
if (type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type']==signature]['UnTranscribed_real_count'].values.size>0):
untranscribed_real_count=type_transcribed_versus_untranscribed_df[type_transcribed_versus_untranscribed_df['type'] == signature]['UnTranscribed_real_count'].values[0]
if (transcribed_real_count+untranscribed_real_count>=SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
transcriptionRatiosDict[signature] = np.log10(transcribed_real_count/untranscribed_real_count)
#################################################################################################
#################################################################################################
# First check whether we have this signature or not
# type_lagging_versus_leading_df=type_lagging_versus_leading_df[['cancer_type', 'type',
# 'Lagging_real_count', 'Leading_real_count', 'Lagging_mean_sims_count', 'Leading_mean_sims_count', 'lagging_versus_leading_p_value', 'lagging_versus_leading_q_value',
# 'Lagging_real_count.1', 'Lagging_mean_sims_count.1', 'Lagging_min_sims_count', 'Lagging_max_sims_count', 'Lagging_sims_count_list',
# 'Leading_real_count.1', 'Leading_mean_sims_count.1', 'Leading_min_sims_count', 'Leading_max_sims_count', 'Leading_sims_count_list' ]]
lagging_real_count=0
leading_real_count = 0
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Lagging_real_count'].values.size>0):
lagging_real_count=type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Lagging_real_count'].values[0]
if (type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Leading_real_count'].values.size>0):
leading_real_count=type_lagging_versus_leading_df[type_lagging_versus_leading_df['type']==signature]['Leading_real_count'].values[0]
if (lagging_real_count+leading_real_count>=SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
replicationRatiosDict[signature] = np.log10(lagging_real_count/leading_real_count)
#################################################################################################
if (transcriptionRatiosDict and replicationRatiosDict):
signaturesShownInLegend = []
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
if ((signature in replicationRatiosDict.keys()) and (signature in transcriptionRatiosDict.keys())):
signaturesShownInLegend.append(signature)
plt.scatter(replicationRatiosDict[signature], transcriptionRatiosDict[signature], label=signature)
legend = plt.legend(loc='upper left', frameon=True, fancybox=False, labels=signaturesShownInLegend,
bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_%s_signatures_%s_scatter_plot.png' % (signatureType, STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_%s_signatures_%s_%d_%s_scatter_plot.png' % (signatureType, sample, numberofMutations, STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
########################################################################
#May 9, 2018 starts
#For Signatures
def plot_ncomms11383_Supp_FigH_AllSignatures_TranscriptionLog10Ratio_ReplicationLog10Ratio(
signatureType,
sample,
numberofMutations,
signature2TranscriptionStrand2CountDict,
signature2ReplicationStrand2CountDict,
signature_cutoff_numberofmutations_averageprobability_df,
outputDir,
jobname):
fig = plt.figure(figsize=(8,8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom','top','left','right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.3, 0.3)
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right+0.02),(bottom+top-0.08), 'Transcribed',horizontalalignment='center',verticalalignment='center',rotation='vertical',transform=ax.transAxes)
plt.text((right+0.02),(bottom+0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
if (sample is not None):
plt.title(sample, fontsize=15, fontweight='bold')
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12, fontweight='bold')
    # Add some extra space via xlim if necessary
plt.xlim(-0.3, 0.3)
plt.ylim(-0.3, 0.3)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
yticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.yticks(yticks, yticklabels)
xticks = [-0.2, -0.1, 0.0, 0.1, 0.2]
xticklabels = ['-0.2', '-0.1', '0.0', '0.1', '0.2']
plt.xticks(xticks, xticklabels)
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
#################################################################################################
#First check whether we have this signature or not
if ((signature in signature2TranscriptionStrand2CountDict) and (TRANSCRIBED_STRAND in (signature2TranscriptionStrand2CountDict[signature])) and
(UNTRANSCRIBED_STRAND in (signature2TranscriptionStrand2CountDict[signature])) ):
if ((signature2TranscriptionStrand2CountDict[signature][TRANSCRIBED_STRAND]+signature2TranscriptionStrand2CountDict[signature][UNTRANSCRIBED_STRAND]) >= SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
transcriptionRatiosDict[signature]= np.log10(signature2TranscriptionStrand2CountDict[signature][TRANSCRIBED_STRAND]/signature2TranscriptionStrand2CountDict[signature][UNTRANSCRIBED_STRAND])
#################################################################################################
#################################################################################################
# First check whether we have this signature or not
if ((signature in signature2ReplicationStrand2CountDict) and (LAGGING in (signature2ReplicationStrand2CountDict[signature])) and
(LEADING in (signature2ReplicationStrand2CountDict[signature]))):
if ((signature2ReplicationStrand2CountDict[signature][LAGGING]+signature2ReplicationStrand2CountDict[signature][LEADING])>= SUBS_STRAND_BIAS_NUMBER_OF_MUTATIONS_THRESHOLD):
replicationRatiosDict[signature] = np.log10(signature2ReplicationStrand2CountDict[signature][LAGGING]/signature2ReplicationStrand2CountDict[signature][LEADING])
#################################################################################################
if (transcriptionRatiosDict and replicationRatiosDict):
signaturesShownInLegend = []
for signature in signature_cutoff_numberofmutations_averageprobability_df['signature'].unique():
if ((signature in replicationRatiosDict.keys()) and (signature in transcriptionRatiosDict.keys())):
signaturesShownInLegend.append(signature)
plt.scatter(replicationRatiosDict[signature], transcriptionRatiosDict[signature], label=signature)
legend = plt.legend(loc='upper left', frameon=True, fancybox=False, labels=signaturesShownInLegend,
bbox_to_anchor=(-0.0095, 1.0095))
legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if sample is None:
figureName = 'all_%s_signatures_%s_scatter_plot.png' % (signatureType, STRANDBIAS)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS,SCATTER_PLOTS,figureName)
else:
figureName = 'all_%s_signatures_%s_%d_%s_scatter_plot.png' % (
signatureType, sample, numberofMutations, STRANDBIAS)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS,SCATTER_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, sample, STRANDBIAS, SCATTER_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
########################################################################
#MutationTypeBased SampleBased Figures
def plot_ncomms11383_Supp_FigE_MutationTypeBased_AllSamples_TranscriptionLog10Ratio_ReplicationLog10Ratio(
type2Sample2TranscriptionStrand2CountDict,
type2Sample2ReplicationStrand2CountDict,
outputDir,
jobname,
isFigureAugmentation):
mutationType2ColorDict = {'C>A': 'blue', 'C>G':'black', 'C>T':'red', 'T>A':'gray', 'T>C':'green', 'T>G':'pink'}
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for mutationType in six_mutation_types:
#initialization
if mutationType not in transcriptionRatiosDict:
transcriptionRatiosDict[mutationType] = {}
if mutationType not in replicationRatiosDict:
replicationRatiosDict[mutationType] = {}
#Fill the dictionaries
if mutationType in type2Sample2TranscriptionStrand2CountDict:
for sample in type2Sample2TranscriptionStrand2CountDict[mutationType].keys():
if ((TRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[mutationType][sample].keys()) and (UNTRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[mutationType][sample].keys())):
transcriptionRatiosDict[mutationType][sample]= np.log10(type2Sample2TranscriptionStrand2CountDict[mutationType][sample][TRANSCRIBED_STRAND]/type2Sample2TranscriptionStrand2CountDict[mutationType][sample][UNTRANSCRIBED_STRAND])
if mutationType in type2Sample2ReplicationStrand2CountDict:
for sample in type2Sample2ReplicationStrand2CountDict[mutationType].keys():
if ((LAGGING in type2Sample2ReplicationStrand2CountDict[mutationType][sample].keys()) and (LEADING in type2Sample2ReplicationStrand2CountDict[mutationType][sample].keys())):
replicationRatiosDict[mutationType][sample] = np.log10(type2Sample2ReplicationStrand2CountDict[mutationType][sample][LAGGING]/type2Sample2ReplicationStrand2CountDict[mutationType][sample][LEADING])
for mutationType in six_mutation_types:
fig = plt.figure(figsize=(8, 8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom', 'top', 'left', 'right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.65, 0.65)
plt.title(mutationType, fontsize=15, fontweight='bold')
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + top - 0.08), 'Transcribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + 0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
        # Add some extra space via xlim if necessary
plt.xlim(-0.65, 0.65)
plt.ylim(-0.65, 0.65)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
yticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.yticks(yticks, yticklabels)
xticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
xticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.xticks(xticks, xticklabels)
if (mutationType in type2Sample2TranscriptionStrand2CountDict):
for sample in type2Sample2TranscriptionStrand2CountDict[mutationType].keys():
if ((sample in replicationRatiosDict[mutationType].keys()) and (sample in transcriptionRatiosDict[mutationType].keys())):
plt.scatter(replicationRatiosDict[mutationType][sample],transcriptionRatiosDict[mutationType][sample], facecolor='none', color=mutationType2ColorDict[mutationType])
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if (isFigureAugmentation):
plt.title(jobname + ' ' + mutationType)
newMutationType = mutationType.replace('>', '2')
figureName = newMutationType + '_MutationType_' + STRANDBIAS + '.png'
figureFile = os.path.join(outputDir,jobname,FIGURE,STRANDBIAS,SCATTER_PLOTS,figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
########################################################################
#SignatureBased SampleBased Figures
#Sig26 is very different
def plot_ncomms11383_Supp_FigF_SignatureBased_AllSamples_TranscriptionLog10Ratio_ReplicationLog10Ratio(type2Sample2TranscriptionStrand2CountDict,type2Sample2ReplicationStrand2CountDict,signatures,outputDir,jobname,isFigureAugmentation):
transcriptionRatiosDict = {}
replicationRatiosDict = {}
for signature in signatures:
# initialization
if signature not in transcriptionRatiosDict:
transcriptionRatiosDict[signature] = {}
if signature not in replicationRatiosDict:
replicationRatiosDict[signature] = {}
# Fill the dictionaries
if signature in type2Sample2TranscriptionStrand2CountDict:
for sample in type2Sample2TranscriptionStrand2CountDict[signature].keys():
if (UNTRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[signature][sample]) and (TRANSCRIBED_STRAND in type2Sample2TranscriptionStrand2CountDict[signature][sample]):
transcriptionRatiosDict[signature][sample] = np.log10(type2Sample2TranscriptionStrand2CountDict[signature][sample][TRANSCRIBED_STRAND] /type2Sample2TranscriptionStrand2CountDict[signature][sample][UNTRANSCRIBED_STRAND])
# print(signature, sample)
# print(signature2Sample2TranscriptionStrand2CountDict[signature][sample][TRANSCRIBED_STRAND])
# print(signature2Sample2TranscriptionStrand2CountDict[signature][sample][UNTRANSCRIBED_STRAND])
# print(signature,sample,transcriptionRatiosDict[signature][sample])
if signature in type2Sample2ReplicationStrand2CountDict:
for sample in type2Sample2ReplicationStrand2CountDict[signature].keys():
if (LAGGING in type2Sample2ReplicationStrand2CountDict[signature][sample]) and (LEADING in type2Sample2ReplicationStrand2CountDict[signature][sample]):
replicationRatiosDict[signature][sample] = np.log10(type2Sample2ReplicationStrand2CountDict[signature][sample][LAGGING] /type2Sample2ReplicationStrand2CountDict[signature][sample][LEADING])
for signature in signatures:
if (len(replicationRatiosDict[signature].keys())>0 and len(transcriptionRatiosDict[signature].keys())>0):
fig = plt.figure(figsize=(8, 8), facecolor=None)
plt.style.use('ggplot')
# build a rectangle in axes coords
left, width = .0, 1.
bottom, height = .0, 1.
right = left + width
top = bottom + height
# This code makes the background white.
# Always put these statements after plt.figure
ax = plt.gca()
ax.set_facecolor('white')
for edge_i in ['bottom', 'top', 'left', 'right']:
ax.spines[edge_i].set_edgecolor("black")
ax.spines[edge_i].set_linewidth(1)
ax.spines[edge_i].set_bounds(-0.65, 0.65)
plt.title(signature, fontsize=15, fontweight='bold')
plt.text(0.05, 1.02, 'Leading', ha='center', va='center', transform=ax.transAxes)
plt.text(0.95, 1.02, 'Lagging', ha='center', va='center', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + top - 0.08), 'Transcribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.text((right + 0.02), (bottom + 0.1), 'Untranscribed', horizontalalignment='center',verticalalignment='center', rotation='vertical', transform=ax.transAxes)
plt.xlabel('Lagging/leading replication strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
plt.ylabel('Transcribed/untranscribed strand\nratio(log10)', fontstyle='normal', fontsize=12,fontweight='bold')
            # Add some extra space via xlim if necessary
plt.xlim(-0.65, 0.65)
plt.ylim(-0.65, 0.65)
plt.tick_params(axis='y', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='y', which='minor', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='major', labelsize=10, width=1, length=10)
plt.tick_params(axis='x', which='minor', labelsize=10, width=1, length=10)
yticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
yticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.yticks(yticks, yticklabels)
xticks = [-0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6]
xticklabels = ['-0.6', '-0.4', '-0.2', '0.0', '0.2', '0.4', '0.6']
plt.xticks(xticks, xticklabels)
for sample in type2Sample2TranscriptionStrand2CountDict[signature].keys():
if (sample in replicationRatiosDict[signature]) and (sample in transcriptionRatiosDict[signature]):
plt.scatter(replicationRatiosDict[signature][sample], transcriptionRatiosDict[signature][sample],facecolor='none',color='green')
plt.axvline(x=0.0, color='gray', linestyle='--')
plt.axhline(y=0.0, color='gray', linestyle='--')
if (isFigureAugmentation):
plt.title(jobname + ' ' + signature)
figureName = signature.replace(' ','') + '_Signature_' + STRANDBIAS + '.png'
figureFile = os.path.join(outputDir,jobname,FIGURE,STRANDBIAS,SCATTER_PLOTS,figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
########################################################################
def is_there_at_least_10perc_diff(strand1_value, strand2_value):
diff = abs(strand1_value - strand2_value)
if (diff >= (strand1_value/10)) or (diff >= (strand2_value/10)):
return True
else:
return False
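# Illustrative check of the 10% rule above: the difference must be at least one
# tenth of either strand's count. 1100 vs. 1000 passes (diff 100 >= 1000/10),
# while 1050 vs. 1000 does not (diff 50 < 100 and 50 < 105).
def _example_10perc_diff():
    return (is_there_at_least_10perc_diff(1100, 1000),   # True
            is_there_at_least_10perc_diff(1050, 1000))   # False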
# Only this method supports simulations
# key can be a sample or a signature
def plotStrandBiasFigureWithBarPlots(outputDir,
jobname,
numberofSimulations,
key,
isKeySample,
numberofMutations,
N,
x_axis_labels,
strand1_values,
strand2_values,
strand1_simulations_median_values,
strand2_simulations_median_values,
fdr_bh_adjusted_pvalues,
strand1Name,
strand2Name,
mutationsOrSignatures,
color1,
color2,
figureName,
width,
plot_mode):
    # Here we can take the difference between strand1_values and strand2_values into account when deciding on significance
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
# the x locations for the groups
ind = np.arange(N)
fig, ax = plt.subplots(figsize=(16,10),dpi=300)
legend=None
rects1=None
rects2=None
rects3=None
rects4=None
rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)
rects2 = ax.bar(ind + width, strand2_values, width=width, edgecolor='black', color=color2)
if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
rects3 = ax.bar(ind+ 2*width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
rects4 = ax.bar(ind +3*width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///')
# add some text for labels, title and axes ticks
if plot_mode==PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL:
ax.tick_params(axis='x', labelsize=35)
ax.tick_params(axis='y', labelsize=35)
locs, labels = plt.yticks()
ax.set_ylim(0, locs[-1] + 5000)
# To make the bar width not too wide
if len(ind) < 6:
maxn = 6
ax.set_xlim(-0.5, maxn - 0.5)
# Set title
if key is not None:
ax.set_title('%s %s vs. %s %s' %(key,strand1Name,strand2Name,mutationsOrSignatures), fontsize=20,fontweight='bold')
else:
ax.set_title('%s vs. %s %s' %(strand1Name,strand2Name,mutationsOrSignatures), fontsize=20,fontweight='bold')
# Set x tick labels
if len(x_axis_labels) > 6:
ax.set_xticklabels(x_axis_labels, fontsize=35, rotation=90)
else:
ax.set_xticklabels(x_axis_labels, fontsize=35)
# Set the ylabel
plt.ylabel('Number of single base substitutions', fontsize=35, fontweight='normal')
# set the x axis tick locations
if (numberofSimulations > 0):
ax.set_xticks(ind + (3 * width) / 2)
realStrand1Name = 'Real %s' % (strand1Name)
realStrand2Name = 'Real %s' % (strand2Name)
simulationsStrand1Name = 'Simulated %s' % (strand1Name)
simulationsStrand2Name = 'Simulated %s' % (strand2Name)
if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):
legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 25}, ncol=1, loc='best')
else:
# Old way with no simulations
ax.set_xticks(ind + width / 2)
if ((rects1 is not None) and (rects2 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0)):
legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 25}, ncol=1, loc='upper right')
elif plot_mode == PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_MANUSCRIPT:
# set axis ticks
# ax.tick_params(axis='both', which='both', length=0)
ax.tick_params(axis='x', which='both', length=0)
ax.tick_params(axis='y', which='both', length=0)
# set axis labels
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
if (numberofSimulations > 0):
realStrand1Name = 'Real %s' % (strand1Name)
realStrand2Name = 'Real %s' % (strand2Name)
simulationsStrand1Name = 'Simulated %s' % (strand1Name)
simulationsStrand2Name = 'Simulated %s' % (strand2Name)
if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):
legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 30}, ncol=1, loc='best')
else:
if ((rects1 is not None) and (rects2 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0)):
legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 35},loc='upper right')
# To make the barplot background white
ax.set_facecolor('white')
    # To make the spines black, like a rectangle with a black stroke
ax.spines["bottom"].set_color('black')
ax.spines["left"].set_color('black')
ax.spines["top"].set_color('black')
ax.spines["right"].set_color('black')
if (legend is not None):
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
    # Add a star above the bars where the difference between the number of
    # mutations on the two strands is significant: the BH-adjusted q-value must
    # fall below the relevant threshold and the strands must differ by at least 10%
    if fdr_bh_adjusted_pvalues is not None:
        for fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
            # Place the marker above the taller of the two bars
            y_value = max(rect1.get_height(), rect2.get_height())
            x_value = rect1.get_x() + rect1.get_width()
            # Number of points between the bar and the marker
            space = 3
            # Vertical alignment for positive values
            va = 'bottom'
            # If the bar is negative, place the marker below it and align it at the top
            if y_value < 0:
                space *= -1
                va = 'top'
            # Choose the number of stars from the BH-adjusted q-value
            stars = None
            if (fdr_bh_adjusted_pvalue is not None) and is_there_at_least_10perc_diff(strand1_value, strand2_value):
                if fdr_bh_adjusted_pvalue <= 0.0001:
                    stars = '***'
                elif fdr_bh_adjusted_pvalue <= 0.001:
                    stars = '**'
                elif fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL:
                    stars = '*'
            if stars is not None:
                plt.annotate(
                    stars,                       # significance marker
                    (x_value, y_value),          # place the marker at the top of the bar
                    xytext=(0, space),           # vertically shift the marker by `space`
                    textcoords="offset points",  # interpret `xytext` as an offset in points
                    ha='center',                 # horizontally center the marker
                    va=va,                       # above positive bars, below negative bars
                    fontsize=20)
#########################################################################################################
if (key is None):
figureName = '%s_bar_plot.png' %(figureName)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS, figureName)
elif (not isKeySample):
figureName = '%s_%s_bar_plot.png' %(key,figureName)
figureFile = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS, figureName)
else:
figureName = '%s_%s_%d_bar_plot.png' %(figureName,key,numberofMutations)
os.makedirs(os.path.join(outputDir, jobname, FIGURE, SAMPLES, key, STRANDBIAS, BAR_PLOTS), exist_ok=True)
figureFile = os.path.join(outputDir, jobname, FIGURE, SAMPLES, key, STRANDBIAS, BAR_PLOTS, figureName)
fig.savefig(figureFile)
plt.cla()
plt.close(fig)
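# Illustrative sketch: calling the bar-plot function above with toy counts for
# the six substitution types. All counts, q-values, colors, and the figure name
# are arbitrary; real callers pass values derived from the topography analysis.
def _example_strand_bias_bar_plot(outputDir, jobname):
    toy_types = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G']
    # The figure is saved under outputDir/jobname/FIGURE/STRANDBIAS/BAR_PLOTS,
    # so make sure that directory exists when key is None.
    os.makedirs(os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, BAR_PLOTS), exist_ok=True)
    plotStrandBiasFigureWithBarPlots(outputDir,
                                     jobname,
                                     numberofSimulations=100,
                                     key=None,
                                     isKeySample=False,
                                     numberofMutations=0,
                                     N=len(toy_types),
                                     x_axis_labels=toy_types,
                                     strand1_values=[120, 80, 300, 60, 150, 40],
                                     strand2_values=[100, 90, 250, 70, 140, 45],
                                     strand1_simulations_median_values=[110, 85, 275, 65, 145, 42],
                                     strand2_simulations_median_values=[105, 88, 270, 66, 142, 43],
                                     fdr_bh_adjusted_pvalues=[0.00005, 0.2, 0.0005, 0.5, 0.03, 0.3],
                                     strand1Name=TRANSCRIBED_STRAND,
                                     strand2Name=UNTRANSCRIBED_STRAND,
                                     mutationsOrSignatures='Mutation Types',
                                     color1='royalblue',
                                     color2='yellowgreen',
                                     figureName='example_transcription_strand_bias',
                                     width=0.2,
                                     plot_mode=PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL)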
# June 2, 2021
def plot_circle_plot_in_given_axis(ax,
percentage_strings,
sbs_signature,
six_mutation_types,
xticklabels_list,
signature2mutation_type2strand2percentagedict):
strand_bias_list=[LAGGING_VERSUS_LEADING, TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC]
# make aspect ratio square
ax.set_aspect(1.0)
# set title
title = '%s Strand Bias' %(sbs_signature)
ax.text(len(percentage_strings) * 3, len(strand_bias_list) + 2.5, title, horizontalalignment='center',fontsize=60, fontweight='bold', fontname='Arial')
# Colors are from SigProfilerPlotting tool to be consistent
colors = [[3 / 256, 189 / 256, 239 / 256],
[1 / 256, 1 / 256, 1 / 256],
[228 / 256, 41 / 256, 38 / 256],
[203 / 256, 202 / 256, 202 / 256],
[162 / 256, 207 / 256, 99 / 256],
[236 / 256, 199 / 256, 197 / 256]]
# Put rectangles
x = 0
for i in range(0, len(six_mutation_types), 1):
ax.text((x + (len(percentage_strings) / 2) - 0.75), len(strand_bias_list) + 1.5, six_mutation_types[i],fontsize=55, fontweight='bold', fontname='Arial')
ax.add_patch(plt.Rectangle((x + .0415, len(strand_bias_list) + 0.75), len(percentage_strings) - (2 * .0415), .5,facecolor=colors[i], clip_on=False))
ax.add_patch(plt.Rectangle((x, 0), len(percentage_strings), len(strand_bias_list), facecolor=colors[i], zorder=0,alpha=0.25, edgecolor='grey'))
x += len(percentage_strings)
# CODE GOES HERE TO CENTER X-AXIS LABELS...
ax.set_xlim([0, len(six_mutation_types) * len(percentage_strings)])
ax.set_xticklabels([])
ax.tick_params(axis='x', which='minor', length=0, labelsize=35)
# major ticks
ax.set_xticks(np.arange(0, len(six_mutation_types) * len(percentage_strings), 1))
# minor ticks
ax.set_xticks(np.arange(0, len(six_mutation_types) * len(percentage_strings), 1) + 0.5, minor=True)
ax.set_xticklabels(xticklabels_list, minor=True)
ax.xaxis.set_label_position('top')
ax.xaxis.set_ticks_position('top')
    ax.tick_params(
        axis='x', # changes apply to the x-axis
        which='major', # only major ticks are affected
        bottom=False, # ticks along the bottom edge are off
        top=False) # ticks along the top edge are off
# CODE GOES HERE TO CENTER Y-AXIS LABELS...
ax.set_ylim([0, len(strand_bias_list)])
ax.set_yticklabels([])
ax.tick_params(axis='y', which='minor', length=0, labelsize=40)
# major ticks
ax.set_yticks(np.arange(0, len(strand_bias_list), 1))
# minor ticks
ax.set_yticks(np.arange(0, len(strand_bias_list), 1) + 0.5, minor=True)
ax.set_yticklabels(['', sbs_signature,''], minor=True) # fontsize
    ax.tick_params(
        axis='y', # changes apply to the y-axis
        which='major', # only major ticks are affected
        left=False) # ticks along the left edge are off
# Gridlines based on major ticks
ax.grid(which='major', color='black', zorder=3)
# Put the legend
legend_elements = [
Line2D([0], [0], marker='o', color='white', label=GENIC, markerfacecolor='cyan', markersize=40),
Line2D([0], [0], marker='o', color='white', label=INTERGENIC, markerfacecolor='gray', markersize=40),
Line2D([0], [0], marker='o', color='white', label=TRANSCRIBED_STRAND, markerfacecolor='royalblue',markersize=40),
Line2D([0], [0], marker='o', color='white', label=UNTRANSCRIBED_STRAND, markerfacecolor='yellowgreen',markersize=40),
Line2D([0], [0], marker='o', color='white', label=LAGGING, markerfacecolor='indianred', markersize=40),
Line2D([0], [0], marker='o', color='white', label=LEADING, markerfacecolor='goldenrod', markersize=40)]
legend = ax.legend(handles=legend_elements, ncol=len(legend_elements), bbox_to_anchor=(0.5, 0), loc='upper center',fontsize=40)
# legend.get_frame().set_linewidth(1)
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
for percentage_diff_index, percentage_string in enumerate(percentage_strings):
for mutation_type_index, mutation_type in enumerate(six_mutation_types):
# for row_sbs_signature_index, row_sbs_signature in enumerate(rows_sbs_signatures):
# strand_bias_list = [TRANSCRIBED_VERSUS_UNTRANSCRIBED, GENIC_VERSUS_INTERGENIC, LAGGING_VERSUS_LEADING]
for strand_bias_index, strand_bias in enumerate(strand_bias_list):
if (strand_bias == LAGGING_VERSUS_LEADING):
if sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:
lagging_percentage = None
leading_percentage = None
if (LAGGING in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][LAGGING][percentage_string] == 1):
lagging_percentage = 100
if (LEADING in signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][LEADING][percentage_string] == 1):
leading_percentage = 100
if (lagging_percentage is not None) and (leading_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(percentage_strings) + percentage_diff_index + 0.5,strand_bias_index + 0.5), radius, color='indianred', fill=True))
elif (leading_percentage is not None) and (lagging_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index, row_sbs_signature,mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius,
color='goldenrod', fill=True))
elif (lagging_percentage is not None) and (leading_percentage is not None):
radius_lagging = 0.49
radius_leading = 0.49
if (radius_lagging > radius_leading):
# First lagging
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_lagging,
color='indianred', fill=True))
# Second leading
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_leading,
color='goldenrod', fill=True))
else:
# First leading
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_leading,
color='goldenrod', fill=True))
# Second lagging
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_lagging,
color='indianred', fill=True))
elif (strand_bias == GENIC_VERSUS_INTERGENIC):
if sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:
genic_percentage = None
intergenic_percentage = None
if (GENIC in signature2mutation_type2strand2percentagedict[sbs_signature][
mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
GENIC][percentage_string] == 1):
genic_percentage = 100
if (INTERGENIC in signature2mutation_type2strand2percentagedict[sbs_signature][
mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
INTERGENIC][percentage_string] == 1):
intergenic_percentage = 100
if (genic_percentage is not None) and (intergenic_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius, color='cyan',
fill=True))
elif (intergenic_percentage is not None) and (genic_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius, color='gray',
fill=True))
elif (genic_percentage is not None) and (intergenic_percentage is not None):
radius_genic = 0.49
radius_intergenic = 0.49
if (radius_genic > radius_intergenic):
# First genic
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_genic,
color='cyan', fill=True))
# Second intergenic
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_intergenic,
color='gray', fill=True))
else:
# First intergenic
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_intergenic,
color='gray', fill=True))
# Second genic
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_genic,
color='cyan', fill=True))
elif (strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED):
if sbs_signature in signature2mutation_type2strand2percentagedict:
if mutation_type in signature2mutation_type2strand2percentagedict[sbs_signature]:
transcribed_percentage = None
untranscribed_percentage = None
if (TRANSCRIBED_STRAND in signature2mutation_type2strand2percentagedict[sbs_signature][
mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
TRANSCRIBED_STRAND][percentage_string] == 1):
transcribed_percentage = 100
if (UNTRANSCRIBED_STRAND in
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type]) and (
signature2mutation_type2strand2percentagedict[sbs_signature][mutation_type][
UNTRANSCRIBED_STRAND][percentage_string] == 1):
untranscribed_percentage = 100
if (transcribed_percentage is not None) and (untranscribed_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius,
color='royalblue', fill=True))
elif (untranscribed_percentage is not None) and (transcribed_percentage is None):
radius = 0.49
if (radius > 0):
# print('Plot circle at x=%d y=%d for %s %s %s' % (mutation_type_index * len(percentage_strings) + percentage_diff_index, row_sbs_signature_index,row_sbs_signature, mutation_type, percentage_string))
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius,
color='yellowgreen', fill=True))
elif (transcribed_percentage is not None) and (untranscribed_percentage is not None):
radius_transcribed = 0.49
radius_untranscribed = 0.49
if (radius_transcribed > radius_untranscribed):
# First transcribed
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_transcribed,
color='royalblue', fill=True))
# Second untranscribed
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_untranscribed,
color='yellowgreen', fill=True))
else:
# First untranscribed
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_untranscribed,
color='yellowgreen', fill=True))
# Second transcribed
ax.add_patch(plt.Circle((mutation_type_index * len(
percentage_strings) + percentage_diff_index + 0.5,
strand_bias_index + 0.5), radius_transcribed,
color='royalblue', fill=True))
# June 2, 2021
def plot_strand_bias_figure_with_bar_plots(strand_bias,
strandbias_figures_outputDir,
numberofSimulations,
signature,
N,
x_axis_tick_labels,
y_axis_label,
strand1_values,
strand2_values,
strand1_simulations_median_values,
strand2_simulations_median_values,
fdr_bh_adjusted_pvalues,
strand1Name,
strand2Name,
color1,
color2,
width,
axis_given=None):
    # Here we can take into account the difference between strand1_values and strand2_values when deciding on significance
# the x locations for the groups
ind = np.arange(N)
    if axis_given is None:
fig, ax = plt.subplots(figsize=(16,10),dpi=100)
else:
ax = axis_given
legend = None
rects3 = None
rects4 = None
rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)
rects2 = ax.bar(ind + width, strand2_values, width=width, edgecolor='black', color=color2)
if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
rects3 = ax.bar(ind+ 2*width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
rects4 = ax.bar(ind + 3*width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///')
# add some text for labels, title and axes ticks
ax.tick_params(axis='x', labelsize=35)
ax.tick_params(axis='y', labelsize=35)
ymax = np.nanmax([np.nanmax(strand1_values),
np.nanmax(strand2_values),
np.nanmax(strand1_simulations_median_values),
np.nanmax(strand2_simulations_median_values)])
y = ymax / 1.025
    ytick_offset = float(y / 3)
    ylabs = [0, ytick_offset, ytick_offset * 2, ytick_offset * 3, ytick_offset * 4]
    ylabels = [0, ytick_offset, ytick_offset * 2, ytick_offset * 3, ytick_offset * 4]
ylabels = ['{:,}'.format(int(x)) for x in ylabels]
if len(ylabels[-1]) > 3:
ylabels_temp = []
if len(ylabels[-1]) > 7:
for label in ylabels:
if len(label) > 7:
ylabels_temp.append(label[0:-8] + "m")
elif len(label) > 3:
ylabels_temp.append(label[0:-4] + "k")
else:
ylabels_temp.append(label)
else:
for label in ylabels:
if len(label) > 3:
ylabels_temp.append(label[0:-4] + "k")
else:
ylabels_temp.append(label)
ylabels = ylabels_temp
ax.set_ylim([0, y])
ax.set_yticks(ylabs)
ax.set_yticklabels(ylabels, fontsize=35, fontweight='bold', fontname='Arial')
# To make the bar width not too wide
if len(ind) < 6:
maxn = 6
ax.set_xlim(-0.5, maxn - 0.5)
# Set title
ax.set_title('%s vs. %s' %(strand1Name,strand2Name), fontsize=40, fontweight='bold')
# Set x tick labels
if len(x_axis_tick_labels) > 6:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35, rotation=90)
else:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35)
# Set the ylabel
if y_axis_label:
ax.set_ylabel(y_axis_label, fontsize=35, fontweight='normal', labelpad=15)
# Set the x axis tick locations
if (numberofSimulations > 0):
ax.set_xticks(ind + (3 * width) / 2)
realStrand1Name = 'Real %s' % (strand1Name)
realStrand2Name = 'Real %s' % (strand2Name)
simulationsStrand1Name = 'Simulated %s' % (strand1Name)
simulationsStrand2Name = 'Simulated %s' % (strand2Name)
if ((rects1 is not None) and (rects2 is not None) and (rects3 is not None) and (rects4 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0) and (len(rects3) > 0) and (len(rects4) > 0)):
legend = ax.legend((rects1[0], rects2[0], rects3[0], rects4[0]),
(realStrand1Name, realStrand2Name, simulationsStrand1Name, simulationsStrand2Name),prop={'size': 25}, ncol=1, loc='best')
else:
# Old way with no simulations
ax.set_xticks(ind + width / 2)
if ((rects1 is not None) and (rects2 is not None)):
if ((len(rects1) > 0) and (len(rects2) > 0)):
legend = ax.legend((rects1[0], rects2[0]), (strand1Name, strand2Name), prop={'size': 25}, ncol=1, loc='upper right')
# To make the barplot background white
ax.set_facecolor('white')
    # To make the spines black, like a rectangle with a black stroke
ax.spines["bottom"].set_color('black')
ax.spines["left"].set_color('black')
ax.spines["top"].set_color('black')
ax.spines["right"].set_color('black')
if (legend is not None):
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
# Add star above the bars for significant differences between the number of mutations on each strand starts
# For each bar: Place a label
if fdr_bh_adjusted_pvalues is not None:
for fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
# Get X and Y placement of label from rect.
y_value = max(rect1.get_height(),rect2.get_height())
x_value = rect1.get_x() + rect1.get_width()
# Number of points between bar and label. Change to your liking.
space = 3
# Vertical alignment for positive values
va = 'bottom'
# If value of bar is negative: Place label below bar
if y_value < 0:
# Invert space to place label below
space *= -1
# Vertically align label at top
va = 'top'
# Use Y value as label and format number with one decimal place
label = "{:.1f}".format(y_value)
            # Create annotation: add significance stars when the BH-adjusted
            # q-value passes the threshold and the two strands differ by at least 10%
            stars = None
            if (fdr_bh_adjusted_pvalue is not None) and is_there_at_least_10perc_diff(strand1_value, strand2_value):
                if fdr_bh_adjusted_pvalue <= 0.0001:
                    stars = '***'
                elif fdr_bh_adjusted_pvalue <= 0.001:
                    stars = '**'
                elif fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL:
                    stars = '*'
            if stars is not None:
                ax.annotate(
                    stars, # significance stars
                    (x_value, y_value), # place label at the top of the bar pair
                    xytext=(0, space), # vertically shift label by `space`
                    textcoords="offset points", # interpret `xytext` as offset in points
                    ha='center', # horizontally center label
                    va=va, # align above the bar (or below it, for negative bars)
                    fontsize=25)
    if axis_given is None:
filename = '%s_%s_with_bars.png' %(signature,strand_bias)
figFile = os.path.join(strandbias_figures_outputDir, filename)
fig.savefig(figFile, dpi=100, bbox_inches="tight")
plt.cla()
plt.close(fig)
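# Illustrative consolidation (an added sketch, not part of the original pipeline):
# the annotation branches above, and the near-identical ones in the other bar-plot
# functions in this module, all apply the same rule -- map the BH-adjusted q-value
# to '***', '**' or '*' only when the two strands differ by at least 10%. A shared
# helper could express that rule once. It assumes SIGNIFICANCE_LEVEL and
# is_there_at_least_10perc_diff are defined elsewhere in this module, as they are
# for the functions above.
def significance_stars_sketch(q_value, strand1_value, strand2_value):
    if (q_value is None) or (not is_there_at_least_10perc_diff(strand1_value, strand2_value)):
        return ''
    if q_value <= 0.0001:
        return '***'
    if q_value <= 0.001:
        return '**'
    if q_value <= SIGNIFICANCE_LEVEL:
        return '*'
    return ''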
# June 2, 2021
def plot_bar_plot_in_given_axis(axis,
sbs_signature,
strand_bias,
strands_list,
signature_strand1_versus_strand2_df,
y_axis_label = None):
box = axis.get_position()
axis.set_position([box.x0, box.y0 + 0.125, box.width * 1, box.height * 1], which='both')
mutation_types = six_mutation_types
numberofSimulations = 100
width = 0.20
if strand_bias == LAGGING_VERSUS_LEADING:
strands = strands_list
strand1 = "Lagging_real_count"
strand2 = "Leading_real_count"
strand1_sims = "Lagging_mean_sims_count"
strand2_sims = "Leading_mean_sims_count"
q_value_column_name = "lagging_versus_leading_q_value"
color1 = 'indianred'
color2 = 'goldenrod'
elif strand_bias==TRANSCRIBED_VERSUS_UNTRANSCRIBED:
strands = strands_list
strand1 = "Transcribed_real_count"
strand2 = "UnTranscribed_real_count"
strand1_sims = "Transcribed_mean_sims_count"
strand2_sims = "UnTranscribed_mean_sims_count"
q_value_column_name = "transcribed_versus_untranscribed_q_value"
color1 = 'royalblue'
color2 = 'yellowgreen'
elif strand_bias == GENIC_VERSUS_INTERGENIC:
strands = strands_list
strand1 = "genic_real_count"
strand2 = "intergenic_real_count"
strand1_sims = "genic_mean_sims_count"
strand2_sims = "intergenic_mean_sims_count"
q_value_column_name = "genic_versus_intergenic_q_value"
color1 = 'cyan'
color2 = 'gray'
groupby_df = signature_strand1_versus_strand2_df.groupby(['signature'])
group_df = groupby_df.get_group(sbs_signature)
mutationtype_strand1_real_list = []
mutationtype_strand2_real_list = []
mutationtype_strand1_sims_mean_list = []
mutationtype_strand2_sims_mean_list = []
mutationtype_FDR_BH_adjusted_pvalues_list = []
for mutation_type in six_mutation_types:
strand1_real_count=group_df[group_df['mutation_type'] == mutation_type][strand1].values[0]
strand2_real_count=group_df[group_df['mutation_type'] == mutation_type][strand2].values[0]
strand1_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand1_sims].values[0]
strand2_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand2_sims].values[0]
q_value=group_df[group_df['mutation_type'] == mutation_type][q_value_column_name].values[0]
mutationtype_strand1_real_list.append(strand1_real_count)
mutationtype_strand2_real_list.append(strand2_real_count)
mutationtype_strand1_sims_mean_list.append(strand1_sims_count)
mutationtype_strand2_sims_mean_list.append(strand2_sims_count)
mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)
plot_strand_bias_figure_with_bar_plots(strand_bias,
None,
numberofSimulations,
sbs_signature,
len(mutation_types),
mutation_types,
y_axis_label,
mutationtype_strand1_real_list,
mutationtype_strand2_real_list,
mutationtype_strand1_sims_mean_list,
mutationtype_strand2_sims_mean_list,
mutationtype_FDR_BH_adjusted_pvalues_list,
strands[0],
strands[1],
color1,
color2,
width,
axis_given = axis)
# June 2, 2021
def plot_strand_bias_figure_with_stacked_bar_plots(strand_bias,
strandbias_figures_outputDir,
numberofSimulations,
signature,
N,
x_axis_tick_labels,
y_axis_label,
strand1_values,
strand2_values,
strand1_simulations_median_values,
strand2_simulations_median_values,
fdr_bh_adjusted_pvalues,
strand1Name,
strand2Name,
color1,
color2,
width,
axis_given=None):
# Replace np.nans with 0
strand1_values = [0 if np.isnan(x) else x for x in strand1_values]
strand2_values = [0 if np.isnan(x) else x for x in strand2_values]
strand1_simulations_median_values = [0 if np.isnan(x) else x for x in strand1_simulations_median_values]
strand2_simulations_median_values = [0 if np.isnan(x) else x for x in strand2_simulations_median_values]
# Fill odds_ratio_list
odds_real_list = []
odds_sims_list = []
for a, b in zip(strand1_values, strand2_values):
odds_real = np.nan
if b>0:
odds_real = a/b
odds_real_list.append(odds_real)
for x, y in zip(strand1_simulations_median_values, strand2_simulations_median_values):
odds_sims = np.nan
if y > 0:
odds_sims = x/y
odds_sims_list.append(odds_sims)
odds_ratio_list = [odds_real/odds_sims if odds_sims>0 else np.nan for (odds_real, odds_sims) in zip(odds_real_list,odds_sims_list)]
    # Here we can take into account the difference between strand1_values and strand2_values when deciding on significance
# the x locations for the groups
ind = np.arange(N)
    if axis_given is None:
fig, ax = plt.subplots(figsize=(16,10),dpi=100)
else:
ax = axis_given
legend=None
rects1 = ax.bar(ind, strand1_values, width=width, edgecolor='black', color=color1)
rects2 = ax.bar(ind, strand2_values, width=width, edgecolor='black', color=color2, bottom=strand1_values)
if ((strand1_simulations_median_values is not None) and strand1_simulations_median_values):
ax.bar(ind + width, strand1_simulations_median_values, width=width, edgecolor='black', color=color1, hatch = '///')
if ((strand2_simulations_median_values is not None) and strand2_simulations_median_values):
ax.bar(ind + width, strand2_simulations_median_values, width=width, edgecolor='black', color=color2, hatch = '///', bottom=strand1_simulations_median_values)
# Add some text for labels, title and axes ticks
ax.tick_params(axis='x', labelsize=35)
ax.tick_params(axis='y', labelsize=35)
ax.set_ylim(0, 1.1)
    ax.set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
    ax.set_yticklabels([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=35)
# To make the bar width not too wide
if len(ind) < 6:
maxn = 6
ax.set_xlim(-0.5, maxn - 0.5)
# Set title
stacked_bar_title = 'Real vs. Simulated\nOdds Ratio of %s vs. %s' %(strand1Name, strand2Name)
ax.set_title(stacked_bar_title, fontsize=40, fontweight='bold')
# Set x tick labels
if len(x_axis_tick_labels) > 6:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35, rotation=90)
else:
ax.set_xticklabels(x_axis_tick_labels, fontsize=35)
# Set the ylabel
if y_axis_label:
ax.set_ylabel(y_axis_label, fontsize=35, fontweight='normal', labelpad=15)
# Set the x axis tick locations
if (numberofSimulations > 0):
ax.set_xticks(ind + (width/2))
else:
# Old way with no simulations
ax.set_xticks(ind + width / 2)
# To make the barplot background white
ax.set_facecolor('white')
    # To make the spines black, like a rectangle with a black stroke
ax.spines["bottom"].set_color('black')
ax.spines["left"].set_color('black')
ax.spines["top"].set_color('black')
ax.spines["right"].set_color('black')
if (legend is not None):
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
# Add star above the bars for significant differences between the number of mutations on each strand starts
# For each bar: Place a label
if odds_ratio_list is not None:
for odds_ratio, fdr_bh_adjusted_pvalue, strand1_value, strand2_value, rect1, rect2 in zip(odds_ratio_list, fdr_bh_adjusted_pvalues, strand1_values, strand2_values, rects1, rects2):
# Get X and Y placement of label from rect.
# y_value = max(rect1.get_height(),rect2.get_height())
y_value = rect1.get_height() + rect2.get_height()
x_value = rect1.get_x() + rect1.get_width()
# Number of points between bar and label. Change to your liking.
space = 3
# Vertical alignment for positive values
va = 'bottom'
# If value of bar is negative: Place label below bar
if y_value < 0:
# Invert space to place label below
space *= -1
# Vertically align label at top
va = 'top'
# Use Y value as label and format number with one decimal place
label = "{:.1f}".format(y_value)
            # Create annotation: show the real vs. simulated odds ratio, adding
            # significance stars when the BH-adjusted q-value passes the threshold
            # and the two strands differ by at least 10%
            if not np.isnan(odds_ratio):
                stars = ''
                if (fdr_bh_adjusted_pvalue is not None) and is_there_at_least_10perc_diff(strand1_value, strand2_value):
                    if fdr_bh_adjusted_pvalue <= 0.0001:
                        stars = ' ***'
                    elif fdr_bh_adjusted_pvalue <= 0.001:
                        stars = ' **'
                    elif fdr_bh_adjusted_pvalue <= SIGNIFICANCE_LEVEL:
                        stars = ' *'
                ax.annotate(
                    '%.2f%s' % (odds_ratio, stars), # odds ratio plus significance stars
                    (x_value, y_value), # place label at the top of the stacked bar
                    xytext=(0, space), # vertically shift label by `space`
                    textcoords="offset points", # interpret `xytext` as offset in points
                    ha='center', # horizontally center label
                    va=va, # align above the bar (or below it, for negative bars)
                    fontsize=25)
    if axis_given is None:
filename = '%s_%s_with_bars.png' %(signature,strand_bias)
figFile = os.path.join(strandbias_figures_outputDir, filename)
fig.savefig(figFile, dpi=100, bbox_inches="tight")
plt.cla()
plt.close(fig)
# June 2, 2021
def plot_stacked_bar_plot_in_given_axis(axis,
sbs_signature,
strand_bias,
strands_list,
signature_strand1_versus_strand2_df,
y_axis_label = None):
box = axis.get_position()
axis.set_position([box.x0, box.y0+0.125, box.width * 1, box.height * 1], which='both')
mutation_types = six_mutation_types
numberofSimulations = 100
width = 0.20
if strand_bias == LAGGING_VERSUS_LEADING:
strands = strands_list
strand1 = "Lagging_real_count"
strand2 = "Leading_real_count"
strand1_sims = "Lagging_mean_sims_count"
strand2_sims = "Leading_mean_sims_count"
q_value_column_name = "lagging_versus_leading_q_value"
color1 = 'indianred'
color2 = 'goldenrod'
elif strand_bias == TRANSCRIBED_VERSUS_UNTRANSCRIBED:
strands = strands_list
strand1 = "Transcribed_real_count"
strand2 = "UnTranscribed_real_count"
strand1_sims = "Transcribed_mean_sims_count"
strand2_sims = "UnTranscribed_mean_sims_count"
q_value_column_name = "transcribed_versus_untranscribed_q_value"
color1 = 'royalblue'
color2 = 'yellowgreen'
elif strand_bias == GENIC_VERSUS_INTERGENIC:
strands = strands_list
strand1 = "genic_real_count"
strand2 = "intergenic_real_count"
strand1_sims = "genic_mean_sims_count"
strand2_sims = "intergenic_mean_sims_count"
q_value_column_name = "genic_versus_intergenic_q_value"
color1 = 'cyan'
color2 = 'gray'
groupby_df = signature_strand1_versus_strand2_df.groupby(['signature'])
group_df = groupby_df.get_group(sbs_signature)
mutationtype_strand1_real_list = []
mutationtype_strand2_real_list = []
mutationtype_strand1_sims_mean_list = []
mutationtype_strand2_sims_mean_list = []
mutationtype_FDR_BH_adjusted_pvalues_list = []
for mutation_type in six_mutation_types:
strand1_real_count=group_df[group_df['mutation_type'] == mutation_type][strand1].values[0]
strand2_real_count=group_df[group_df['mutation_type'] == mutation_type][strand2].values[0]
strand1_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand1_sims].values[0]
strand2_sims_count=group_df[group_df['mutation_type'] == mutation_type][strand2_sims].values[0]
q_value=group_df[group_df['mutation_type'] == mutation_type][q_value_column_name].values[0]
mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)
if (strand1_real_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT) or (strand2_real_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT):
mutationtype_strand1_real_list.append(strand1_real_count/(strand1_real_count+strand2_real_count))
mutationtype_strand2_real_list.append(strand2_real_count/(strand1_real_count+strand2_real_count))
else:
mutationtype_strand1_real_list.append(np.nan)
mutationtype_strand2_real_list.append(np.nan)
if (strand1_sims_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT) or (strand2_sims_count >= NUMBER_OF_REQUIRED_MUTATIONS_FOR_STRAND_BIAS_BAR_PLOT):
mutationtype_strand1_sims_mean_list.append(strand1_sims_count/(strand1_sims_count+strand2_sims_count))
mutationtype_strand2_sims_mean_list.append(strand2_sims_count/(strand1_sims_count+strand2_sims_count))
else:
mutationtype_strand1_sims_mean_list.append(np.nan)
mutationtype_strand2_sims_mean_list.append(np.nan)
plot_strand_bias_figure_with_stacked_bar_plots(strand_bias,
None,
numberofSimulations,
sbs_signature,
len(mutation_types),
mutation_types,
y_axis_label,
mutationtype_strand1_real_list,
mutationtype_strand2_real_list,
mutationtype_strand1_sims_mean_list,
mutationtype_strand2_sims_mean_list,
mutationtype_FDR_BH_adjusted_pvalues_list,
strands[0],
strands[1],
color1,
color2,
width,
axis_given=axis)
def plot_circle_bar_plots_together(outputDir,
jobname,
sbs_signature,
six_mutation_types,
signature2mutation_type2strand2percentagedict,
signature_genic_versus_intergenic_df,
signature_transcribed_versus_untranscribed_df,
signature_lagging_versus_leading_df,
genic_vs_intergenic_strands,
transcription_strands,
replication_strands):
x_ticklabels_list = percentage_strings * 6
fig = plt.figure(figsize=(5 + 1.5 * len(x_ticklabels_list), 30 + 1.5))
plt.rc('axes', edgecolor='lightgray')
width = 6
height = 6
width_ratios = [1] * width
height_ratios = [1] * height
gs = gridspec.GridSpec(height, width, height_ratios = height_ratios, width_ratios = width_ratios)
fig.subplots_adjust(hspace=0, wspace=3)
    circle_plot_axis = plt.subplot(gs[0:2, :])
genic_vs_intergenic_bar_plot_axis = plt.subplot(gs[2:4, 0:2])
transcribed_vs_untranscribed_bar_plot_axis = plt.subplot(gs[2:4, 2:4])
lagging_vs_leading_bar_plot_axis = plt.subplot(gs[2:4, 4:6])
genic_vs_intergenic_stacked_bar_plot_axis = plt.subplot(gs[4:, 0:2])
transcribed_vs_untranscribed_stacked_bar_plot_axis = plt.subplot(gs[4:, 2:4])
lagging_vs_leading_stacked_bar_plot_axis = plt.subplot(gs[4:, 4:6])
# Circle plot with legends
    plot_circle_plot_in_given_axis(circle_plot_axis,
percentage_strings,
sbs_signature,
six_mutation_types,
x_ticklabels_list,
signature2mutation_type2strand2percentagedict)
# 3 Bar plots side by side
plot_bar_plot_in_given_axis(genic_vs_intergenic_bar_plot_axis,
sbs_signature,
GENIC_VERSUS_INTERGENIC,
genic_vs_intergenic_strands,
signature_genic_versus_intergenic_df,
y_axis_label = 'Number of Single Base Substitutions')
plot_bar_plot_in_given_axis(transcribed_vs_untranscribed_bar_plot_axis,
sbs_signature,
TRANSCRIBED_VERSUS_UNTRANSCRIBED,
transcription_strands,
signature_transcribed_versus_untranscribed_df)
plot_bar_plot_in_given_axis(lagging_vs_leading_bar_plot_axis,
sbs_signature,
LAGGING_VERSUS_LEADING,
replication_strands,
signature_lagging_versus_leading_df)
# 3 Stacked Bar plots side by side
plot_stacked_bar_plot_in_given_axis(genic_vs_intergenic_stacked_bar_plot_axis,
sbs_signature,
GENIC_VERSUS_INTERGENIC,
genic_vs_intergenic_strands,
signature_genic_versus_intergenic_df,
y_axis_label = 'Ratio of mutations on each strand')
plot_stacked_bar_plot_in_given_axis(transcribed_vs_untranscribed_stacked_bar_plot_axis,
sbs_signature,
TRANSCRIBED_VERSUS_UNTRANSCRIBED,
transcription_strands,
signature_transcribed_versus_untranscribed_df)
plot_stacked_bar_plot_in_given_axis(lagging_vs_leading_stacked_bar_plot_axis,
sbs_signature,
LAGGING_VERSUS_LEADING,
replication_strands,
signature_lagging_versus_leading_df)
# filename = '%s_circle_bar_plot_together_%s.png' % (sbs_signature, str(significance_level).replace('.', '_'))
filename = '%s_circle_bar_plots.png' % (sbs_signature)
figurepath = os.path.join(outputDir, jobname, FIGURE, STRANDBIAS, CIRCLE_BAR_PLOTS, filename)
fig.savefig(figurepath, dpi=100, bbox_inches="tight")
plt.cla()
plt.close(fig)
# Key can be signature or sample
def plotBarPlotsUsingDataframes(outputDir,
jobname,
numberofSimulations,
signature_cutoff_numberofmutations_averageprobability_df,
isKeySample,
existingMutationTypesList,
signature_strand1_versus_strand2_df,
width,
strand1_versus_strand2,
strands,
color1,
color2,
title,
figureName,
plot_mode):
# signature_strand1_versus_strand2_df column names here
# ['cancer_type', 'signature', 'mutation_type',
# 'Transcribed_real_count', 'UnTranscribed_real_count', 'Transcribed_mean_sims_count', 'UnTranscribed_mean_sims_count',
# 'transcribed_versus_untranscribed_p_value', 'transcribed_versus_untranscribed_q_value',
# 'Transcribed_real_count.1', 'Transcribed_mean_sims_count.1', 'Transcribed_min_sims_count', 'Transcribed_max_sims_count', 'Transcribed_sims_count_list',
# 'UnTranscribed_real_count.1', 'UnTranscribed_mean_sims_count.1', 'UnTranscribed_min_sims_count', 'UnTranscribed_max_sims_count', 'UnTranscribed_sims_count_list']
signatures = signature_strand1_versus_strand2_df['signature'].unique()
x_axis_labels = existingMutationTypesList
N = len(x_axis_labels)
for signature in signatures:
numberofMutations = int(signature_cutoff_numberofmutations_averageprobability_df[signature_cutoff_numberofmutations_averageprobability_df['signature'] == signature]['number_of_mutations'].values[0])
mutationtype_strand1_real_list=[]
mutationtype_strand2_real_list=[]
mutationtype_strand1_sims_mean_list=[]
mutationtype_strand2_sims_mean_list=[]
mutationtype_FDR_BH_adjusted_pvalues_list=[]
for mutation_type in existingMutationTypesList:
if (strand1_versus_strand2==TRANSCRIBED_VERSUS_UNTRANSCRIBED):
strand1_real_count_column_name=TRANSCRIBED_REAL_COUNT
strand1_sims_mean_count_Column_name=TRANSCRIBED_SIMULATIONS_MEAN_COUNT
strand2_real_count_column_name=UNTRANSCRIBED_REAL_COUNT
strand2_sims_mean_count_Column_name=UNTRANSCRIBED_SIMULATIONS_MEAN_COUNT
q_value_column_name = TRANSCRIBED_VERSUS_UNTRANSCRIBED_Q_VALUE
elif (strand1_versus_strand2 == GENIC_VERSUS_INTERGENIC):
strand1_real_count_column_name=GENIC_REAL_COUNT
strand1_sims_mean_count_Column_name=GENIC_SIMULATIONS_MEAN_COUNT
strand2_real_count_column_name=INTERGENIC_REAL_COUNT
strand2_sims_mean_count_Column_name=INTERGENIC_SIMULATIONS_MEAN_COUNT
q_value_column_name = GENIC_VERSUS_INTERGENIC_Q_VALUE
elif (strand1_versus_strand2 == LAGGING_VERSUS_LEADING):
strand1_real_count_column_name=LAGGING_REAL_COUNT
strand1_sims_mean_count_Column_name=LAGGING_SIMULATIONS_MEAN_COUNT
strand2_real_count_column_name=LEADING_REAL_COUNT
strand2_sims_mean_count_Column_name=LEADING_SIMULATIONS_MEAN_COUNT
q_value_column_name = LAGGING_VERSUS_LEADING_Q_VALUE
strand1_real_count = 0
strand1_sims_mean_count = 0
strand2_real_count = 0
strand2_sims_mean_count = 0
q_value = None
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand1_real_count_column_name].values.size>0):
strand1_real_count=signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand1_real_count_column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand1_sims_mean_count_Column_name].values.size>0):
strand1_sims_mean_count = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand1_sims_mean_count_Column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand2_real_count_column_name].values.size>0):
strand2_real_count=signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature']==signature) & (signature_strand1_versus_strand2_df['mutation_type']==mutation_type)][strand2_real_count_column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand2_sims_mean_count_Column_name].values.size>0):
strand2_sims_mean_count = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][strand2_sims_mean_count_Column_name].values[0]
if (signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][q_value_column_name].values.size>0):
q_value = signature_strand1_versus_strand2_df[(signature_strand1_versus_strand2_df['signature'] == signature) & (signature_strand1_versus_strand2_df['mutation_type'] == mutation_type)][q_value_column_name].values[0]
mutationtype_strand1_real_list.append(strand1_real_count)
mutationtype_strand1_sims_mean_list.append(strand1_sims_mean_count)
mutationtype_strand2_real_list.append(strand2_real_count)
mutationtype_strand2_sims_mean_list.append(strand2_sims_mean_count)
mutationtype_FDR_BH_adjusted_pvalues_list.append(q_value)
plotStrandBiasFigureWithBarPlots(outputDir,
jobname,
numberofSimulations,
signature,
isKeySample,
numberofMutations,
N,
x_axis_labels,
mutationtype_strand1_real_list,
mutationtype_strand2_real_list,
mutationtype_strand1_sims_mean_list,
mutationtype_strand2_sims_mean_list,
mutationtype_FDR_BH_adjusted_pvalues_list,
strands[0],
strands[1],
title,
color1,
color2,
figureName,
width,
plot_mode)
###################################################################
# April 20, 2020
# July 4, 2020 starts
# Using dataframes
def transcriptionReplicationStrandBiasFiguresUsingDataframes(outputDir, jobname, numberofSimulations, mutation_types_contexts, strand_bias_list, is_discreet, plot_mode):
# Initialize these dataframes as empty dataframe
# We will read these dataframes if there is the corresponding data
subsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
indelsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
sbs_df = pd.DataFrame()
dbs_df = pd.DataFrame()
    id_df = pd.DataFrame()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 00:04:41 2020
@author: shashanknigam
web parser for amazon:
Things to be extracted: 1. Title of the product span id = "productTitle"
2. Number of rating : span id = acrCustomerReviewText
3. Average rating given:span class a-icon-alt
4. Description: div id = featurebullets_feature_div.text
5. Product description: heading description format h3:a-spacing-mini :- neighboring text p class="a-spacing-base"
6. Other features if any h4 class="a-spacing-mini" p : afterwards.
-- later consideration 6.5: Comparison id=HLCXComparisonTable
item heading: tr class="comparison_table_image_row"
img.src :Name
class="a-row a-spacing-top-small"
7. Product information div id = "productDetails_detailBullets_sections1"
1. Product dimensions th label td value
2. Item weight
3. Shipping weight
4. Manufacturer
5. ASIN
6. Model Number
7. Customer reviews
8. Best sellers rank
9. Warantee if any
8. Question answers: div =class="a-section a-spacing-none askBtfTopQuestionsContainer" ; span class = "a-text-bold" next sibling id (class="a-declarative")the child question next span class= askLongText class="a-color-tertiary a-nowrap" for r the next teritory wrap
9. Customer reviews: all if possible : - class="cr-lighthouse-term " (terms)
1. data-hook="review-star-rating" user rating
2. data-hook="review-title"
3. class="a-row a-spacing-small review-data" detailed review
4. data-hook="see-all-reviews-link-foot"
5. class="a-last"
10. Price: span id = priceblock_ourprice
Hanumanji
a-section celwidget
cr-dp-lighthut
["a-fixed-left-grid","a-spacing-base"]
['a-fixed-left-grid-col', 'a-col-right']
reviews-medley-footer
id="cr-dp-desktop-lighthut"
["a-fixed-right-grid-col","cm_cr_grid_center_right"]
"""
"""
Getting each details out:
"""
from selenium import webdriver
import time
from bs4 import BeautifulSoup as soup
import bs4
import sys
import traceback
import numpy as np
import pandas as pd
import gc
product_dict={"ASIN":[],"Name":[]}
productDetails = {"ASIN":[],"Name":[],"Average Rating":[],"TotalRating":[],"Price":[],"Features":[]}
Description = {"ASIN":[],"ShortDescription":[],"LongDescription":[]}
productReview = {"ASIN":[],"Date":[],"Rating":[],"Title":[],"Detail":[]}
productQA = {"ASIN":[],"Question":[],"Answer":[]}
productInformation={"ASIN":[]} #Rest of the fields are optional
productRating={"ASIN":[],"5":[],"4":[],"3":[],"2":[],"1":[]}
ASIN=""
failed = []
#QA= {"Question":[],"Answers":[],"ASIN":[]}
#customerReviews = {"ASIN":[],"UserRating":[],"Title":[],"detailedReview":[]}
pages=0
driver = 0
ASIN_LIST = []
def initASIN_LIST():
global ASIN_LIST
df = pd.read_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductDictionary.xlsx')
ASIN_LIST = list(df['ASIN'])
def readWebpage(url,driver_not_in_use=-1):
try:
global pages
global driver
driver = np.random.randint(0,2)
while driver==driver_not_in_use:
driver = np.random.randint(0,2)
if driver ==0:
browser = webdriver.Safari()
elif driver==1:
browser = webdriver.Chrome('/Users/shashanknigam/Downloads/Beautiful Soup/chromedriver')
#elif driver==2:
# browser=webdriver.Firefox('/Users/shashanknigam/Downloads/Beautiful Soup/')
browser.get(url)
contents = browser.page_source
#time.sleep(1)
browser.close()
del browser
return contents
except:
try:
driver = np.random.randint(0,2)
if driver ==0:
browser = webdriver.Safari()
elif driver==1:
browser = webdriver.Chrome('/Users/shashanknigam/Downloads/Beautiful Soup/chromedriver')
#elif driver==2:
# browser=webdriver.Firefox('/Users/shashanknigam/Downloads/Beautiful Soup/')
            browser.get(url)
            contents = browser.page_source
            browser.close()
            del browser
            return contents
except:
print(sys.exc_info())
print(traceback.format_exc())
return None
#time.sleep(10)
def getSoup(url):
global driver
w = readWebpage(url)
if w is not None:
s = soup(w,'html.parser')
while "Robot Check" in s.text:
w = readWebpage(url,driver)
s = soup(w,'html.parser')
else:
s=None
return s
def get(s,tag,attr=None):
if attr is None:
return s.find_all(tag)
else:
#print("searching for attribute:"+attr)
tags = s.find_all(tag)
return [t for t in tags if attr in t.attrs.keys()]
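# Minimal sketch (illustration only, not used by the parser below) of how the
# element ids listed in the module docstring map to BeautifulSoup lookups via the
# get() helper above. `page_soup` is assumed to be the soup of an Amazon product
# page, e.g. the return value of getSoup(url).
def extract_basic_fields_sketch(page_soup):
    spans = get(page_soup, 'span', 'id')
    title = [t for t in spans if t['id'] == 'productTitle']
    rating_count = [r for r in spans if r['id'] == 'acrCustomerReviewText']
    price = [p for p in spans if p['id'] in ('priceblock_ourprice', 'priceblock_saleprice')]
    return {'title': title[0].text.strip() if title else None,
            'rating_count': rating_count[0].text.strip() if rating_count else None,
            'price': price[0].text.strip() if price else None}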
def getNextSibling(tag):
while True:
if tag.next_sibling == '' or tag.next_sibling is None:
return None
elif tag.next_sibling in ['\n','\xa0'] or tag.next_sibling.name=='br':
tag = tag.next_sibling
else:
return tag.next_sibling
def getNextSiblingText(tag):
while True:
#print(tag)
if tag.next_sibling == '' or tag.next_sibling is None:
return ''
elif tag.next_sibling in ['\n','\xa0'] or tag.next_sibling.name=='br' or tag.next_sibling==' ':
tag = tag.next_sibling
else:
if isinstance(tag.next_sibling,bs4.element.Tag):
return tag.next_sibling.text
else:
return str(tag.next_sibling)
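# Small self-contained illustration (not called anywhere) of the sibling helpers
# above: getNextSiblingText() skips blank/newline siblings and <br> tags and
# returns the text of the next meaningful sibling.
def _sibling_helpers_demo():
    demo = soup('<p><b>Item Weight</b>\n<span>2.5 pounds</span></p>', 'html.parser')
    return getNextSiblingText(demo.find('b'))  # -> '2.5 pounds'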
def parseQA(url,QA,ASIN):
s=getSoup(url)
if s is not None:
s_div = get(s,'div','class')
qa_div = [q for q in s_div if q['class']==['celwidget']]
if len(qa_div)>1:
qa_div = qa_div[1]
elif len(qa_div)==1:
qa_div = qa_div[0]
else:
qa_div=None
if qa_div is not None:
qa=get(qa_div,'div','class')
qa_inner = [q for q in qa if q['class']==['a-fixed-left-grid-col', 'a-col-right']]
#print("qa_inner",len(qa_inner))
for i in qa_inner:
qa_inner_temp=get(i,'div','class')
qa_inner_inner=[q for q in qa_inner_temp if q['class']==['a-fixed-left-grid-col', 'a-col-right']]
#print(len(qa_inner_inner))
if len(qa_inner_inner)>1:
QA['ASIN'].append(ASIN)
QA['Question'].append(qa_inner_inner[0].text.strip())
QA['Answer'].append(qa_inner_inner[1].span.text.strip())
#QA[qa_inner_inner[0].text.strip()]=qa_inner_inner[1].span.text.strip()
elif len(qa_inner_inner)==1:
#print(qa_inner_inner)
QA['ASIN'].append(ASIN)
QA['Question'].append(qa_inner_inner[0].text.strip())
QA['Answer'].append('')
#QA[qa_inner_inner[0].text.strip()]=''
li = get(s,'li','class')
li_last = [l for l in li if l['class']==['a-last']]
next_url = ""
if len(li_last)!=0:
if 'https://www.amazon.com/' not in li_last[0].a['href']:
next_url='https://www.amazon.com/'+li_last[0].a['href']
else:
next_url= li_last[0].a['href']
else:
next_url=""
s.decompose()
else:
next_url=""
return QA,next_url
def parseReview(url,review,ASIN):
#cm_cr-review_list
s=getSoup(url)
if s is not None:
s_div = get(s,'div','id')
div_reviews = [d for d in s_div if d['id']=="cm_cr-review_list"]
if len(div_reviews)>0:
div_reviews=div_reviews[0]
div_review = get(div_reviews,"div","data-hook")
div_r = [r for r in div_review if r['data-hook']=='review']
for i in div_r:
try:
rating_i = get(i,'i','data-hook')
rating = [r for r in rating_i if r['data-hook']=="review-star-rating"]
rating = rating[0].text.strip()
span_d = get(i,'span','data-hook')
date = [d for d in span_d if d['data-hook']=="review-date"]
date = date[0].text.strip()
review_t = get(i,'a','data-hook')
review_title=[t for t in review_t if t['data-hook']=="review-title"]
review_title = review_title[0].text.strip()
review_b=[b for b in span_d if b['data-hook']=="review-body"]
review_b = review_b[0].text.strip()
review["ASIN"].append(ASIN)
review["Rating"].append(rating)
review["Date"].append(date)
review["Title"].append(review_title)
review["Body"].append(review_b)
except:
print(sys.exc_info())
print(traceback.format_exc())
pass
li = get(s,'li','class')
next_url = [l for l in li if l['class']==["a-last"]]
if len(next_url)>0:
url ='https://www.amazon.com'+next_url[0].a['href']
else:
print("Error")
url=None
else:
url=None
s.decompose()
else:
url=None
#span
# data-hook = "review-date"
# i data-hook "review-star-rating"
# span data-hook "review-title"
#a-section review aok-relative
return url,review
def appendExcel(filename,df1):
df = pd.read_excel(filename,index_col=0)
df = df.append(df1)
df.to_excel(filename)
df=None
def parseAmazon(url):
#global pages
#global product_dict,productDetails,Description,productQA,productInformation,ASIN,productReview,failed
global pages,failed,ASIN_LIST
if pages==0:
initASIN_LIST()
product_dict={"ASIN":[],"Name":[]}
productDetails = {"ASIN":[],"Average Rating":[],"TotalRating":[],"Price":[],"Features":[]}
Description = {"ASIN":[],"ShortDescription":[],"LongDescription":[]}
productReview = {"ASIN":[],"Date":[],"Rating":[],"Title":[],"Body":[]}
productQA = {"ASIN":[],"Question":[],"Answer":[]}
productInformation={"ASIN":[]} #Rest of the fields are optional
productRating={"ASIN":[],"5":[],"4":[],"3":[],"2":[],"1":[]}
ASIN=""
s=getSoup(url)
if s is not None:
s_span = get(s,'span','id')
try:
title = [t for t in s_span if t['id']=="productTitle"]
title = title[0].text.strip()
numberOfRating = [r for r in s_span if r['id']=="acrCustomerReviewText"]
if len(numberOfRating)>0:
numberOfRating = numberOfRating[0].text.strip()
else:
numberOfRating="Unk"
averageRating = [i for i in s_span if i['id']=="acrPopover"]
if len(averageRating)>0:
averageRating = averageRating[0].text.strip()
else:
averageRating="Unk"
productPrice = [p for p in s_span if (p['id']=="priceblock_ourprice" or p['id']=="priceblock_saleprice")]
if len(productPrice)>0:
productPrice = productPrice[0].text
else:
productPrice ="Unk"
s_div = get(s,'div','id')
features = [f for f in s_div if f['id']=="feature-bullets"]
if len(features)>0:
features = features[0].text.strip().replace('\n','').replace('\t','')
else:
features=""
try:
product_Information =[pi for pi in s_div if pi['id']=='prodDetails']
pi_th = get(product_Information[0],'th')
pi_td = get(product_Information[0],'td')
pi_th_text = [t.text.strip() for t in pi_th if t.text.strip()!='']
pi_td_text = [t.text.strip().replace('\n','').replace('\t','') for t in pi_td if t.text.strip()!='']
#print(pi_th_text,pi_td_text)
label_col = []
if pages!=0:
columns = pd.read_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductInformation.xlsx').columns
else:
columns= None
#print(columns)
for i in range(len(pi_th_text)):
if pi_th_text[i]!="Customer Reviews":
if pi_th_text[i]=="ASIN":
ASIN = pi_td_text[i]
label_col.append(pi_th_text[i])
if columns is None:
if pi_th_text[i] not in productInformation.keys() :
productInformation[pi_th_text[i]]=[]
productInformation[pi_th_text[i]].append(pi_td_text[i])
else:
productInformation[pi_th_text[i]].append(pi_td_text[i])
else:
if pi_th_text[i] not in productInformation.keys() and pi_th_text[i] in columns:
productInformation[pi_th_text[i]]=[]
productInformation[pi_th_text[i]].append(pi_td_text[i])
elif pi_th_text[i] in columns:
productInformation[pi_th_text[i]].append(pi_td_text[i])
#for i in productInformation.keys():
# if i not in label_col:
# productInformation[i].append("")
if len(pi_th_text)==0:
heading=""
body=""
for i in range(0,len(pi_td_text)-1,2):
#print(i,len(pi_td_text))
heading = pi_td_text[i]
body = pi_td_text[i+1]
#print(i,heading,body)
if heading=="ASIN":
ASIN = body
#print(ASIN)
if heading!="Customer Reviews":
if columns is None:
if heading not in productInformation.keys():
productInformation[heading]=[]
productInformation[heading].append(body)
else:
productInformation[heading].append(body)
else:
if heading not in productInformation.keys() and heading in columns:
productInformation[heading]=[]
productInformation[heading].append(body)
elif heading in columns:
productInformation[heading].append(body)
except:
ASIN="Not available"
#print(sys.exc_info())
#print(traceback.format_exc())
if ASIN not in ASIN_LIST:
productDescription = [p for p in s_div if p['id']=="aplus"]
if len(productDescription)!=0:
h3_title = get(productDescription[0],'h3')
h4_title = get(productDescription[0],'h4')
p_description = get(productDescription[0],'p')
h3_title_text = [text.text.strip() for text in h3_title if text.text!="" and text.text.strip()!='']
p_description_text = [text.text.strip() for text in p_description if text.text!="" and text.text is not None and text.text.strip()!='']
h4_title_text =[text.text.strip() for text in h4_title if text.text!="" and text.text.strip()!='']
j=0
for i in range(len(h3_title_text)):
if h3_title_text[i] not in ["OTHER FEATURES","FEATURES"]:
Description['ASIN'].append(ASIN)
Description['ShortDescription'].append(h3_title_text[i])
Description['LongDescription'].append(p_description_text[j])
#product_description[h3_title_text[i]]=p_description_text[j]
j+=1
for i in range(len(h4_title_text)):
Description['ASIN'].append(ASIN)
Description['ShortDescription'].append(h4_title_text[i])
if j<len(p_description_text)-1:
Description['LongDescription'].append(p_description_text[j])
else:
Description['LongDescription'].append("")
#product_description[h4_title_text[i]]=p_description_text[j]
j+=1
else:
productDescription = [p for p in s_div if p['id']=="productDescription"]
#print(productDescription)
if len(productDescription)>0:
productDescription_b = get(productDescription[0],'b')
for i in productDescription_b:
#print(i.text.strip(),getNextSiblingText(i).strip())
if getNextSiblingText(i).strip()!='':
Description['ASIN'].append(ASIN)
Description['ShortDescription'].append(i.text.strip())
Description['LongDescription'].append(getNextSiblingText(i).strip())
# product_description[i.text.strip()] = getNextSiblingText(i).strip()
#print(Description)
qa_desc = [q for q in s_div if q['id']=='ask_lazy_load_div']
qa_url = qa_desc[0].a['href']
#QA = {}
while qa_url!='':
productQA,qa_url=parseQA(qa_url,productQA,ASIN)
review_summary = [d for d in s_div if d['id']=='reviewsMedley'][0]
rev_span = get(review_summary,'span','class')
#global productRating
rev_span = [r for r in rev_span if r['class']==["a-size-base"]]
#print(rev_span)
productRating['ASIN'].append(ASIN)
for i in [0,2,4,6,8]:
if "1" in rev_span[i].text.strip():
productRating["1"].append(rev_span[i+1].text.strip())
elif "2" in rev_span[i].text.strip():
productRating["2"].append(rev_span[i+1].text.strip())
elif "3" in rev_span[i].text.strip():
productRating["3"].append(rev_span[i+1].text.strip())
elif "4" in rev_span[i].text.strip():
productRating["4"].append(rev_span[i+1].text.strip())
else:
productRating["5"].append(rev_span[i+1].text.strip())
# rating[rev_span[i].text.strip()] = rev_span[i+1].text.strip()
rev_div = get(review_summary,'div','id')
rev_div_footer = [r for r in rev_div if r['id']=="reviews-medley-footer" or "footer" in r['id']]
#print(len(rev_div_footer),rev_div_footer)
if len(rev_div_footer)>0:
try:
if 'https://www.amazon.com' in rev_div_footer[0].a['href']:
rating_url = rev_div_footer[0].a['href']
else:
rating_url = 'https://www.amazon.com'+rev_div_footer[0].a['href']
except:
rating_url = None
while rating_url is not None:
rating_url,productReview=parseReview(rating_url,productReview,ASIN)
product_dict['ASIN'].append(ASIN)
product_dict['Name'].append(title)
productDetails['ASIN'].append(ASIN)
productDetails['Average Rating'].append(averageRating)
productDetails['TotalRating'].append(numberOfRating)
productDetails['Price'].append(productPrice)
productDetails['Features'].append(features)
#(productReview)
#print(productRating)
print("URL processed",pages+1)
if pages==0:
pd.DataFrame(product_dict).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductDictionary.xlsx')
pd.DataFrame(productDetails).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductDetails.xlsx')
pd.DataFrame(Description).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/Description.xlsx')
pd.DataFrame(productQA).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/QA_'+ASIN+'.xlsx')
                    pd.DataFrame(productInformation).to_excel('/Users/shashanknigam/downloads/nlp_project/shopBot/webscraping/AmazonDataSet/ProductInformation.xlsx')
# import the required packages
import pandas as pd
import matplotlib.pyplot as plt
# the address from which we want to download the table
pageURL = 'https://it.wikipedia.org/wiki/Leone_d%27oro_al_miglior_film'
# let pandas download and parse the page directly, giving it hints about which table we want
# "match": the table must contain the string "Anno" ("Year")
# "header": the first row contains the column names
tables = pd.read_html(pageURL, match='Anno', header=0)
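# Illustrative continuation (an assumption, not from the original script):
# pd.read_html returns a list of DataFrames that matched the hints above, so the
# first match is the award table; peek at it before any further processing.
golden_lion_table = tables[0]
print(golden_lion_table.head())
print('%d matching table(s) found' % len(tables))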
import conn,numpy as np
import pandas as pd
from flask import jsonify,json
db = conn.cursor
def add_examen(db,titel,vak,klas):
sql = "INSERT INTO examen(examen_titel,vak,klas) value('" + titel + "','" + vak + "','" + klas + "')"
db.execute(sql)
conn.db.commit()
def import_vragen(path):
# df = pd.DataFrame(pd.read_csv (r'%s' %path), columns = ['Vraag','Antwoord1','Antwoord2','Antwoord3','Antwoord4','CorrecteAntwoord'])
df = pd.read_csv (r'%s' %path)
data = df.to_dict(orient='records')
for record in data:
# Antwoord1,Antwoord2,Antwoord3,Antwoord4,CorrecteAntwoord,Vraag = record
db.execute('''
INSERT INTO vragen (vraag, ant_1, ant_2,ant_3,ant_4)
VALUES (%s,%s,%s,%s,%s)
''',
(
record['Vraag'],
record['Antwoord1'],
record['Antwoord2'],
record['Antwoord3'],
record['Antwoord4']
)
)
print(record)
return 'True'
# print (df)
# sql= ""
# db.execute(sql)
# conn.db.commit()
def add_antw(db,file):
# import CSV
    data = pd.read_csv(file)
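    # --- hedged sketch: the original function is truncated at this point ---
    # Presumably the CSV rows are inserted into an answers table in the same way
    # import_vragen fills `vragen`; the table and column names below are
    # assumptions for illustration, not taken from the original code.
    for record in data.to_dict(orient='records'):
        db.execute(
            'INSERT INTO antwoorden (vraag_id, antwoord) VALUES (%s, %s)',
            (record.get('VraagId'), record.get('Antwoord'))
        )
    conn.db.commit()
    return 'True'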
""" Module contains functions to retrieve
and process data from the database folder"""
import os
import numpy as np
import shutil
import csv
import pandas as pd
import pkg_resources
pd.options.mode.chained_assignment = None # default='warn'
ROOT = pkg_resources.resource_filename('optimol', '')
DATABASE = ROOT + '/database_chemspider'
def get_all_dataset(set1=None, set2=0):
"""
Get all dataset from the database and combine them to one dataframe
and the samples are randomly selected. When two return sets are requested,
the samples are randomly picked from the same list, matching values
between two sets can happen.
:param set1: amount of samples wanted for the first set
:param set2: amount of samples wanted for the second set
:type set1: int
:type set2: int
:return dataframe contains all of the datasets
"""
if False in [isinstance(set1, int),
isinstance(set2, int)]:
raise TypeError()
id_list = get_id()
max_length = len(id_list)
if True in [set1 < 0, set2 < 0,
set1 == 0,
set1 > max_length,
set2 > max_length]:
raise ValueError()
set1_items = np.random.randint(0, max_length, set1)
set2_items = np.random.randint(0, max_length, set2)
train_set = pd.DataFrame()
# i = 0
for item in set1_items:
# print(str(i) + ': ' + str(item)) # for debugging
[coord_2d, _, coord_3d, _] = get_df_database(id_list[item])
# remove unwanted data
del coord_2d['atom']
del coord_2d['connect_to_2d']
del coord_2d['2d_z']
del coord_3d['atom']
del coord_3d['connect_to_3d']
# combine dataframes into one
coord = pd.concat([coord_2d, coord_3d], axis=1)
coord.insert(0, column='id', value=id_list[item]) # add id value
# pd.concat([all_dataset, coord])
train_set = train_set.append(coord, ignore_index=True)
test_set = pd.DataFrame()
if set2 >= 1:
for item in set2_items:
# print(str(i) + ': ' + str(item)) # for debugging
[coord_2d, _, coord_3d, _] = get_df_database(id_list[item])
# remove unwanted data
del coord_2d['atom']
del coord_2d['connect_to_2d']
del coord_2d['2d_z']
del coord_3d['atom']
del coord_3d['connect_to_3d']
# combine dataframes into one
coord = pd.concat([coord_2d, coord_3d], axis=1)
            coord.insert(0, column='id', value=id_list[item])  # add id value
# pd.concat([all_dataset, coord])
test_set = test_set.append(coord, ignore_index=True)
if set2 >= 1:
return [train_set, test_set]
return train_set
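# Illustrative usage (not part of the original module); the sample sizes are
# arbitrary. Left commented out so nothing executes at import time.
# train_df, test_df = get_all_dataset(set1=100, set2=20)
# train_only = get_all_dataset(set1=50)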
def get_df_user(file_list):
"""
Prepare user input to correct format to feed into the model
:param file_list: list of file directory from the user
:type file_list list
:return: dataframe of compiled user input in correct format
:rtype pandas.DataFrame
"""
if False in [isinstance(file_list, list),
all(isinstance(n, str) for n in file_list),
all(n.endswith('.txt') for n in file_list)]:
raise TypeError('Input must be text files in a list.')
i = 0
user_set = pd.DataFrame()
for filename in file_list:
# read user input text file to dateframe and clean up data
[coord, bond] = get_df(filename, dim=2)
# check for invalid 2D coord
if any(coord['2d_z'] != 0):
            err_msg = 'check ' + filename + '! Z coord. in 2d must be all zero'
raise ValueError(err_msg)
[coord, bond] = trim_hydrogen(coord, bond)
print(len(coord))
# check for more than 4 atoms
if len(coord) < 4:
raise ValueError('The amount of non H atoms must be more than 3.')
coord = atom_periodic_number_convert(coord)
coord = atom_connect(coord, bond)
coord.insert(0, column='id', value=i) # add id value
i = i + 1
# trim unnecessary data
del coord['2d_z']
del coord['atom']
user_set = user_set.append(coord, ignore_index=True)
return user_set
def get_df_database(id_num, raw=False, hydrogen=False):
"""
Access the database folder using the id number
to get a list of dataframes contain 2D and 3D data
:param id_num: id number of the molecule
:param raw: return raw dataframes from web server without processing
:param hydrogen: return dataframes in without trimming Hydrogen
:type id_num: int, str
:type raw: bool
:type hydrogen: bool
:return coord_2d: atom 2D coordinates
:return bond_2d: atom 2D bonding types and arrangement
:return coord_3d: atom 3D coordinates
:return bond_3d: atom 3D bonding types and arrangement
:rtype coord_2d, bond_2d, coord_3d, bond_3d: pandas.DataFrame
"""
# check input type
if False in [isinstance(id_num, (int, str)),
isinstance(raw, bool)]:
raise TypeError()
# in case id is string type
if isinstance(id_num, str):
# check for numeric input
if not id_num.isdigit():
raise ValueError('Invalid ID number')
# cast to int if numeric
id_num = int(id_num)
# check valid id
if id_num not in get_id():
raise ValueError()
# get dataframe of 2d coord and bonding
filename_2d = str(id_num) + '_2d.txt'
[coord_2d, bond_2d] = get_df(DATABASE + '/' + filename_2d, dim=2)
# get dataframe of 3d coord and bonding
filename_3d = str(id_num) + '_3d.txt'
[coord_3d, bond_3d] = get_df(DATABASE + '/' + filename_3d, dim=3)
# trim hydrogen
if False is hydrogen:
[coord_2d, bond_2d] = trim_hydrogen(coord_2d, bond_2d)
[coord_3d, bond_3d] = trim_hydrogen(coord_3d, bond_3d)
# process the data if raw is not requested
if False is raw:
coord_2d = atom_periodic_number_convert(coord_2d)
coord_3d = atom_periodic_number_convert(coord_3d)
coord_2d = atom_connect(coord_2d, bond_2d)
coord_3d = atom_connect(coord_3d, bond_3d)
return [coord_2d, bond_2d, coord_3d, bond_3d]
def trim_hydrogen(coord_input, bond_input):
"""
Return a copy of the same dataframe after removing Hydorgen atom
:param coord_input: coordinate dataframe
:param bond_input: bond dataframe
:type coord_input: pandas.DataFrame
:return coord, bond: same as input but without H
:rtype coord, bond: pandas.DataFrame
"""
# check input type
if False in [isinstance(coord_input, pd.DataFrame),
isinstance(bond_input, pd.DataFrame)]:
raise TypeError()
coord = coord_input.copy()
bond = bond_input.copy()
dim = '_' + coord.columns.values[0][0:2]
period_name = 'periodic_#' + dim
if period_name not in coord_input:
coord = atom_periodic_number_convert(coord)
# trim row with periodic number of 1 (H)
coord = coord[coord[period_name] != 1]
coord = coord.reset_index(drop=True) # reset index
# trim row with connection to H
size = coord.count()[0]
bond = bond[bond['atom_2'] <= size]
bond = bond[bond['atom_1'] <= size]
bond = bond.reset_index(drop=True) # reset index
if period_name not in coord_input:
del coord[period_name]
return [coord, bond]
def atom_connect(coord_input, bond_input):
"""
Create array contains connection info to the atom
and put it into a new dataframe column
:param coord_input: dataframe to be updated with new column of connection
:param bond_input: dataframe contain atom pairs and the connections
:type coord_input: pandas.DataFrame
:type bond_input: pandas.DataFrame
:return coord same dataframe as coord_input with added column of connection
"""
# check input type
if False in [isinstance(coord_input, pd.DataFrame),
isinstance(bond_input, pd.DataFrame)]:
raise TypeError()
# check if result column already exist
if True in ['connect_to_2d' in coord_input.columns,
'connect_to_3d' in coord_input.columns]:
raise ValueError('connect_to column already existed')
coord = coord_input.copy()
bond = bond_input
dim = '_' + coord.columns.values[0][0:2]
# set up empty columns
connect_col = 'connect_to' + dim
coord[connect_col] = np.empty((len(coord), 0)).tolist()
coord['bond_1' + dim] = np.zeros(len(coord))
coord['bond_2' + dim] = np.zeros(len(coord))
coord['bond_3' + dim] = np.zeros(len(coord))
coord['bond_4' + dim] = np.zeros(len(coord))
# create a list of other atoms that the each atom connect to
for i in range(len(bond_input)):
atom_1 = bond['atom_1'][i]
atom_2 = bond['atom_2'][i]
bond_type = bond['bond_type'][i]
coord['bond_' + str(bond_type) + dim][atom_1 - 1] \
= coord['bond_' + str(bond_type) + dim][atom_1 - 1] + 1
coord['bond_' + str(bond_type) + dim][atom_2 - 1] \
= coord['bond_' + str(bond_type) + dim][atom_2 - 1] + 1
# duplication is for double and triple bond
for j in range(int(bond['bond_type'][i])):
# subtract 1 to shift values to zero-based
coord[connect_col][atom_1 - 1].append(atom_2 - 1)
coord[connect_col][atom_2 - 1].append(atom_1 - 1)
# convert to array and pad -1
max_bond_amount = 8 # based on sulfur
for i in range(len(coord)):
max_fill = max_bond_amount - len(coord[connect_col][i])
if max_fill > 0:
coord[connect_col][i] = np.pad(np.array(coord[connect_col][i]),
(0, max_fill),
'constant', constant_values=-1)
# reformatting
coord['bond_1' + dim] = coord['bond_1' + dim].astype('int32')
coord['bond_2' + dim] = coord['bond_2' + dim].astype('int32')
coord['bond_3' + dim] = coord['bond_3' + dim].astype('int32')
coord['bond_4' + dim] = coord['bond_4' + dim].astype('int32')
return coord
def atom_periodic_number_convert(coord_input):
"""
Add a new column contain periodic number of the corresponding atom
:param coord_input: coordinate dataframe of 2D or 3D data
:type: pandas.DataFrame
:return coord: same dataframe with added column of periodic number
"""
# check input type
if False in [isinstance(coord_input, pd.DataFrame)]:
raise TypeError()
# check if result column already exist
if True in ['periodic_#_2d' in coord_input.columns,
'periodic_#_3d' in coord_input.columns]:
raise ValueError('periodic_# column already existed')
element = dict({'C': 6, 'O': 8, 'H': 1, 'N': 7,
                    'Br': 35, 'S': 16, 'I': 53,
'F': 9, 'B': 5}) # periodic info
coord = coord_input.copy()
dim = '_' + coord.columns.values[0][0:2]
col_name = 'periodic_#' + dim
coord[col_name] = None
# find atom symbol and arrange the number
for i in range(coord.shape[0]):
for elem in element.keys():
if elem in coord['atom'][i]:
coord[col_name][i] = element[elem]
if None is coord[col_name][i]:
unk_atom = coord['atom'][i]
err_msg = unk_atom + ' element not in the dict, need to be added.'
raise ValueError(err_msg)
coord[col_name] = coord[col_name].astype('int32') # reformatting
return coord
def get_df(filename, dim=2):
"""
Extract the atom coordinates and bonding data from txt file
according to provided dimension
Can be used for both database and user input file
:param filename: text file name
:param dim: dimension of the molecule structure in the text file
:type filename: str
:type dim int
:return coord, bond: coordinate and bonding data from the text file
:rtype coord, bond: pandas.DataFrame
"""
# check input type
if False in [isinstance(filename, str),
filename.endswith('.txt'),
isinstance(dim, int)]:
raise TypeError()
# dimension input value
if dim not in [2, 3]:
raise ValueError('Invalid dimension!')
raw = pd.read_csv(filename)
# get the number of atoms to cut off the text rows
check = False
i = 0
atom = None
bond_amount = None
# go through each row
while check is False:
values = raw.iloc[i, 0].split()
i = i + 1
# check the first value to be int, which is number of atoms
if values[0].isdigit():
atom = int(values[0])
bond_amount = int(values[1])
check = True
# crop the dataframe according to number of atoms and number of bonds
raw_coord = pd.DataFrame(raw.iloc[i:i+atom, 0])
    raw_bond = pd.DataFrame(raw.iloc[i+atom:i+atom+bond_amount])
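    # --- hedged sketch of the remainder: the original function is truncated here ---
    # The whitespace-separated atom and bond lines still have to be split into the
    # named columns used elsewhere in this module ('2d_x'/'3d_x', 'atom',
    # 'atom_1', 'atom_2', 'bond_type') before returning the documented
    # [coord, bond] pair; the exact parsing in the original file may differ.
    prefix = str(dim) + 'd_'
    xyz_cols = [prefix + 'x', prefix + 'y', prefix + 'z']
    coord = raw_coord.iloc[:, 0].str.split(expand=True).iloc[:, 0:4]
    coord.columns = xyz_cols + ['atom']
    coord[xyz_cols] = coord[xyz_cols].astype(float)
    coord = coord.reset_index(drop=True)
    bond = raw_bond.iloc[:, 0].str.split(expand=True).iloc[:, 0:3].astype(int)
    bond.columns = ['atom_1', 'atom_2', 'bond_type']
    bond = bond.reset_index(drop=True)
    return [coord, bond]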
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
import os
import pandas as pd
def plot(directory,title,xlabel,ylabel):
root = os.path.expanduser(directory)
frames = []
for filename in os.listdir(root):
name, extension = os.path.splitext(filename)
if extension == ".csv":
try:
x, y = np.loadtxt(os.path.join(root,filename), skiprows=0, delimiter=';', unpack=True)
except:
print(("Could not load values from "+filename))
continue
frame = pd.DataFrame(data=y,columns=[name],index=x)
frames.append(frame)
    dataframe = pd.concat(frames, axis=1)
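    # --- hedged sketch of the remainder: the original function is truncated here ---
    # Presumably the combined frame is plotted and written to disk (the Agg
    # backend selected above cannot open an interactive window); the output
    # file name is an assumption.
    ax = dataframe.plot()
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    plt.savefig(os.path.join(root, 'plot.png'))
    plt.close()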
import plotly
import plotly.express as px
import plotly.graph_objects as go
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input,Output,State
from dash.exceptions import PreventUpdate
import os
import json
import urllib
import requests
import pandas as pd
import numpy as np
from flask import Flask, request, json, jsonify, make_response
from watson_machine_learning_client import WatsonMachineLearningAPIClient
import encodingdata
total_cropyield=[]
total_demand=[]
iyear_org=0
# app creation
app = dash.Dash(__name__,suppress_callback_exceptions=True)
server = app.server
# data for Map
with open('states_new.json') as f:
data_map = json.load(f)
# data for malnutrition
df_data = pd.read_csv("newimn.csv")
df1=pd.read_csv("in_avg_mal.csv")
dff = df_data.groupby('State/UT', as_index=False)[['Underweight','Stunting ','Wasting ']].sum()
print (dff[:5])
df2 = pd.read_csv('All_Ymalnutri.csv')
fig1 = go.Figure()
app.layout = html.Div([
html.Div(id='page-content',children=[
html.Div([html.H1("Demand and Supply Predictor", style={'text-align':'center'})]),
html.Div([
dcc.Location(id='url',refresh=False),
html.Br(),
dcc.Link('Go to Malnutrition',href='/page2'),
]),
html.Div([
dcc.Dropdown(id='istate',
options=[
{'label':'Andhra Pradesh','value':'Andhra Pradesh'},
{'label':'Arunachal Pradesh','value':'Arunachal Pradesh'},
{'label':'Assam','value':'Assam'},
{'label':'Bihar','value':'Bihar'},
{'label':'Chhattisgarh','value':'Chhattisgarh'},
{'label':'Goa','value':'Goa'},
{'label':'Gujarat','value':'Gujarat'},
{'label':'Haryana','value':'Haryana'},
{'label':'Himachal Pradesh','value':'Himachal Pradesh'},
{'label':'Jharkhand','value':'Jharkhand'},
{'label':'Karnataka','value':'Karnataka'},
{'label':'Kerala','value':'Kerala'},
{'label':'Madhya Pradesh','value':'Madhya Pradesh'},
{'label':'Maharastra','value':'Maharastra'},
{'label':'Manipur','value':'Manipur'},
{'label':'Meghalaya','value':'Meghalaya'},
{'label':'Mizoram','value':'Mizoram'},
{'label':'Nagaland','value':'Nagaland'},
{'label':'Odisha','value':'Odisha'},
{'label':'Punjab','value':'Punjab'},
{'label':'Rajasthan','value':'Rajasthan'},
{'label':'Sikkim','value':'Sikkim'},
{'label':'Tamil Nadu','value':'Tamil Nadu'},
{'label':'Tripura','value':'Tripura'},
{'label':'Uttar Pradesh','value':'Uttar Pradesh'},
{'label':'Uttarakhand','value':'Uttarakhand'},
{'label':'West Bengal','value':'West Bengal'},
{'label':'Jammu and Kashmir','value':'Jammu and Kashmir'},
{'label':'Chandigarh','value':'Chandigarh'},
{'label':'Delhi','value':'Delhi'},
                        {'label':'Dadra and Nagar Haveli','value':'Dadra and Nagar Haveli'},
{'label':'Puducherry','value':'Puducherry'},
{'label':'Andaman and Nicobar Islands','value':'Andaman and Nicobar Islands'}
],
multi=False,
placeholder='Select a State',
style={'width':'40%'}
),
html.Br(),
dcc.Dropdown(id='icrop',
options=[
{'label':'Rice','value':'Rice'},
{'label':'Wheat','value':'Wheat'},
{'label':'Jowar','value':'Jowar'},
{'label':'Bajra','value':'Bajra'},
{'label':'Potato','value':'Potato'},
{'label':'Tomato','value':'Tomato'},
{'label':'Onion','value':'Onion'}
],
multi=False,
placeholder='Select a Crop',
style={'width':'40%'}
),
html.Br(),
dcc.Input(id="iyear",
placeholder='Enter the Year',
type='number',
inputMode='numeric',
value='',
required=True
),
html.Br(),
dcc.Input(
id='iarea',
type='number',
placeholder='Enter the expected yield area',
),
html.Br(),
html.Button(id='predict',n_clicks=0,children='Predict'),
]),
html.Br(),
html.Br(),
html.Div([
html.H2('Demand-Supply India Map'),
dcc.Graph(id='country_map'),
],style={'float':'left','height':'100%','width':'50%'}),
html.Div([
html.H2('Demand-Supply Gap'),
dcc.Graph(id='demandsupply')
],style={'float':'right','height':'100%','width':'50%'}),
html.Br(),
html.Br(),
html.Div([
dcc.Graph(id='foodinsecurity')
],style={'float':'left','height':'100%','width':'50%'}),
html.Div([
html.H2('Model Suggestion'),
html.Div([
html.Div(id='riskoutput')
])
],style={'float':'right','height':'100%','width':'50%'}),
])])
# Malnutrition layout
mal_nutrition = html.Div([
html.Div(id='page-content-back',children=[
html.Div([
html.H1("Malnutrition in India", style={'text-align': 'center'}),
html.Div([
dcc.Location(id='url_back',refresh=False),
html.Br(),
dcc.Link('Go to Home',href='/page1'),
]),
dcc.Dropdown(id="slct_year",
options=[
{"label": "1992", "value": 1992},
{"label": "2005", "value": 2005},
{"label": "2015", "value": 2015}],
multi=False,
value=2015,
style={'width': "40%"}
),
html.Div(id='output_container', children=[]),
html.Br(),
dcc.Graph(id='my_bee_map', figure={})
],),
html.Div([
dash_table.DataTable(
id='datatable_id',
data=dff.to_dict('records'),
columns=[
{"name": i, "id": i, "deletable": False, "selectable": False} for i in dff.columns
],
editable=False,
# filter_action="native",
sort_action="native",
sort_mode="multi",
row_selectable="multi",
column_selectable="single",
selected_columns=[],
row_deletable=False,
selected_rows=[],
page_action="native",
page_current= 0,
page_size= 6,
# page_action='none',
# style_cell={
# 'whiteSpace': 'normal'
# },
# fixed_rows={ 'headers': True, 'data': 0 },
# virtualization=False,
style_cell_conditional=[
{'if': {'column_id': 'State/UT'},
'width': '100%', 'textAlign': 'left'},
{'if': {'column_id': 'Underweight'},
'width': '25%', 'textAlign': 'left','display':'none'},
{'if': {'column_id': 'Stunting '},
'width': '23%', 'textAlign': 'left','display':'none'},
{'if': {'column_id': 'Wasting '},
'width': '22%', 'textAlign': 'left','display':'none'},
],
),
],className='row'),
html.Div([
html.Div([
dcc.Dropdown(id='linedropdown',
options=[
{'label': 'Underweight', 'value': 'Underweight'},
{'label': 'Stunting', 'value': 'Stunting '},
{'label': 'Wasting', 'value': 'Wasting '}
],
value='Underweight',
multi=False,
clearable=False
),
],style={'width': '45%', 'margin-top':'50px', 'display': 'inline-block'}),
html.Div([
dcc.Dropdown(id='piedropdown',
options=[
{'label': 'Underweight', 'value': 'Underweight'},
{'label': 'Stunting', 'value': 'Stunting '},
{'label': 'Wasting', 'value': 'Wasting '}
],
value='Underweight',
multi=False,
clearable=False
),
],style={'width': '45%', 'margin-top':'-41px','float': 'right', 'display': 'block'})
        ],
        # style={'borderBottom': 'thin lightgrey solid',
        #        'backgroundColor': 'rgb(250, 250, 250)',
        #        'padding': '10px 5px'}
        ),
html.Div([
html.Div([
dcc.Graph(id='linechart'),
],style={'width': '49%', 'margin-top':'0px', 'display': 'inline-block'}),
html.Div([
dcc.Graph(id='piechart'),
],style={'width': '49%','margin-top':'15px','float': 'right', 'display': 'inline-block'}),
html.Div([
dcc.Graph(id='barchart'),
],style={'width': '49%', 'margin-top':'0px', 'display': 'inline-block'})
],className='row'),
])])
# METHODS
def demand_model_access(access_token,n_clicks,istate_val,iyear_val,icrop_val,state_boundary):
boundary_demand = []
demand_predictions = []
model_url = "https://eu-gb.ml.cloud.ibm.com/v3/wml_instances/0ca61b2e-96dc-4749-9cce-9d719e87e7f2/deployments/afc09fac-32dd-4ec5-972a-e28ec48c19e8/online"
instance_id = "0ca61b2e-96dc-4749-9cce-9d719e87e7f2"
call_headers = {'Content-Type': 'application/json',"Authorization":"Bearer "+access_token,"ML-Instance-ID":instance_id}
for state in state_boundary:
encoded_demand_data = encodingdata.encode_demand_data(n_clicks,state,iyear_val,icrop_val)
print(encoded_demand_data)
dstate = float(encoded_demand_data[0][0])
dyear = float(encoded_demand_data[0][1])
dcrop = float(encoded_demand_data[0][2])
demand_payload = {"fields":["State","Year","Crop"],"values":[[dstate,dyear,dcrop]]}
result = requests.post(url=model_url,json=demand_payload,headers=call_headers)
result_json = json.loads(result.text)
boundary_demand.append(result_json['values'][0][0][0])
for i in range(0,3):
encoded_demand_data = encodingdata.encode_demand_data(n_clicks,istate_val,iyear_val,icrop_val)
print(encoded_demand_data)
dstate = float(encoded_demand_data[0][0])
dyear = float(encoded_demand_data[0][1])
dcrop = float(encoded_demand_data[0][2])
demand_payload = {"fields":["State","Year","Crop"],"values":[[dstate,dyear,dcrop]]}
result = requests.post(url=model_url,json=demand_payload,headers=call_headers)
result_json = json.loads(result.text)
demand_predictions.append(result_json['values'][0][0][0])
iyear_val = iyear_val+1
return demand_predictions,boundary_demand
def demand_access_token_generator():
api_key = "<KEY>"
header = {"content-type":"application/x-www-form-urlencoded"}
post_param = urllib.parse.urlencode({
"apikey":api_key,
"grant_type":"urn:ibm:params:oauth:grant-type:apikey"
}).encode("UTF-8")
url = "https://iam.cloud.ibm.com/identity/token"
    response = requests.post(url, data=post_param, headers=header)
#print(response.text)
return json.loads(response.text)
def crop_model_access(access_token,n_clicks,istate_val,iyear_val,icrop_val,iarea_val,state_boundary):
crop_predictions = []
boundary_supply = []
model_url = "https://us-south.ml.cloud.ibm.com/v3/wml_instances/1e6bd516-623a-4e64-bdb6-1ea860a312ca/deployments/96111954-7717-4086-968d-5e26ddaf9854/online"
instance_id = "1e6bd516-623a-4e64-bdb6-1ea860a312ca"
call_headers = {'Content-Type': 'application/json',"Authorization":"Bearer "+access_token,"ML-Instance-ID":instance_id}
for state in state_boundary:
encoded_crop_data = encodingdata.encode_crop_data(n_clicks,state,iyear_val,icrop_val,iarea_val)
cstate = float(encoded_crop_data[0][0])
cyear = float(encoded_crop_data[0][1])
ccrop = float(encoded_crop_data[0][2])
carea = float(encoded_crop_data[0][3])
crop_payload = {"fields":["State","Year","Crop","Area"],"values":[[cstate,cyear,ccrop,carea]]}
result = requests.post(model_url,json=crop_payload,headers=call_headers)
result_json = json.loads(result.text)
boundary_supply.append(result_json['values'][0][0][0])
for i in range(0,3):
encoded_crop_data = encodingdata.encode_crop_data(n_clicks,istate_val,iyear_val,icrop_val,iarea_val)
cstate = float(encoded_crop_data[0][0])
cyear = float(encoded_crop_data[0][1])
ccrop = float(encoded_crop_data[0][2])
carea = float(encoded_crop_data[0][3])
crop_payload = {"fields":["State","Year","Crop","Area"],"values":[[cstate,cyear,ccrop,carea]]}
result = requests.post(model_url,json=crop_payload,headers=call_headers)
result_json = json.loads(result.text)
crop_predictions.append(result_json['values'][0][0][0])
iyear_val = iyear_val + 1
return crop_predictions,boundary_supply
def crop_access_token_generator():
api_key = "<KEY>"
header = {"content-type":"application/x-www-form-urlencoded"}
post_param = urllib.parse.urlencode({
"apikey":api_key,
"grant_type":"urn:ibm:params:oauth:grant-type:apikey"
}).encode("UTF-8")
url = "https://iam.cloud.ibm.com/identity/token"
    response = requests.post(url, data=post_param, headers=header)
#print(response.text)
return json.loads(response.text)
def defining_stateboundaries():
indian_states = ['Andhra Pradesh', 'Arunachal Pradesh', 'Assam', 'Bihar', 'Chhattisgarh', 'Goa', 'Gujarat', 'Haryana', 'Himachal Pradesh', 'Jharkhand', 'Karnataka', 'Kerala', 'Madhya Pradesh', 'Maharastra', 'Manipur', 'Meghalaya', 'Mizoram', 'Nagaland', 'Odisha', 'Punjab', 'Rajasthan', 'Sikkim', 'Tamil Nadu', 'Tripura', 'Uttar Pradesh', 'Uttarakhand', 'West Bengal', 'Jammu and Kashmir', 'Chandigarh', 'Delhi', 'Dadra and Nagar Haveli', 'Puducherry', 'Andaman and Nicobar Islands']
state_boundaries = dict.fromkeys(indian_states)
state_boundaries['Kerala'] = ['Tamil Nadu','Karnataka','Andhra Pradesh','Goa','Maharastra']
state_boundaries['Tamil Nadu'] = ['Kerala','Karnataka','Andhra Pradesh','Goa','Maharastra']
state_boundaries['Karnataka'] = ['Tamil Nadu','Kerala','Goa','Andhra Pradesh','Maharastra']
state_boundaries['Maharastra'] = ['Karnataka','Madhya Pradesh','Goa','Andhra Pradesh','Chhattisgarh','Gujarat']
state_boundaries['Goa'] = ['Karnataka','Maharastra','Kerala','Andhra Pradesh','Tamil Nadu']
state_boundaries['Andhra Pradesh'] = ['Karnataka','Maharastra','Tamil Nadu','Odisha','Chhattisgarh']
state_boundaries['Gujarat'] = ['Rajasthan','Madhya Pradesh','Maharastra','Uttar Pradesh','Haryana']
state_boundaries['Madhya Pradesh'] = ['Maharastra','Rajasthan','Uttar Pradesh','Chhattisgarh','Gujarat']
state_boundaries['Chhattisgarh'] = ['Odisha','Jharkhand','Madhya Pradesh','Maharastra','Uttar Pradesh']
state_boundaries['Odisha'] = ['Chhattisgarh','Jharkhand','West Bengal','Andhra Pradesh','Maharastra']
state_boundaries['Rajasthan'] = ['Gujarat','Madhya Pradesh','Uttar Pradesh','Haryana','Punjab']
state_boundaries['Uttar Pradesh'] = ['Madhya Pradesh','Rajasthan','Bihar','Haryana','Uttarakhand','Chhattisgarh']
state_boundaries['Bihar'] = ['Uttar Pradesh','Jharkhand','Sikkim','Madhya Pradesh','West Bengal','Chhattisgarh']
state_boundaries['Jharkhand'] = ['Bihar','West Bengal','Odisha','Chhattisgarh','Uttar Pradesh']
state_boundaries['West Bengal'] = ['Jharkhand','Odisha','Bihar','Sikkim','Assam']
state_boundaries['Haryana'] = ['Punjab','Rajasthan','Uttar Pradesh','Uttarakhand','Himachal Pradesh']
state_boundaries['Punjab'] = ['Haryana','Rajasthan','Himachal Pradesh','Jammu and Kashmir','Uttarakhand','Uttar Pradesh']
state_boundaries['Himachal Pradesh'] = ['Punjab','Uttarakhand','Jammu and Kashmir','Haryana','Uttar Pradesh']
state_boundaries['Jammu and Kashmir'] = ['Himachal Pradesh','Punjab','Uttarakhand','Haryana','Uttar Pradesh','Rajasthan']
state_boundaries['Uttarakhand'] = ['Uttar Pradesh','Himachal Pradesh','Haryana','Punjab','Rajasthan','Jammu and Kashmir']
state_boundaries['Sikkim'] = ['West Bengal','Assam','Bihar','Meghalaya','Jharkhand']
state_boundaries['Assam'] = ['Meghalaya','Nagaland','Arunachal Pradesh','West Bengal','Manipur','Mizoram']
state_boundaries['Arunachal Pradesh'] = ['Assam','Nagaland','Manipur','Meghalaya','Mizoram','Tripura']
state_boundaries['Meghalaya'] = ['Assam','West Bengal','Tripura','Mizoram','Manipur','Nagaland']
state_boundaries['Nagaland'] = ['Assam','Manipur','Arunachal Pradesh','Meghalaya','Mizoram']
state_boundaries['Manipur'] = ['Assam','Nagaland','Mizoram','Meghalaya','Tripura']
state_boundaries['Mizoram'] = ['Manipur','Tripura','Assam','Meghalaya','Nagaland']
state_boundaries['Tripura'] = ['Mizoram','Assam','Manipur','Meghalaya','Nagaland']
state_boundaries['Delhi'] = ['Uttar Pradesh','Haryana','Rajasthan','Madhya Pradesh','Punjab']
state_boundaries['Dadra and Nagar Haveli'] = ['Gujarat','Maharastra','Madhya Pradesh','Rajasthan','Goa']
state_boundaries['Puducherry'] = ['Tamil Nadu','Andhra Pradesh','Kerala','Karnataka','Odisha']
state_boundaries['Andaman and Nicobar Islands'] = ['Andhra Pradesh','Tamil Nadu','Odisha','Kerala','West Bengal']
state_boundaries['Chandigarh'] = ['Punjab','Haryana','Himachal Pradesh','Uttarakhand','Uttar Pradesh']
return indian_states,state_boundaries
#page 1 - Home page
@app.callback(
[Output(component_id='country_map',component_property='figure'),
Output('demandsupply','figure'),
Output(component_id='foodinsecurity',component_property='figure'),
Output('riskoutput','children')],
[Input(component_id='predict',component_property='n_clicks')],
[State(component_id='istate',component_property='value'),
State(component_id='icrop',component_property='value'),
State(component_id='iyear',component_property='value'),
State(component_id='iarea',component_property='value')]
)
def update_output(n_clicks,istate_val,icrop_val,iyear_val,iarea_val):
global total_cropyield,total_demand,iyear_org
if (istate_val is None) or (icrop_val is None) or (iyear_val is None):
raise PreventUpdate
else:
print(istate_val)
print(type(istate_val))
print(icrop_val)
print(type(icrop_val))
print(iyear_val)
print(type(iyear_val))
print(iarea_val)
print(type(iarea_val))
iyear_org = iyear_val
# boundaries
states,state_boundaries = defining_stateboundaries()
state_list = state_boundaries[istate_val]
# Demand Model
print("In Demand Model")
access_token_result = demand_access_token_generator()
access_token = access_token_result['access_token']
model_output,boundary_output = demand_model_access(access_token,n_clicks,istate_val,iyear_val,icrop_val,state_list)
#demand_prediction = model_output['values'][0][0]
total_boundary_demand = []
total_demand = []
for i in range(0,3):
val = np.array(model_output[i])
temp = encodingdata.decode_demand_data(val)
demand = temp[0][0]
total_demand.append(demand)
print("Demand Prediction for Year {} is {}".format(i,demand))
for i in range(0,len(boundary_output)):
val = np.array(boundary_output[i])
temp = encodingdata.decode_demand_data(val)
demand = temp[0][0]
total_boundary_demand.append(demand)
print("{} Demand Prediction for Year {} is {}".format(state_list[i],i,demand))
#Crop Model
print("In Crop Model")
iyear_val = iyear_org
access_token_result = crop_access_token_generator()
access_token = access_token_result['access_token']
model_output1,boundary_output1 = crop_model_access(access_token,n_clicks,istate_val,iyear_val,icrop_val,iarea_val,state_list)
total_cropyield = []
total_boundary_supply = []
for i in range(0,3):
val = np.array(model_output1[i])
temp = encodingdata.decode_crop_data(val)
cropyield = temp[0][0]
total_cropyield.append(cropyield)
print("Crop Yield Prediction for Year {} is {}".format(i,cropyield))
for i in range(0,len(boundary_output1)):
val = np.array(boundary_output1[i])
temp = encodingdata.decode_crop_data(val)
cropyield = temp[0][0]
total_boundary_supply.append(cropyield)
print("{} Crop Yield Prediction for Year {} is {}".format(state_list[i],i,cropyield))
colour = plotly.colors.qualitative.Set1
red = colour[0]
green = colour[2]
yellow = colour[5]
gauge_value = 0
suggestion = 5
diff = total_cropyield[0] - total_demand[0]
if (diff > 1000):
colour_final = yellow
gauge_value = 0
suggestion = 1
elif (diff < 0):
colour_final = red
gauge_value = 100
suggestion = -1
elif(0<diff<1000):
colour_final = green
gauge_value = 50
suggestion = 0
colour_final = [[1,green],[1,yellow],[1,red]]
boundary_gap = [0] * len(total_boundary_demand)
# calculating the boundary states Demand and Supply Gaps
for i in range(0,len(total_boundary_demand)):
boundary_gap[i] = total_boundary_supply[i] - total_boundary_demand[i]
# classifying states into the import/export category for the commodity
export_import_condition = []
for i in range(0,len(boundary_gap)):
if boundary_gap[i]>1000:
export_import_condition.append(1)
elif (boundary_gap[i]<0):
export_import_condition.append(-1)
else:
export_import_condition.append(0)
#states_map = states
import_to = []
import_from = []
demand_list = [0] * len(states)
if suggestion == 1:
print("enters the I and E part")
for i in range(0,len(export_import_condition)):
if export_import_condition[i] == -1:
import_to.append(i)
value_to_state = []
for i in range(0,len(import_to)):
value_to_state.append(state_list[import_to[i]])
print("STSTAE NAMES")
print(value_to_state)
index_of_boundary_states =[]
index_of_input_state = states.index(istate_val)
for state in value_to_state:
index_of_boundary_states.append(states.index(state))
demand_list[index_of_input_state] = suggestion
for i in range(0,len(index_of_boundary_states)):
demand_list[index_of_boundary_states[i]] = -1
print("\n\n DEMAND LIST")
print(demand_list)
elif suggestion== -1:
print("entered the suggestion negative value")
for i in range(0,len(export_import_condition)):
if export_import_condition[i] == 1:
import_from.append(i)
value_to_state = []
for i in range(0,len(import_from)):
value_to_state.append(state_list[import_from[i]])
print("STSTAE NAMES")
print(value_to_state)
index_of_boundary_states =[]
index_of_input_state = states.index(istate_val)
for state in value_to_state:
index_of_boundary_states.append(states.index(state))
demand_list[index_of_input_state] = suggestion
for i in range(0,len(index_of_boundary_states)):
demand_list[index_of_boundary_states[i]] = 1
print("\n\n DEMAND LIST")
print(demand_list)
        df = pd.DataFrame({'id': states, 'Demand': demand_list})
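        # --- hedged sketch of the remainder: the original callback is truncated here ---
        # The callback declares four outputs (country_map, demandsupply,
        # foodinsecurity, riskoutput), so presumably it builds a choropleth from
        # `df` and the `data_map` geojson, a demand/supply bar chart for the three
        # predicted years, a gauge for food-insecurity risk and a text suggestion.
        # Figure details (e.g. the geojson featureidkey) are assumptions.
        map_fig = px.choropleth(df, geojson=data_map, locations='id',
                                color='Demand', featureidkey='properties.st_nm')
        map_fig.update_geos(fitbounds='locations', visible=False)
        years = [iyear_org, iyear_org + 1, iyear_org + 2]
        gap_fig = go.Figure([go.Bar(name='Demand', x=years, y=total_demand),
                             go.Bar(name='Supply', x=years, y=total_cropyield)])
        risk_fig = go.Figure(go.Indicator(mode='gauge+number', value=gauge_value,
                                          gauge={'axis': {'range': [0, 100]}}))
        suggestion_text = {1: 'Surplus expected: consider exporting to neighbouring states.',
                           0: 'Demand and supply are roughly balanced.',
                           -1: 'Deficit expected: consider importing from neighbouring states.'
                           }.get(suggestion, 'No suggestion available.')
        return map_fig, gap_fig, risk_fig, suggestion_text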
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from collections import OrderedDict
import gc
from current_clamp import *
from current_clamp_features import extract_istep_features
from visualization.feature_annotations import feature_name_dict
from read_metadata import *
from file_io import load_current_step
# from pymysql import IntegrityError
import datajoint as dj
schema = dj.schema('yueqi_ephys', locals())
FIG_DIR = 'analysis_current_clamp/figures_plot_recording'
'''
class DjImportedFromDirectory(dj.Imported):
# Subclass of Imported. Initialize with data directory.
def __init__(self, directory=''):
self.directory = directory
super().__init__()
'''
@schema
class EphysExperimentsForAnalysis(dj.Manual):
definition = """
# Ephys experiments (excel files) for analysis
experiment: varchar(128) # excel files to use for analysis
---
project: varchar(128) # which project the data belongs to
use: enum('Yes', 'No') # whether to use this experiment
directory: varchar(256) # the parent project directory
"""
def insert_experiment(self, excel_file):
'''
Insert new sample ephys metadata from excel to datajoint tables
'''
entry_list = pd.read_excel(excel_file)[['experiment', 'project', 'use', 'directory']].dropna(how='any')
entry_list = entry_list.to_dict('records')
no_insert = True
for entry in entry_list:
if entry['use'] == 'No':
continue
self.insert1(row=entry, skip_duplicates=True)
no_insert = False
#print("Inserted: " + str(entry))
if no_insert:
print("No new entry inserted.")
return
@schema
class Animals(dj.Imported):
definition = """
# Sample metadata
-> EphysExperimentsForAnalysis
---
id: varchar(128) # organod ID (use date, but need better naming)
strain : varchar(128) # genetic strain
dob = null: date # date of birth
date = null: date # recording date
age = null: smallint # nunmber of days (date - dob)
slicetype: varchar(128) # what kind of slice prep
external: varchar(128) # external solution
internal: varchar(128) # internal solution
animal_comment = '': varchar(256) # general comments
"""
def _make_tuples(self, key):
ephys_exp = (EphysExperimentsForAnalysis() & key).fetch1()
directory = os.path.expanduser(ephys_exp.pop('directory', None))
print('Populating for: ', key)
animal_info, _ = read_ephys_info_from_excel_2017(
os.path.join(directory, key['experiment'] + '.xlsx'))
key['id'] = animal_info['id']
key['strain'] = animal_info['strain']
if not pd.isnull(animal_info['DOB']): key['dob'] = animal_info['DOB']
if not pd.isnull(animal_info['age']): key['age'] = animal_info['age']
key['date'] = animal_info['date']
key['slicetype'] = animal_info['type']
key['external'] = animal_info['external']
key['internal'] = animal_info['internal']
        if not pd.isnull(animal_info['comment']): key['animal_comment'] = animal_info['comment']
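        # --- hedged sketch: the original row is truncated here ---
        # A datajoint _make_tuples() implementation normally finishes by inserting
        # the populated key, as done elsewhere in this file; any extra logging in
        # the original may differ.
        self.insert1(row=key)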
import argparse
from tqdm import tqdm
import re
import os
import json
import pandas as pd
from collections import Counter
pd.set_option('display.max_rows', 800)
pd.set_option('display.max_columns', 800)
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
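# Illustrative example (not part of the original test suite): two sids whose
# estimates become known on different days, forward-filled through the end date.
# Kept commented out so it does not run on import.
# expected = create_expected_df_for_factor_compute(
#     start_date=pd.Timestamp('2015-01-05'),
#     sids=[0, 1],
#     tuples=[(0, 10.0, pd.Timestamp('2015-01-05')),
#             (1, 20.0, pd.Timestamp('2015-01-06'))],
#     end_date=pd.Timestamp('2015-01-06'),
# )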
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
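# --- Illustrative helper (not used by the tests) ------------------------------
# The "2 quarters back" values filled in above follow ordinary fiscal-quarter
# arithmetic: one quarter before (2015, Q1) is (2014, Q4), and one quarter
# before (2015, Q2) is (2015, Q1). A minimal sketch of that arithmetic:
def _shift_fiscal_quarter_sketch(year, quarter, shift):
    """Shift (year, quarter) by ``shift`` quarters; negative shifts go back."""
    idx = year * 4 + (quarter - 1) + shift
    return idx // 4, idx % 4 + 1
# e.g. _shift_fiscal_quarter_sketch(2015, 1, -1) == (2014, 4)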
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
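# --- Illustrative sketch (not part of the test suite) -------------------------
# What the varying-event-date cases above exercise, boiled down: a row only
# becomes visible once its knowledge timestamp has passed, so shifting an event
# date changes *when* the next quarter's values may start overwriting, never
# what was knowable earlier. The literal column names below ("timestamp",
# "sid", "fiscal_quarter") are stand-ins for the TS/SID/FISCAL_QUARTER field
# constants used by the real loaders.
def _knowable_rows_sketch(events, as_of):
    """Latest known row per (sid, fiscal quarter) as of ``as_of``."""
    visible = events[events["timestamp"] <= as_of]
    return visible.sort_values("timestamp").drop_duplicates(
        ["sid", "fiscal_quarter"], keep="last"
    )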
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
| pd.Timestamp("2015-01-12") | pandas.Timestamp |
"""
Copyright 2021, Institute e-Austria, Timisoara, Romania
http://www.ieat.ro/
Developers:
* <NAME>, <EMAIL>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import importlib
from edelogger import logger
from datetime import datetime
import time
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, cross_validate, cross_val_score, StratifiedShuffleSplit
from sklearn.ensemble import IsolationForest
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.neural_network import MLPClassifier
from sklearn import model_selection
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.cluster import DBSCAN
from sklearn.metrics import make_scorer, SCORERS, get_scorer, classification_report, confusion_matrix, accuracy_score
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from imblearn.metrics import classification_report_imbalanced
from yellowbrick.model_selection import LearningCurve, ValidationCurve, RFECV
from yellowbrick.classifier import PrecisionRecallCurve, ROCAUC
from yellowbrick.contrib.classifier import DecisionViz
from yellowbrick.style import set_palette
import matplotlib.pyplot as plt
import seaborn as sns
import yaml
import joblib
import pickle as pickle
from util import str2Bool
import glob
from util import ut2hum
import shap
from evolutionary_search import EvolutionaryAlgorithmSearchCV
from edetensorflow.edetensor import dnn_aspide
import itertools
pd.options.mode.chained_assignment = None
import warnings
warnings.filterwarnings("ignore")
# set color palette yellowbrick
set_palette('sns_deep')
class SciClassification:
def __init__(self, modelDir,
dataDir,
checkpoint,
export,
training,
validation,
validratio,
compare,
cv=None,
verbose=False,
learningcurve=None,
validationcurve=None,
prc=None,
rocauc=None,
rfe=None,
dboundary=None,
pred_analysis=False,
trainscore=False,
scorers=None,
returnestimators=False):
self.modelDir = modelDir
self.dataDir = dataDir
self.checkpoint = checkpoint
self.export = export
self.training = training
self.validation = validation
self.validratio = validratio
self.compare = compare
self.cv = cv
self.verbose = verbose
self.learningcurve = learningcurve
self.validationcurve = validationcurve
self.prc = prc
self.rocauc = rocauc
self.rfe = rfe
self.dboundary = dboundary
self.pred_analysis = pred_analysis
self.trainscore = trainscore
self.scorers = scorers
self.returnestimators = returnestimators
self.skscorer = 'sklearn.metrics'
self.sksplitgen = 'sklearn.model_selection'
def detect(self, method, model, data):
smodel = self.__loadClassificationModel(method, model)
anomalieslist = []
if not smodel:
dpredict = 0
else:
if data.shape[0]:
if isinstance(smodel, RandomForestClassifier):
print("Detected RandomForest model")
print("n_estimators -> %s" % smodel.n_estimators)
print("Criterion -> %s" % smodel.criterion)
print("Max_Features -> %s" % smodel.max_features)
print("Max_Depth -> %s" % smodel.max_depth)
print("Min_sample_split -> %s " % smodel.min_samples_split)
print("Min_sample_leaf -> %s " % smodel.min_samples_leaf)
print("Min_weight_fraction_leaf -> %s " % smodel.min_weight_fraction_leaf)
print("Max_leaf_nodes -> %s " % smodel.max_leaf_nodes)
print("Min_impurity_split -> %s " % smodel.min_impurity_split)
print("Bootstrap -> %s " % smodel.bootstrap)
print("Oob_score -> %s " % smodel.oob_score)
print("N_jobs -> %s " % smodel.n_jobs)
print("Random_state -> %s " % smodel.random_state)
print("Verbose -> %s " % smodel.verbose)
print("Class_weight -> %s " % smodel.class_weight)
try:
dpredict = smodel.predict(data)
print("RandomForest Prediction Array -> %s" % str(dpredict))
except Exception as inst:
logger.error('[%s] : [ERROR] Error while fitting randomforest model to event with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
dpredict = 0
elif isinstance(smodel, AdaBoostClassifier):
print("Detected AdaBoost model")
print("base_estimator -> %s" % smodel.base_estimator)
print("n_estimators -> %s" % smodel.n_estimators)
print("Learning_rate -> %s" % smodel.learning_rate)
print("Algorithm -> %s" % smodel.algorithm)
print("Random State -> %s" % smodel.random_state)
try:
                        dpredict = smodel.predict(data)
print("AdaBoost Prediction Array -> %s" % str(dpredict))
except Exception as inst:
logger.error('[%s] : [ERROR] Error while fitting AdaBoost model to event with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
sys.exit(1)
elif isinstance(smodel, DecisionTreeClassifier):
print("Detected Decision Tree model")
print("Criterion -> %s" % smodel.criterion)
print("Spliter -> %s" % smodel.splitter)
print("Max_Depth -> %s" % smodel.max_depth)
print("Min_sample_split -> %s " % smodel.min_samples_split)
print("Min_sample_leaf -> %s " % smodel.min_samples_leaf)
print("Min_weight_fraction_leaf -> %s " % smodel.min_weight_fraction_leaf)
print("Max_Features -> %s" % smodel.max_features)
print("Random_state -> %s " % smodel.random_state)
print("Max_leaf_nodes -> %s " % smodel.max_leaf_nodes)
print("Min_impurity_split -> %s " % smodel.min_impurity_split)
print("Class_weight -> %s " % smodel.class_weight)
try:
                        dpredict = smodel.predict(data)
print("Decision Tree Prediction Array -> %s" % str(dpredict))
except Exception as inst:
logger.error('[%s] : [ERROR] Error while fitting Decision Tree model to event with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
sys.exit(1)
elif isinstance(smodel, MLPClassifier):
print("Detected Neural Network model")
print("Hidden Layer size -> %s" % str(smodel.hidden_layer_sizes))
print("Activation -> %s" % smodel.activation)
print("Solver -> %s" % smodel.solver)
print("Alpha -> %s" % smodel.alpha)
print("Batch Size -> %s" % smodel.batch_size)
print("Learning rate -> %s" % smodel.learning_rate)
print("Max Iterations -> %s" % smodel.max_iter)
print("Shuffle -> %s" % smodel.shuffle)
print("Momentum -> %s" % smodel.momentum)
print("Epsilon -> %s" % smodel.epsilon)
try:
                        dpredict = smodel.predict(data)
print("MLP Prediction Array -> %s" % str(dpredict))
except Exception as inst:
logger.error('[%s] : [ERROR] Error while fitting MLP model to event with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst),
inst.args)
sys.exit(1)
else:
                    logger.error('[%s] : [ERROR] Unsupported model loaded: %s!',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(smodel))
sys.exit(1)
else:
dpredict = 0
logger.warning('[%s] : [WARN] Dataframe empty with shape (%s,%s)',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(data.shape[0]),
str(data.shape[1]))
print("Empty dataframe received with shape (%s,%s)" % (str(data.shape[0]),
str(data.shape[1])))
print("dpredict type is %s" % (type(dpredict)))
if type(dpredict) is not int:
data['AType'] = dpredict
for index, row in data.iterrows():
anomalies = {}
if row['AType'] != 0:
print(index)
                    print(row['AType'])
anomalies['utc'] = int(index)
anomalies['hutc'] = ut2hum(int(index))
                    anomalies['anomaly_type'] = row['AType']
anomalieslist.append(anomalies)
anomaliesDict = {}
anomaliesDict['anomalies'] = anomalieslist
logger.info('[%s] : [INFO] Detected anomalies with model %s using method %s are -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), model, method,
str(anomaliesDict))
return anomaliesDict
def dask_detect(self, method,
model,
data,
normal_label=None):
smodel = self.__loadClassificationModel(method=method, model=model)
anomaliesList = []
nl = 0
explainer = 0
plot = False
if not smodel:
dpredict = 0
else:
if data.shape[0]:
try:
logger.info('[{}] : [INFO] Loading predictive model {} '.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(smodel).split('(')[0]))
for k, v in smodel.get_params().items():
logger.info('[{}] : [INFO] Predict model parameter {} set to {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), k, v))
dpredict = smodel.predict(data)
except Exception as inst:
logger.error('[{}] : [ERROR] Failed to load predictive model with {} and {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args))
dpredict = 0
else:
dpredict = 0
logger.warning('[{}] : [WARN] DataFrame is empty with shape {} '.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(data.shape)))
anomaliesDict = {}
if type(dpredict) is not int:
if normal_label is None: # Todo make normal_label user definable
nl = 0
else:
nl = normal_label
anomalyArray = np.argwhere(dpredict != nl) # Pandas bug where np.argwhere not working on dataframes
if self.pred_analysis and anomalyArray.shape[0]:
try:
plot = self.pred_analysis['Plot']
except Exception:
plot = False
df_anomaly_data = data.copy(deep=True) # copy for second filter using pandas
df_anomaly_data['target'] = dpredict
anomaliesDict['complete_shap_analysis'], explainer, shap_values = self.__shap_analysis(smodel,
df_anomaly_data,
normal_value=nl,
plot=plot)
count = 0 # todo merge filtering of dpred detection; related to np.argwhere bug for pandas and __shap_analysis data refiltering
for an in anomalyArray:
anomalies = {}
anomalies['utc'] = int(data.iloc[an[0]].name)
anomalies['hutc'] = ut2hum(int(data.iloc[an[0]].name))
anomalies['type'] = str(dpredict[an[0]])
if explainer:
anomalies['analysis'] = self.__shap_values_processing(explainer=explainer,
shap_values=shap_values,
label=dpredict[an[0]],
feature_names=data.columns,
instance=count)
if plot and count < 10: # todo make number of force plots user definable
self.__shap_force_plot(explainer=explainer, shap_values=shap_values, data=data,
label=dpredict[an[0]], instance=count, utc=anomalies['utc'])
anomaliesList.append(anomalies)
count += 1
anomaliesDict['anomalies'] = anomaliesList
logger.info('[{}] : [INFO] Detected {} anomalies with model {} using method {} '.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), len(anomaliesList), model,
str(smodel).split('(')[0]))
return anomaliesDict
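    # Shape of the dictionary returned by dask_detect above (keys taken from
    # the assignments in the method body; the metric name is a placeholder):
    #   {"complete_shap_analysis": {...},        # only when pred_analysis is set
    #    "anomalies": [{"utc": 1611590400, "hutc": "...", "type": "1",
    #                   "analysis": {"shap_values": {"cpu_load": 0.7, ...},
    #                                "expected_value": ...}},  # "analysis" only when SHAP ran
    #                  ...]}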
def __shap_analysis(self,
model,
data,
normal_value,
plot):
# todo use non tokenized labels for data
"""
Shap analysis of incoming data
:param model: Predictive model to be analyzed
:param data: data for analysis
:param normal_value: denotes the normal (majority) class label
:return: feature importance
"""
logger.info('[%s] : [INFO] Executing classification prediction analysis ...',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
data_filtered = data.loc[data['target'] != normal_value]
data_filtered.drop(['target'], inplace=True, axis=1)
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(data_filtered)
try:
labels = model.classes_
except Exception as inst:
            logger.error('[%s] : [ERROR] Prediction analysis failed with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
return 0
feature_imp = self.__shap_feature_importance(shap_values=shap_values,
data=data,
label=labels)
if plot:
self.__shap_summary_plot(shap_values, data_filtered, labels)
return feature_imp, explainer, shap_values
def __shap_values_processing(self,
explainer,
shap_values,
feature_names,
label,
instance):
"""
Used to export data as used by shap.force_plot on a per detection basis
:param explainer: Shape explainer object
:param shap_values: shapely values
:param feature_names: name of features from dataset
:param label: label after detection
:param instance: instance number as used in df.iloc
:return: shap_values on a per detection instance basis
"""
shap_values_d = {}
try:
shap_values_d['shap_values'] = dict(zip(feature_names, shap_values[label][instance]))
shap_values_d['expected_value'] = explainer.expected_value[label]
except Exception as inst:
logger.error('[{}] : [ERROR] Error while executing shap processing with {} and {} '.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args))
return shap_values_d
def __shap_summary_plot(self,
shap_values,
data,
label):
"""
        Summary plots are a different representation of feature importance, generated on a per-class basis.
:param shap_values: Shapley values.
        :param data: Data collected from monitoring or local file defined by the user. Only anomalous instances are to be considered
:param label: List of anomaly labels
"""
count = 0
for l in label:
shap.summary_plot(shap_values[count], data, class_names=label, title=f"Summary Plot for class {l}",
show=False)
            shap_summary_f = f"Shap_classification_summary_class_{l}_{time.time()}.png"
plt.savefig(os.path.join(self.modelDir, shap_summary_f), bbox_inches="tight")
plt.close()
count += 1
def __shap_force_plot(self,
explainer,
shap_values,
data,
label,
instance,
utc):
"""
Shap based force plot to check feature value impact on particular prediction instance.
:param explainer: Shap Explainer
:param shap_values: Shapely values
        :param data: Data collected from monitoring or local file defined by the user. Only anomalous instances are to be considered
:param label: Label identified by predictive model for the particular datapoint to be analysed
:param instance: Identifier of the particular data point to be analyzed
:param utc: Timestamp when the particular datapoint has been measured
"""
shap.force_plot(explainer.expected_value[label], shap_values[label][instance], data.values[instance],
feature_names=data.columns, show=False, matplotlib=True, text_rotation=90) # only works if matplotlib not JS, text rotation mandatory for long feature names
        shap_force_f = f"Shap_classification_force_class_{label}_{utc}.png"
plt.savefig(os.path.join(self.modelDir, shap_force_f), bbox_inches="tight") # bbox_inches tight not working with long feature names
plt.close()
def __shap_feature_importance(self,
shap_values,
data,
label):
"""
Extracts feature importance from shapely values.
:param shap_values: shapely values
        :param data: dataframe to be analyzed
:param label: labels to be used, as extracted from predictive model
:return: dictionary containing feature importance for each label separately
"""
feature_importance_d = {}
if isinstance(label, list) or isinstance(label, np.ndarray):
for l in label:
feature_importance = pd.DataFrame(list(zip(data.columns.tolist(), shap_values[l].sum(0))),
columns=['feature_name', 'feature_importance_vals'])
feature_importance = feature_importance.iloc[
(-np.abs(feature_importance['feature_importance_vals'].values)).argsort()]
feature_importance_d[int(l)] = feature_importance.to_dict()
else:
feature_importance = pd.DataFrame(list(zip(data.columns.tolist(), shap_values[label].sum(0))),
columns=['feature_name', 'feature_importance_vals'])
feature_importance = feature_importance.iloc[
(-np.abs(feature_importance['feature_importance_vals'].values)).argsort()]
feature_importance_d[int(label)] = feature_importance.to_dict()
return feature_importance_d
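    # The mapping returned above is keyed by class label and holds the
    # DataFrame.to_dict() form of that class's features ranked by the absolute
    # sum of their SHAP values, e.g. (metric names are placeholders):
    #   {1: {"feature_name": {0: "cpu_load", 1: "net_rx", ...},
    #        "feature_importance_vals": {0: 3.2, 1: -1.7, ...}}}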
def score(self, model, X, y):
return model.score(X, y)
def compare(self, modelList, X, y):
scores = []
for model in modelList:
scores.append(model.score(X, y))
logger.info('[%s] : [INFO] Best performing model score is -> %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), max(scores))
# for a, b in itertools.combinations(modelList, 2):
# a.score(X, y)
# b.score(X, y)
        return scores.index(max(scores))
def crossvalid(self, model, X, y, kfold):
return model_selection.cross_val_score(model, X, y, cv=kfold)
def naiveBayes(self):
return True
def adaBoost(self, settings,
data=None,
dropna=True):
if "n_estimators" not in settings:
            print("Received settings for Ada Boost %s are invalid!" % str(settings))
            logger.error('[%s] : [ERROR] Received settings for Ada Boost %s are invalid',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(settings))
sys.exit(1)
dtallowedSettings = ["n_estimators", "learning_rate"]
for k, v in settings.items():
if k in dtallowedSettings:
logger.info('[%s] : [INFO] Ada Boost %s set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), k, v)
print("Ada Boost %s set to %s" % (k, v))
if not isinstance(self.export, str):
mname = 'default'
else:
mname = self.export
df = self.__loadData(data, dropna)
features = df.columns[:-1]
X = df[features]
y = df.iloc[:, -1].values
seed = 7
# num_trees = 500
        kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
print(kfold)
ad = AdaBoostClassifier(n_estimators=settings['n_estimators'], learning_rate=settings['learning_rate'],
random_state=seed)
if self.validratio:
trainSize = 1.0 - self.validratio
            print("Ada Boost training to validation ratio set to: %s" % str(self.validratio))
logger.info('[%s] : [INFO] Ada Boost training to validation ratio set to: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(self.validratio))
d_train, d_test, f_train, f_test = self.__dataSplit(X, y, testSize=self.validratio, trainSize=trainSize)
ad.fit(d_train, f_train)
predict = ad.predict(d_train)
print("Prediction for Ada Boost Training:")
print(predict)
print("Actual labels of training set:")
print(f_train)
predProb = ad.predict_proba(d_train)
print("Prediction probabilities for Ada Boost Training:")
print(predProb)
score = ad.score(d_train, f_train)
print("Ada Boost Training Score: %s" % str(score))
logger.info('[%s] : [INFO] Ada Boost training score: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(score))
feature_imp = list(zip(d_train, ad.feature_importances_))
print("Feature importance Ada Boost Training: ")
print(list(zip(d_train, ad.feature_importances_)))
logger.info('[%s] : [INFO] Ada Boost feature importance: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(feature_imp))
pred_valid = ad.predict(d_test)
print("Ada Boost Validation set prediction: ")
print(pred_valid)
print("Actual values of validation set: ")
            print(f_test)
score_valid = ad.score(d_test, f_test)
print("Ada Boost validation set score: %s" % str(score_valid))
logger.info('[%s] : [INFO] Ada Boost validation score: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(score_valid))
else:
ad.fit(X, y)
predict = ad.predict(X)
print("Prediction for Ada Boost Training:")
print(predict)
print("Actual labels of training set:")
print(y)
predProb = ad.predict_proba(X)
print("Prediction probabilities for Ada Boost Training:")
print(predProb)
score = ad.score(X, y)
print("Ada Boost Training Score: %s" % str(score))
fimp = list(zip(X, ad.feature_importances_))
print("Feature importance Ada Boost Training: ")
print(fimp)
dfimp = dict(fimp)
dfimp = pd.DataFrame(list(dfimp.items()), columns=['Metric', 'Importance'])
            sdfimp = dfimp.sort_values('Importance', ascending=False)
dfimpCsv = 'Feature_Importance_%s.csv' % mname
sdfimp.to_csv(os.path.join(self.modelDir, dfimpCsv))
if self.validation is None:
logger.info('[%s] : [INFO] Validation is set to None',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
# return True
else:
vfile = os.path.join(self.dataDir, self.validation)
logger.info('[%s] : [INFO] Validation data file is set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(vfile))
if not os.path.isfile(vfile):
print("Validation file %s not found" % vfile)
logger.error('[%s] : [ERROR] Validation file %s not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(vfile))
else:
df_valid = pd.read_csv(vfile)
if dropna:
df_valid = df_valid.dropna()
features_valid = df_valid.columns[:-1]
X_valid = df_valid[features_valid]
y_valid = df_valid.iloc[:, -1].values
pred_valid = ad.predict(X_valid)
print("Ada Boost Validation set prediction: ")
print(pred_valid)
print("Actual values of validation set: ")
print(y_valid)
score_valid = ad.score(X_valid, y_valid)
                    print("Ada Boost validation set score: %s" % str(score_valid))
# return True
        self.__serializemodel(ad, 'AdaBoost', mname)
return ad
def neuralNet(self, settings,
data=None,
dropna=True):
if "activation" not in settings:
            print("Received settings for Neural Networks %s are invalid!" % str(settings))
logger.error('[%s] : [ERROR] Received settings for Neural Networks %s are invalid',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(settings))
sys.exit(1)
rfallowedSettings = ["max_iter", "activation", "solver", "batch_size", "learning_rate",
"momentum", "alpha"]
for k, v in settings.items():
if k in rfallowedSettings:
logger.info('[%s] : [INFO] Neural Network %s set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), k, v)
print("Neural Network %s set to %s" % (k, v))
if not isinstance(self.export, str):
mname = 'default'
else:
mname = self.export
df = self.__loadData(data, dropna)
features = df.columns[:-1]
X = df[features]
y = df.iloc[:, -1].values
mlp = MLPClassifier(hidden_layer_sizes=(50, 20), max_iter=settings['max_iter'],
activation=settings['activation'],
solver=settings['solver'], batch_size=settings['batch_size'],
learning_rate=settings['learning_rate'], momentum=settings['momentum'],
alpha=settings['alpha'])
if self.validratio:
trainSize = 1.0 - self.validratio
print("Neural Network training to validation ratio set to: %s" % str(self.validratio))
            logger.info('[%s] : [INFO] Neural Network training to validation ratio set to: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(self.validratio))
d_train, d_test, f_train, f_test = self.__dataSplit(X, y, testSize=self.validratio, trainSize=trainSize)
mlp.fit(d_train, f_train)
predict = mlp.predict(d_train)
print("Prediction for Neural Network Training:")
print(predict)
print("Actual labels of training set:")
print(f_train)
predProb = mlp.predict_proba(d_train)
print("Prediction probabilities for Neural Network Training:")
print(predProb)
score = mlp.score(d_train, f_train)
print("Neural Network Training Score: %s" % str(score))
logger.info('[%s] : [INFO] Neural Network training score: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(score))
pred_valid = mlp.predict(d_test)
print("Neural Network Validation set prediction: ")
print(pred_valid)
print("Actual values of validation set: ")
            print(f_test)
score_valid = mlp.score(d_test, f_test)
print("Neural Network validation set score: %s" % str(score_valid))
            logger.info('[%s] : [INFO] Neural Network validation score: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(score_valid))
else:
mlp.fit(X, y)
predict = mlp.predict(X)
print("Prediction for Neural Network Training:")
print(predict)
print("Actual labels of training set:")
print(y)
predProb = mlp.predict_proba(X)
print("Prediction probabilities for Neural Network Training:")
print(predProb)
score = mlp.score(X, y)
            print("Neural Network Training Score: %s" % str(score))
if self.validation is None:
logger.info('[%s] : [INFO] Validation is set to None',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
# return True
else:
vfile = os.path.join(self.dataDir, settings['validation'])
logger.info('[%s] : [INFO] Validation data file is set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(vfile))
if not os.path.isfile(vfile):
print("Validation file %s not found" % vfile)
logger.error('[%s] : [ERROR] Validation file %s not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(vfile))
else:
df_valid = pd.read_csv(vfile)
if dropna:
df_valid = df_valid.dropna()
features_valid = df_valid.columns[:-1]
X_valid = df_valid[features_valid]
y_valid = df_valid.iloc[:, -1].values
pred_valid = mlp.predict(X_valid)
print("Neural Network Validation set prediction: ")
print(pred_valid)
print("Actual values of validation set: ")
print(y_valid)
score_valid = mlp.score(X_valid, y_valid)
print("Neural Network validation set score: %s" % str(score_valid))
# return True
        self.__serializemodel(mlp, 'NeuralNetwork', mname)
return mlp
def decisionTree(self,
settings,
data=None,
dropna=True):
if "splitter" not in settings:
            print("Received settings for Decision Tree %s are invalid!" % str(settings))
logger.error('[%s] : [ERROR] Received settings for Decision Tree %s are invalid',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(settings))
sys.exit(1)
if settings['random_state'] == 'None':
settings['random_state'] = None
else:
settings['random_state'] = int(settings['random_state'])
if settings['max_depth'] == 'None':
max_depth = None
else:
max_depth = int(settings['max_depth'])
if settings['max_features'] == 'auto':
max_features = settings['max_features']
else:
max_features = int(settings['max_features'])
dtallowedSettings = ["criterion", "splitter", "max_features", "max_depth",
"min_weight_faction_leaf", "random_state"]
for k, v in settings.items():
if k in dtallowedSettings:
logger.info('[%s] : [INFO] DecisionTree %s set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), k, v)
print("DecisionTree %s set to %s" % (k, v))
if not isinstance(self.export, str):
mname = 'default'
else:
mname = self.export
df = self.__loadData(data, dropna)
features = df.columns[:-1]
X = df[features]
y = df.iloc[:, -1].values
# dt = DecisionTreeClassifier(min_samples_split=20, random_state=99)
dt = DecisionTreeClassifier(criterion=settings["criterion"], splitter=settings["splitter"],
max_features=max_features, max_depth=max_depth,
min_samples_split=float(settings["min_sample_split"]),
min_weight_fraction_leaf=float(settings["min_weight_faction_leaf"]),
random_state=settings["random_state"])
if self.validratio:
trainSize = 1.0 - self.validratio
print("Decision Tree training to validation ratio set to: %s" % str(self.validratio))
            logger.info('[%s] : [INFO] Decision Tree training to validation ratio set to: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(self.validratio))
d_train, d_test, f_train, f_test = self.__dataSplit(X, y, testSize=self.validratio, trainSize=trainSize)
dt.fit(d_train, f_train)
predict = dt.predict(d_train)
print("Prediction for Decision Tree Training:")
print(predict)
print("Actual labels of training set:")
print(f_train)
predProb = dt.predict_proba(d_train)
print("Prediction probabilities for Decision Tree Training:")
print(predProb)
score = dt.score(d_train, f_train)
print("Decision Tree Training Score: %s" % str(score))
logger.info('[%s] : [INFO] Decision Tree training score: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(score))
feature_imp = list(zip(d_train, dt.feature_importances_))
print("Feature importance Decision Tree Training: ")
print(list(zip(d_train, dt.feature_importances_)))
logger.info('[%s] : [INFO] Decision Tree feature importance: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(feature_imp))
pred_valid = dt.predict(d_test)
print("Decision Tree Validation set prediction: ")
print(pred_valid)
print("Actual values of validation set: ")
            print(f_test)
score_valid = dt.score(d_test, f_test)
print("Decision Tree validation set score: %s" % str(score_valid))
            logger.info('[%s] : [INFO] Decision Tree validation score: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(score_valid))
else:
dt.fit(X, y)
predict = dt.predict(X)
print("Prediction for Decision Tree Training:")
print(predict)
print("Actual labels of training set:")
print(y)
predProb = dt.predict_proba(X)
print("Prediction probabilities for Decision Tree Training:")
print(predProb)
score = dt.score(X, y)
print("Decision Tree Training Score: %s" % str(score))
fimp = list(zip(X, dt.feature_importances_))
            print("Feature importance Decision Tree Training: ")
print(fimp)
dfimp = dict(fimp)
dfimp = pd.DataFrame(list(dfimp.items()), columns=['Metric', 'Importance'])
            sdfimp = dfimp.sort_values('Importance', ascending=False)
dfimpCsv = 'Feature_Importance_%s.csv' % mname
sdfimp.to_csv(os.path.join(self.modelDir, dfimpCsv))
if self.validation is None:
logger.info('[%s] : [INFO] Validation is set to None',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
# return True
else:
vfile = os.path.join(self.dataDir, self.validation)
logger.info('[%s] : [INFO] Validation data file is set to %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(vfile))
if not os.path.isfile(vfile):
print("Validation file %s not found" % vfile)
logger.error('[%s] : [ERROR] Validation file %s not found',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(vfile))
else:
df_valid = pd.read_csv(vfile)
if dropna:
df_valid = df_valid.dropna()
features_valid = df_valid.columns[:-1]
X_valid = df_valid[features_valid]
y_valid = df_valid.iloc[:, -1].values
pred_valid = dt.predict(X_valid)
print("Decision Tree Validation set prediction: ")
print(pred_valid)
print("Actual values of validation set: ")
print(y_valid)
score_valid = dt.score(X_valid, y_valid)
                    print("Decision Tree validation set score: %s" % str(score_valid))
# return True
self.__serializemodel(dt, 'DecisionTree', mname)
return dt
def dask_tpot(self,
settings,
X,
y):
from tpot import TPOTClassifier
if self.cv is None:
cv = self.cv
logger.info('[{}] : [INFO] TPOT Cross Validation not set, using default'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
elif isinstance(self.cv, int):
logger.info('[{}] : [INFO] TPOT Cross Validation set to {} folds:'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), self.cv))
cv = self.cv
else:
try:
logger.info('[{}] : [INFO] TPOT Cross Validation set to use {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), self.cv['Type']))
except:
logger.error('[{}] : [ERROR] TPOT Cross Validation split generator type not set!'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
sys.exit(1)
cv = self.__crossValidGenerator(self.cv)
settings.update({'cv': cv})
tp = TPOTClassifier(**settings)
for k, v in settings.items():
            logger.info('[{}] : [INFO] TPOT parameter {} set to {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), k, v))
logger.info('[{}] : [INFO] Starting TPOT Optimization ...'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
try:
pipeline_model = tp.fit(X, y)
except Exception as inst:
logger.error('[{}] : [ERROR] Failed to run TPOT optimization with {} and {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args))
sys.exit(1)
# print(pipeline_model.score(X, y))
logger.info('[{}] : [INFO] TPOT optimized best pipeline is: {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(pipeline_model.fitted_pipeline_.steps)))
# print(str(pipeline_model.fitted_pipeline_.steps))
# print(pipeline_model.pareto_front_fitted_pipelines_)
# print(pipeline_model.evaluated_individuals_)
self.__serializemodel(model=pipeline_model.fitted_pipeline_, method='TPOT', mname=self.export)
return 0
def dask_classifier(self,
settings,
mname,
X,
y,
classification_method=None):
# Factorize input
y_factor = | pd.factorize(y) | pandas.factorize |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import copy
import torch
import numpy as np
import pandas as pd
from qlib.data.dataset import DatasetH
device = "cuda" if torch.cuda.is_available() else "cpu"
def _to_tensor(x):
if not isinstance(x, torch.Tensor):
return torch.tensor(x, dtype=torch.float, device=device)
return x
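# Minimal usage sketch (illustrative only, not part of the original module):
# _to_tensor accepts either ndarrays or tensors, and _create_ts_slices below
# expects a lexsorted <instrument, datetime> MultiIndex like the one built
# here. The instrument codes and dates are made-up examples.
def _example_inputs():
    features = _to_tensor(np.zeros((4, 3), dtype=np.float32))  # ndarray -> tensor on `device`
    index = pd.MultiIndex.from_product(
        [["SH600000", "SZ000001"], pd.date_range("2020-01-01", periods=2)],
        names=["instrument", "datetime"],
    )  # from_product over sorted inputs is already lexsorted
    return features, index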
def _create_ts_slices(index, seq_len):
"""
create time series slices from pandas index
Args:
index (pd.MultiIndex): pandas multiindex with <instrument, datetime> order
seq_len (int): sequence length
"""
assert index.is_lexsorted(), "index should be sorted"
# number of dates for each code
sample_count_by_codes = | pd.Series(0, index=index) | pandas.Series |
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
        # Deprecating NaNs in categories (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
        # Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
        # the next ones are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
        cat_unordered = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
            cat > cat_unordered
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
        # comparison with numpy.array will raise in both directions, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
        # Make sure that unequal comparisons take the categories order into
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# the unicode option should not affect Categorical, as it doesn't care
# about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
# all "pointers" to '4' must be changed from 3 to 0, ...
c = c.set_categories([4, 3, 2, 1])
# positions are changed
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
# categories are now in new order
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
# output is the same
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))
# assertTrue(x, msg) does not compare its two arguments; use assertEqual
self.assertEqual(c.min(), 4)
self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
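def test_rename_vs_set_categories_sketch(self):
# Editor's illustrative sketch, not part of the original suite:
# rename_categories relabels the categories and leaves the codes
# untouched, while set_categories remaps the codes to the new category
# order. This only re-states behaviour asserted in the two tests above.
cat = Categorical(["a", "b", "c", "a"], ordered=True)
renamed = cat.rename_categories(["x", "y", "z"])
self.assert_numpy_array_equal(renamed.codes, cat.codes)
self.assert_numpy_array_equal(renamed.__array__(),
np.array(["x", "y", "z", "x"]))
remapped = cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(remapped.codes, np.array([2, 1, 0, 2]))
self.assert_numpy_array_equal(remapped.__array__(), cat.__array__())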
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
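def test_nan_is_code_minus_one_sketch(self):
# Editor's illustrative sketch, not part of the original suite: in the
# non-deprecated path NaN never becomes a category; it is simply stored
# as code -1, as already asserted at the start of test_nan_handling.
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
self.assertTrue(np.isnan(np.asarray(c)[2]))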
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = np.asarray(["a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"],
categories=["a", "b", "c"])
exp = np.asarray(["c", "a", "b"])
res = cat.unique()
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
exp, categories=['c', 'a', 'b']))
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"],
categories=["a", "b", "c"])
res = cat.unique()
exp = np.asarray(["b", np.nan, "a"], dtype=object)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, Categorical(
["b", np.nan, "a"], categories=["b", "a"]))
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(['b', 'a', 'b'], categories=['a', 'b'], ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['c', 'b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['c', 'b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b', 'c'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'a', 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', 'a'])
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(['b', 'b', np.nan, 'a'], categories=['a', 'b', 'c'],
ordered=True)
res = cat.unique()
exp = np.asarray(['b', np.nan, 'a'], dtype=object)
exp_cat = Categorical(exp, categories=['a', 'b'], ordered=True)
self.assert_numpy_array_equal(res, exp)
tm.assert_categorical_equal(res, exp_cat)
def test_mode(self):
s = Categorical([1, 1, 2, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 1, 1, 4, 5, 5, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([5, 1], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
# NaN should not become the mode!
s = Categorical([np.nan, np.nan, np.nan, 4, 5],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, np.nan, 4, 5, 4],
categories=[5, 4, 3, 2, 1], ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
s = Categorical([np.nan, np.nan, 4, 5, 4], categories=[5, 4, 3, 2, 1],
ordered=True)
res = s.mode()
exp = Categorical([4], categories=[5, 4, 3, 2, 1], ordered=True)
self.assertTrue(res.equals(exp))
def test_sort(self):
# unordered cats are sortable
cat = Categorical(["a", "b", "b", "a"], ordered=False)
cat.sort_values()
cat.sort()
cat = Categorical(["a", "c", "b", "d"], ordered=True)
# sort_values
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Categorical(["a", "c", "b", "d"],
categories=["a", "b", "c", "d"], ordered=True)
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp)
# sort (inplace order)
cat1 = cat.copy()
cat1.sort()
exp = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(cat1.__array__(), exp)
def test_slicing_directly(self):
cat = Categorical(["a", "b", "c", "d", "a", "b", "c"])
sliced = cat[3]
tm.assert_equal(sliced, "d")
sliced = cat[3:5]
expected = Categorical(["d", "a"], categories=['a', 'b', 'c', 'd'])
self.assert_numpy_array_equal(sliced._codes, expected._codes)
tm.assert_index_equal(sliced.categories, expected.categories)
def test_set_item_nan(self):
cat = pd.Categorical([1, 2, 3])
exp = pd.Categorical([1, np.nan, 3], categories=[1, 2, 3])
cat[1] = np.nan
self.assertTrue(cat.equals(exp))
# if nan in categories, the proper code should be set!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1] = np.nan
exp = np.array([0, 3, 2, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = np.nan
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, 1]
exp = np.array([0, 3, 0, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[1:3] = [np.nan, np.nan]
exp = np.array([0, 3, 3, -1])
self.assert_numpy_array_equal(cat.codes, exp)
cat = pd.Categorical([1, 2, np.nan, 3], categories=[1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
cat.set_categories([1, 2, 3, np.nan], rename=True, inplace=True)
cat[pd.isnull(cat)] = np.nan
exp = np.array([0, 1, 3, 2])
self.assert_numpy_array_equal(cat.codes, exp)
def test_shift(self):
# GH 9416
cat = pd.Categorical(['a', 'b', 'c', 'd', 'a'])
# shift forward
sp1 = cat.shift(1)
xp1 = pd.Categorical([np.nan, 'a', 'b', 'c', 'd'])
self.assert_categorical_equal(sp1, xp1)
self.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = pd.Categorical(['c', 'd', 'a', np.nan, np.nan],
categories=['a', 'b', 'c', 'd'])
self.assert_categorical_equal(sn2, xp2)
self.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
self.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = pd.Categorical([1, 2, 3])
exp = cat._codes.nbytes + cat._categories.values.nbytes
self.assertEqual(cat.nbytes, exp)
def test_memory_usage(self):
cat = pd.Categorical([1, 2, 3])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertEqual(cat.nbytes, cat.memory_usage(deep=True))
cat = pd.Categorical(['foo', 'foo', 'bar'])
self.assertEqual(cat.nbytes, cat.memory_usage())
self.assertTrue(cat.memory_usage(deep=True) > cat.nbytes)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
self.assertTrue(abs(diff) < 100)
def test_searchsorted(self):
# https://github.com/pydata/pandas/issues/8420
s1 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk'])
s2 = pd.Series(['apple', 'bread', 'bread', 'cheese', 'milk', 'donuts'])
c1 = pd.Categorical(s1, ordered=True)
c2 = pd.Categorical(s2, ordered=True)
# Single item array
res = c1.searchsorted(['bread'])
chk = s1.searchsorted(['bread'])
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Scalar version of single item array
# Categorical returns an np.array, like pd.Series does, unlike
# np.ndarray.searchsorted(), which returns a scalar
res = c1.searchsorted('bread')
chk = s1.searchsorted('bread')
exp = np.array([1])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present in the Categorical
res = c1.searchsorted(['bread', 'eggs'])
chk = s1.searchsorted(['bread', 'eggs'])
exp = np.array([1, 4])
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# Searching for a value that is not present, to the right
res = c1.searchsorted(['bread', 'eggs'], side='right')
chk = s1.searchsorted(['bread', 'eggs'], side='right')
exp = np.array([3, 4]) # eggs before milk
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
# As above, but with a sorter array to reorder an unsorted array
res = c2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
chk = s2.searchsorted(['bread', 'eggs'], side='right',
sorter=[0, 1, 2, 3, 5, 4])
exp = np.array([3, 5])  # eggs after donuts, after switching milk and donuts
self.assert_numpy_array_equal(res, exp)
self.assert_numpy_array_equal(res, chk)
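def test_searchsorted_scalar_return_type_sketch(self):
# Editor's illustrative sketch, not part of the original suite: for a
# scalar key an ordered Categorical returns a length-1 array (matching
# Series.searchsorted in this version), whereas a plain ndarray
# returns a scalar position.
c = pd.Categorical(['apple', 'bread', 'cheese'], ordered=True)
self.assert_numpy_array_equal(c.searchsorted('bread'), np.array([1]))
arr = np.array(['apple', 'bread', 'cheese'])
self.assertTrue(np.isscalar(arr.searchsorted('bread')))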
def test_deprecated_labels(self):
# TODO: labels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.codes
with tm.assert_produces_warning(FutureWarning):
res = cat.labels
self.assert_numpy_array_equal(res, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_deprecated_levels(self):
# TODO: levels is deprecated and should be removed in 0.18 or 2017,
# whichever is earlier
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
exp = cat.categories
with tm.assert_produces_warning(FutureWarning):
res = cat.levels
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
res = pd.Categorical([1, 2, 3, np.nan], levels=[1, 2, 3])
self.assert_numpy_array_equal(res.categories, exp)
self.assertFalse(LooseVersion(pd.__version__) >= '0.18')
def test_removed_names_produces_warning(self):
# 10482
with tm.assert_produces_warning(UserWarning):
Categorical([0, 1], name="a")
with tm.assert_produces_warning(UserWarning):
Categorical.from_codes([1, 2], ["a", "b", "c"], name="a")
def test_datetime_categorical_comparison(self):
dt_cat = pd.Categorical(
pd.date_range('2014-01-01', periods=3), ordered=True)
self.assert_numpy_array_equal(dt_cat > dt_cat[0], [False, True, True])
self.assert_numpy_array_equal(dt_cat[0] < dt_cat, [False, True, True])
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assert_numpy_array_equal(cat > cat[0], [False, True, True])
self.assert_numpy_array_equal(cat[0] < cat, [False, True, True])
def test_comparison_with_unknown_scalars(self):
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# ordering comparisons (<, >) with scalars not in the categories should
# raise, but equality comparisons (==, !=) should not
cat = pd.Categorical([1, 2, 3], ordered=True)
self.assertRaises(TypeError, lambda: cat < 4)
self.assertRaises(TypeError, lambda: cat > 4)
self.assertRaises(TypeError, lambda: 4 < cat)
self.assertRaises(TypeError, lambda: 4 > cat)
self.assert_numpy_array_equal(cat == 4, [False, False, False])
self.assert_numpy_array_equal(cat != 4, [True, True, True])
class TestCategoricalAsBlock(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a', 'a', 'c',
'c', 'c'])
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500), right=False,
labels=labels)
self.cat = df
def test_dtypes(self):
# GH8143
index = ['cat', 'obj', 'num']
cat = pd.Categorical(['a', 'b', 'c'])
obj = pd.Series(['a', 'b', 'c'])
num = pd.Series([1, 2, 3])
df = pd.concat([pd.Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == 'object'
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'int64'
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == 'category'
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = Categorical(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
result = Categorical(['foo%05d' % i for i in range(40000)])
self.assertTrue(result.codes.dtype == 'int32')
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
self.assertTrue(result.codes.dtype == 'int8')
result = result.add_categories(['foo%05d' % i for i in range(400)])
self.assertTrue(result.codes.dtype == 'int16')
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
self.assertTrue(result.codes.dtype == 'int8')
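def test_codes_dtype_depends_on_categories_sketch(self):
# Editor's illustrative sketch, not part of the original suite: the
# codes dtype is picked from the number of categories, not the number
# of values, so a long Categorical with two categories still fits in
# int8 codes (assuming the sizing behaviour shown in test_codes_dtypes).
result = Categorical(['a', 'b'] * 1000)
self.assertEqual(len(result), 2000)
self.assertTrue(result.codes.dtype == 'int8')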
def test_basic(self):
# test basic creation / coercion of categoricals
s = Series(self.factor, name='A')
self.assertEqual(s.dtype, 'category')
self.assertEqual(len(s), len(self.factor))
str(s.values)
str(s)
# in a frame
df = DataFrame({'A': self.factor})
result = df['A']
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
df = DataFrame({'A': s})
result = df['A']
tm.assert_series_equal(result, s)
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# multiples
df = DataFrame({'A': s, 'B': s, 'C': 1})
result1 = df['A']
result2 = df['B']
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
self.assertEqual(result2.name, 'B')
self.assertEqual(len(df), len(self.factor))
str(df.values)
str(df)
# GH8623
x = pd.DataFrame([[1, '<NAME>'], [2, '<NAME>'],
[1, '<NAME>']],
columns=['person_id', 'person_name'])
# doing this breaks transform
x['person_name'] = pd.Categorical(x.person_name)
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
self.assertEqual(result, expected)
result = x.person_name[0]
self.assertEqual(result, expected)
result = x.person_name.loc[0]
self.assertEqual(result, expected)
def test_creation_astype(self):
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
l = [1, 2, 3, 1]
s = pd.Series(l)
exp = pd.Series(Categorical(l))
res = s.astype('category')
tm.assert_series_equal(res, exp)
df = pd.DataFrame({"cats": [1, 2, 3, 4, 5, 6],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical([1, 2, 3, 4, 5, 6])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
df = pd.DataFrame({"cats": ['a', 'b', 'b', 'a', 'a', 'd'],
"vals": [1, 2, 3, 4, 5, 6]})
cats = Categorical(['a', 'b', 'b', 'a', 'a', 'd'])
exp_df = pd.DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
df["cats"] = df["cats"].astype("category")
tm.assert_frame_equal(exp_df, df)
# with keywords
l = ["a", "b", "c", "a"]
s = pd.Series(l)
exp = pd.Series(Categorical(l, ordered=True))
res = s.astype('category', ordered=True)
tm.assert_series_equal(res, exp)
exp = pd.Series(Categorical(
l, categories=list('abcdef'), ordered=True))
res = s.astype('category', categories=list('abcdef'), ordered=True)
tm.assert_series_equal(res, exp)
def test_construction_series(self):
l = [1, 2, 3, 1]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
l = ["a", "b", "c", "a"]
exp = Series(l).astype('category')
res = Series(l, dtype='category')
tm.assert_series_equal(res, exp)
# insert into frame with different index
# GH 8076
index = pd.date_range('20000101', periods=3)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
expected = DataFrame({'x': expected})
df = DataFrame(
{'x': Series(['a', 'b', 'c'], dtype='category')}, index=index)
tm.assert_frame_equal(df, expected)
def test_construction_frame(self):
# GH8626
# dict creation
df = DataFrame({'A': list('abc')}, dtype='category')
expected = Series(list('abc'), dtype='category', name='A')
tm.assert_series_equal(df['A'], expected)
# to_frame
s = Series(list('abc'), dtype='category')
result = s.to_frame()
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(result[0], expected)
result = s.to_frame(name='foo')
expected = Series(list('abc'), dtype='category', name='foo')
tm.assert_series_equal(result['foo'], expected)
# list-like creation
df = DataFrame(list('abc'), dtype='category')
expected = Series(list('abc'), dtype='category', name=0)
tm.assert_series_equal(df[0], expected)
# ndim != 1
df = DataFrame([pd.Categorical(list('abc'))])
expected = DataFrame({0: Series(list('abc'), dtype='category')})
tm.assert_frame_equal(df, expected)
df = DataFrame([pd.Categorical(list('abc')), pd.Categorical(list(
'abd'))])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: Series(list('abd'), dtype='category')},
columns=[0, 1])
tm.assert_frame_equal(df, expected)
# mixed
df = DataFrame([pd.Categorical(list('abc')), list('def')])
expected = DataFrame({0: Series(list('abc'), dtype='category'),
1: list('def')}, columns=[0, 1])
tm.assert_frame_equal(df, expected)
# invalid (shape)
self.assertRaises(
ValueError,
lambda: DataFrame([pd.Categorical(list('abc')),
pd.Categorical(list('abdefg'))]))
# ndim > 1
self.assertRaises(NotImplementedError,
lambda: pd.Categorical(np.array([list('abcd')])))
def test_reshaping(self):
p = tm.makePanel()
p['str'] = 'foo'
df = p.to_frame()
df['category'] = df['str'].astype('category')
result = df['category'].unstack()
c = Categorical(['foo'] * len(p.major_axis))
expected = DataFrame({'A': c.copy(),
'B': c.copy(),
'C': c.copy(),
'D': c.copy()},
columns=Index(list('ABCD'), name='minor'),
index=p.major_axis.set_names('major'))
tm.assert_frame_equal(result, expected)
def test_reindex(self):
index = pd.date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat, copy=True)
self.assertFalse(s.cat is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
exp_cat = np.array(["a", "b", "c", "a"])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = pd.Series(cat)
self.assertTrue(s.values is cat)
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s)
self.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1])
self.assert_numpy_array_equal(s.__array__(), exp_s2)
self.assert_numpy_array_equal(cat.__array__(), exp_s2)
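def test_series_copy_semantics_sketch(self):
# Editor's illustrative sketch, not part of the original suite:
# Series(cat) wraps the very same Categorical (copy=False is the
# default), while Series(cat, copy=True) works on a copy, as
# test_sideeffects_free above demonstrates in more detail.
cat = Categorical(["a", "b", "c"])
self.assertTrue(pd.Series(cat).values is cat)
self.assertFalse(pd.Series(cat, copy=True).values is cat)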
def test_nan_handling(self):
# Nans are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(s.values.codes, np.array([0, 1, -1, 0]))
# If categories have nan included, the label should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
s2 = Series(Categorical(
["a", "b", np.nan, "a"], categories=["a", "b", np.nan]))
self.assert_numpy_array_equal(s2.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s2.values.codes, np.array([0, 1, 2, 0]))
# Changing categories should also make the replaced category np.nan
s3 = Series(Categorical(["a", "b", "c", "a"]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
s3.cat.categories = ["a", "b", np.nan]
self.assert_numpy_array_equal(s3.cat.categories, np.array(
["a", "b", np.nan], dtype=np.object_))
self.assert_numpy_array_equal(s3.values.codes, np.array([0, 1, 2, 0]))
def test_cat_accessor(self):
s = Series(Categorical(["a", "b", np.nan, "a"]))
self.assert_numpy_array_equal(s.cat.categories, np.array(["a", "b"]))
self.assertEqual(s.cat.ordered, False)
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s.cat.set_categories(["b", "a"], inplace=True)
self.assertTrue(s.values.equals(exp))
res = s.cat.set_categories(["b", "a"])
self.assertTrue(res.values.equals(exp))
exp = Categorical(["a", "b", np.nan, "a"], categories=["b", "a"])
s[:] = "a"
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, np.array(["a"]))
def test_sequence_like(self):
# GH 7839
# make sure can iterate
df = DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df['grade'] = Categorical(df['raw_grade'])
# basic sequencing testing
result = list(df.grade.values)
expected = np.array(df.grade.values).tolist()
tm.assert_almost_equal(result, expected)
# iteration
for t in df.itertuples(index=False):
str(t)
for row, s in df.iterrows():
str(s)
for c, col in df.iteritems():
str(col)
def test_series_delegations(self):
# invalid accessor
self.assertRaises(AttributeError, lambda: Series([1, 2, 3]).cat)
tm.assertRaisesRegexp(
AttributeError,
r"Can only use .cat accessor with a 'category' dtype",
lambda: Series([1, 2, 3]).cat)
self.assertRaises(AttributeError, lambda: Series(['a', 'b', 'c']).cat)
self.assertRaises(AttributeError, lambda: Series(np.arange(5.)).cat)
self.assertRaises(AttributeError,
lambda: Series([Timestamp('20130101')]).cat)
# Series should delegate calls to '.categories', '.codes', '.ordered'
# and the methods '.set_categories()' 'drop_unused_categories()' to the
# categorical
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
s.cat.categories = [1, 2, 3]
exp_categories = np.array([1, 2, 3])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
exp_codes = Series([0, 1, 2, 0], dtype='int8')
tm.assert_series_equal(s.cat.codes, exp_codes)
self.assertEqual(s.cat.ordered, True)
s = s.cat.as_unordered()
self.assertEqual(s.cat.ordered, False)
s.cat.as_ordered(inplace=True)
self.assertEqual(s.cat.ordered, True)
# reorder
s = Series(Categorical(["a", "b", "c", "a"], ordered=True))
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
s = s.cat.set_categories(["c", "b", "a"])
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# remove unused categories
s = Series(Categorical(["a", "b", "b", "a"], categories=["a", "b", "c"
]))
exp_categories = np.array(["a", "b"])
exp_values = np.array(["a", "b", "b", "a"])
s = s.cat.remove_unused_categories()
self.assert_numpy_array_equal(s.cat.categories, exp_categories)
self.assert_numpy_array_equal(s.values.__array__(), exp_values)
self.assert_numpy_array_equal(s.__array__(), exp_values)
# set_categories is easily called on the Series itself instead of via
# the .cat accessor, so test that doing so raises an error:
def f():
s.set_categories([4, 3, 2, 1])
self.assertRaises(Exception, f)
# right: s.cat.set_categories([4,3,2,1])
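def test_cat_accessor_usage_sketch(self):
# Editor's illustrative sketch, not part of the original suite: the
# category-level methods live on the .cat accessor and return a new
# Series; calling them directly on the Series raises, as tested above.
s = Series(Categorical(["a", "b", "a"]))
res = s.cat.rename_categories(["x", "y"])
self.assert_numpy_array_equal(np.asarray(res), np.array(["x", "y", "x"]))
self.assertRaises(AttributeError, lambda: s.rename_categories(["x", "y"]))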
def test_series_functions_no_warnings(self):
df = pd.DataFrame({'value': np.random.randint(0, 100, 20)})
labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)]
with tm.assert_produces_warning(False):
df['group'] = pd.cut(df.value, range(0, 105, 10), right=False,
labels=labels)
def test_assignment_to_dataframe(self):
# assignment
df = DataFrame({'value': np.array(
np.random.randint(0, 10000, 100), dtype='int32')})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
df = df.sort_values(by=['value'], ascending=True)
s = pd.cut(df.value, range(0, 10500, 500), right=False, labels=labels)
d = s.values
df['D'] = d
str(df)
result = df.dtypes
expected = Series(
[np.dtype('int32'), com.CategoricalDtype()], index=['value', 'D'])
tm.assert_series_equal(result, expected)
df['E'] = s
str(df)
result = df.dtypes
expected = Series([np.dtype('int32'), com.CategoricalDtype(),
com.CategoricalDtype()],
index=['value', 'D', 'E'])
tm.assert_series_equal(result, expected)
result1 = df['D']
result2 = df['E']
self.assertTrue(result1._data._block.values.equals(d))
# sorting
s.name = 'E'
self.assertTrue(result2.sort_index().equals(s.sort_index()))
# smoke test: constructing a frame from a categorical Series should work
cat = pd.Categorical([1, 2, 3, 10], categories=[1, 2, 3, 4, 10])
df = pd.DataFrame(pd.Series(cat))
def test_describe(self):
# Categoricals should not show up together with numerical columns
result = self.cat.describe()
self.assertEqual(len(result.columns), 1)
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = pd.Series(pd.Categorical(["a", "b", "c", "c"]))
df3 = pd.DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
self.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_repr(self):
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
self.assertEqual(exp, a.__unicode__())
a = pd.Series(pd.Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
self.assertEqual(exp, repr(a))
levs = list("abcdefghijklmnopqrstuvwxyz")
a = pd.Series(pd.Categorical(
["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
self.assertEqual(exp, a.__unicode__())
def test_categorical_repr(self):
c = pd.Categorical([1, 2, 3])
exp = """[1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1, 2, 3, 4, 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20))
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_ordered(self):
c = pd.Categorical([1, 2, 3], ordered=True)
exp = """[1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3],
ordered=True)
exp = """[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical([1, 2, 3, 4, 5] * 10, ordered=True)
exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5]
Length: 50
Categories (5, int64): [1 < 2 < 3 < 4 < 5]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(np.arange(20), ordered=True)
exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19]
Length: 20
Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
# TODO(wesm): exceeding 80 characters in the console is not good
# behavior
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]""")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, "
"2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]\n"
"Categories (5, datetime64[ns]): [2011-01-01 09:00:00, "
"2011-01-01 10:00:00, 2011-01-01 11:00:00,\n"
" 2011-01-01 12:00:00, "
"2011-01-01 13:00:00]")
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = (
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, "
"2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, "
"2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, "
"2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n"
"Categories (5, datetime64[ns, US/Eastern]): "
"[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n"
" "
"2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n"
" "
"2011-01-01 13:00:00-05:00]")
self.assertEqual(repr(c), exp)
def test_categorical_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00]
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
self.assertEqual(repr(c), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00]
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(c), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05]
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00,
18 days 01:00:00, 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
c = pd.Categorical(idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days]
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(c), exp)
idx = pd.timedelta_range('1 hours', periods=20)
c = pd.Categorical(idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 20
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
c = pd.Categorical(idx.append(idx), categories=idx, ordered=True)
exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00]
Length: 40
Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 <
18 days 01:00:00 < 19 days 01:00:00]"""
self.assertEqual(repr(c), exp)
def test_categorical_series_repr(self):
s = pd.Series(pd.Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_ordered(self):
s = pd.Series(pd.Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
self.assertEqual(repr(s), exp)
s = pd.Series(pd.Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]"""
self.assertEqual(repr(s), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]"""
self.assertEqual(repr(s), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_series_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]"""
self.assertEqual(repr(s), exp)
idx = pd.timedelta_range('1 hours', periods=10)
s = pd.Series(pd.Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]"""
self.assertEqual(repr(s), exp)
def test_categorical_index_repr(self):
idx = pd.CategoricalIndex(pd.Categorical([1, 2, 3]))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10)))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_ordered(self):
i = pd.CategoricalIndex(pd.Categorical([1, 2, 3], ordered=True))
exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(np.arange(10), ordered=True))
exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, 4, 5, 6, 7, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_datetime_ordered(self):
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00',
'2011-01-01 11:00:00', '2011-01-01 12:00:00',
'2011-01-01 13:00:00'],
categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx), ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00',
'2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00',
'2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00',
'2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00',
'2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'],
categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period(self):
# test reprs across several index lengths
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=1)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=2)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=3)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
i = pd.CategoricalIndex(pd.Categorical(idx.append(idx)))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00',
'2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00',
'2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_period_ordered(self):
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00', '2011-01-01 13:00'],
categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.period_range('2011-01', freq='M', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=False, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_index_repr_timedelta_ordered(self):
idx = pd.timedelta_range('1 days', periods=5)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days 00:00:00, 2 days 00:00:00, 3 days 00:00:00, 4 days 00:00:00, 5 days 00:00:00], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
idx = pd.timedelta_range('1 hours', periods=10)
i = pd.CategoricalIndex(pd.Categorical(idx, ordered=True))
exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00',
'6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00',
'9 days 01:00:00'],
categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, 5 days 01:00:00, 6 days 01:00:00, 7 days 01:00:00, ...], ordered=True, dtype='category')"""
self.assertEqual(repr(i), exp)
def test_categorical_frame(self):
# normal DataFrame
dt = pd.date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
p = pd.period_range('2011-01', freq='M', periods=5)
df = pd.DataFrame({'dt': dt, 'p': p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
df = pd.DataFrame({'dt': pd.Categorical(dt), 'p': pd.Categorical(p)})
self.assertEqual(repr(df), exp)
def test_info(self):
# make sure it works
n = 2500
df = DataFrame({'int64': np.random.randint(100, size=n)})
df['category'] = Series(np.array(list('abcdefghij')).take(
np.random.randint(0, 10, size=n))).astype('category')
df.isnull()
df.info()
df2 = df[df['category'] == 'd']
df2.info()
def test_groupby_sort(self):
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
res = self.cat.groupby(['value_group'])['value_group'].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Series(Categorical(["a", "b", "c", "d"], ordered=False))
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Series(Categorical(["a", "b", "c", "d"], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Series(Categorical(["a", "b", "c", "d"], categories=[
'd', 'c', 'b', 'a'], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Series(Categorical(
[np.nan, "b", "c", np.nan], categories=['d', 'c', 'b', 'a'
], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
cat = Series(Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True))
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
def test_mode(self):
s = Series(Categorical([1, 1, 2, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 1, 1, 4, 5, 5, 5],
categories=[5, 4, 3, 2, 1], ordered=True))
res = s.mode()
exp = Series(Categorical([5, 1], categories=[
5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
s = Series(Categorical([1, 2, 3, 4, 5], categories=[5, 4, 3, 2, 1],
ordered=True))
res = s.mode()
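# in the pandas version under test, mode() with no repeated values returns
# an empty (categorical) Series, as asserted below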
exp = Series(Categorical([], categories=[5, 4, 3, 2, 1], ordered=True))
tm.assert_series_equal(res, exp)
def test_value_counts(self):
s = pd.Series(pd.Categorical(
["a", "b", "c", "c", "c", "b"], categories=["c", "a", "b", "d"]))
res = s.value_counts(sort=False)
exp = Series([3, 1, 2, 0],
index=pd.CategoricalIndex(["c", "a", "b", "d"]))
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp = Series([3, 2, 1, 0],
index=pd.CategoricalIndex(["c", "b", "a", "d"]))
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# https://github.com/pydata/pandas/issues/9443
s = pd.Series(["a", "b", "a"], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
s = pd.Series(["a", "b", None, "a", None, None], dtype="category")
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1], index=pd.CategoricalIndex([np.nan, "a", "b"])))
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
tm.assert_series_equal(
s.value_counts(dropna=False, sort=False),
pd.Series([2, 1, 3],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", "a"], categories=["a", "b", np.nan]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([2, 1, 0],
index=pd.CategoricalIndex(["a", "b", np.nan])))
with tm.assert_produces_warning(FutureWarning):
s = pd.Series(pd.Categorical(
["a", "b", None, "a", None, None], categories=["a", "b", np.nan
]))
tm.assert_series_equal(
s.value_counts(dropna=True),
pd.Series([2, 1], index=pd.CategoricalIndex(["a", "b"])))
tm.assert_series_equal(
s.value_counts(dropna=False),
pd.Series([3, 2, 1],
index=pd.CategoricalIndex([np.nan, "a", "b"])))
def test_groupby(self):
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"
], categories=["a", "b", "c", "d"], ordered=True)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
expected = DataFrame({'a': Series(
[1, 2, 4, np.nan], index=pd.CategoricalIndex(
['a', 'b', 'c', 'd'], name='b'))})
result = data.groupby("b").mean()
tm.assert_frame_equal(result, expected)
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
# single grouper
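# (grouping on a categorical keeps unobserved categories such as 'z', which
#  show up as NaN rows in the aggregated result asserted below)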
gb = df.groupby("A")
exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A')
expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers
gb = df.groupby(['A', 'B'])
expected = DataFrame({'values': Series(
[1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan
], index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']], names=['A', 'B']))})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# multiple groupers with a non-cat
df = df.copy()
df['C'] = ['foo', 'bar'] * 2
gb = df.groupby(['A', 'B', 'C'])
expected = DataFrame({'values': Series(
np.nan, index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y'], ['foo', 'bar']
], names=['A', 'B', 'C']))}).sortlevel()
expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4]
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
# rows 0 and 2 must share the same person_name so that drop_duplicates
# below keeps only rows 0 and 1 (the names are illustrative placeholders)
x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'],
                  [1, 'John P. Doe']],
                 columns=['person_id', 'person_name'])
x['person_name'] = pd.Categorical(x.person_name)
g = x.groupby(['person_id'])
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[['person_name']])
result = x.drop_duplicates('person_name')
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates('person_name').iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name='person_id')
expected['person_name'] = expected['person_name'].astype('object')
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']])
# Filter
tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a'])
tm.assert_frame_equal(df.groupby(c).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c).transform(sum)
tm.assert_series_equal(result, df['a'], check_names=False)
self.assertTrue(result.name is None)
tm.assert_series_equal(
df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a'])
tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']])
tm.assert_frame_equal(
df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']])
# GH 9603
df = pd.DataFrame({'a': [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4])
result = df.groupby(c).apply(len)
expected = pd.Series([1, 0, 0, 0],
index=pd.CategoricalIndex(c.values.categories))
expected.index.name = 'a'
tm.assert_series_equal(result, expected)
def test_pivot_table(self):
raw_cat1 = Categorical(["a", "a", "b", "b"],
categories=["a", "b", "z"], ordered=True)
raw_cat2 = Categorical(["c", "d", "c", "d"],
categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]})
result = pd.pivot_table(df, values='values', index=['A', 'B'])
expected = Series([1, 2, np.nan, 3, 4, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex.from_product(
[['a', 'b', 'z'], ['c', 'd', 'y']],
names=['A', 'B']),
name='values')
tm.assert_series_equal(result, expected)
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
self.assertEqual(result, 2)
def test_sort(self):
c = Categorical(["a", "b", "b", "a"], ordered=False)
cat = Series(c)
# 9816 deprecated
with tm.assert_produces_warning(FutureWarning):
c.order()
# sort in the categories order
expected = Series(
Categorical(["a", "a", "b", "b"],
ordered=False), index=[0, 3, 1, 2])
result = cat.sort_values()
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "c", "b", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
cat = Series(Categorical(["a", "c", "b", "d"], categories=[
"a", "b", "c", "d"], ordered=True))
res = cat.sort_values()
exp = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(res.__array__(), exp)
res = cat.sort_values(ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res.__array__(), exp)
raw_cat1 = Categorical(["a", "b", "c", "d"],
categories=["a", "b", "c", "d"], ordered=False)
raw_cat2 = Categorical(["a", "b", "c", "d"],
categories=["d", "c", "b", "a"], ordered=True)
s = ["a", "b", "c", "d"]
df = DataFrame({"unsort": raw_cat1,
"sort": raw_cat2,
"string": s,
"values": [1, 2, 3, 4]})
# Cats must be sorted in a dataframe
res = df.sort_values(by=["string"], ascending=False)
exp = np.array(["d", "c", "b", "a"])
self.assert_numpy_array_equal(res["sort"].values.__array__(), exp)
self.assertEqual(res["sort"].dtype, "category")
res = df.sort_values(by=["sort"], ascending=False)
exp = df.sort_values(by=["string"], ascending=True)
self.assert_numpy_array_equal(res["values"], exp["values"])
self.assertEqual(res["sort"].dtype, "category")
self.assertEqual(res["unsort"].dtype, "category")
# unordered cat, but we allow this
df.sort_values(by=["unsort"], ascending=False)
# multi-columns sort
# GH 7848
df = DataFrame({"id": [6, 5, 4, 3, 2, 1],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"], ordered=True)
df['grade'] = df['grade'].cat.set_categories(['b', 'e', 'a'])
# sorts 'grade' according to the order of the categories
result = df.sort_values(by=['grade'])
expected = df.iloc[[1, 2, 5, 0, 3, 4]]
tm.assert_frame_equal(result, expected)
# multi
result = df.sort_values(by=['grade', 'id'])
expected = df.iloc[[2, 1, 5, 4, 3, 0]]
tm.assert_frame_equal(result, expected)
# reverse
cat = Categorical(["a", "c", "c", "b", "d"], ordered=True)
res = cat.sort_values(ascending=False)
exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
# some NaN positions
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='first')
exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True)
res = cat.sort_values(ascending=False, na_position='last')
exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object)
exp_categories = np.array(["a", "b", "c", "d"], dtype=object)
self.assert_numpy_array_equal(res.__array__(), exp_val)
self.assert_numpy_array_equal(res.categories, exp_categories)
def test_slicing(self):
cat = Series(Categorical([1, 2, 3, 4]))
reversed = cat[::-1]
exp = np.array([4, 3, 2, 1])
self.assert_numpy_array_equal(reversed.__array__(), exp)
df = DataFrame({'value': (np.arange(100) + 1).astype('int64')})
df['D'] = pd.cut(df.value, bins=[0, 25, 50, 75, 100])
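# pd.cut returns a Categorical, so 'D' is a category-dtype column whose
# values are interval labels such as '(0, 25]' (relied on by the lookups below)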
expected = Series([11, '(0, 25]'], index=['value', 'D'], name=10)
result = df.iloc[10]
tm.assert_series_equal(result, expected)
expected = DataFrame({'value': np.arange(11, 21).astype('int64')},
index=np.arange(10, 20).astype('int64'))
expected['D'] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100])
result = df.iloc[10:20]
tm.assert_frame_equal(result, expected)
expected = Series([9, '(0, 25]'], index=['value', 'D'], name=8)
result = df.loc[8]
tm.assert_series_equal(result, expected)
def test_slicing_and_getting_ops(self):
# systematically test the slicing operations:
# for all slicing ops:
# - returning a dataframe
# - returning a column
# - returning a row
# - returning a single value
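# (Hedged illustration of the expected return types, using the frame built
#  below; the values shown are what the assertions encode, not new behavior:)
# >>> df.iloc[2:4, :]   # DataFrame; "cats" keeps dtype 'category'
# >>> df.iloc[:, 0]     # Series with dtype 'category'
# >>> df.iloc[2, :]     # row Series with dtype object
# >>> df.iloc[2, 0]     # plain scalar, here the string "b"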
cats = pd.Categorical(
["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 2, 3, 4, 5, 6, 7]
df = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
cats2 = pd.Categorical(["b", "c"], categories=["a", "b", "c"])
idx2 = pd.Index(["j", "k"])
values2 = [3, 4]
# 2:4,: | "j":"k",:
exp_df = pd.DataFrame({"cats": cats2, "values": values2}, index=idx2)
# :,"cats" | :,0
exp_col = pd.Series(cats, index=idx, name='cats')
# "j",: | 2,:
exp_row = pd.Series(["b", 3], index=["cats", "values"], dtype="object",
name="j")
# "j","cats | 2,0
exp_val = "b"
# iloc
# frame
res_df = df.iloc[2:4, :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.iloc[2, :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.iloc[2, 0]
self.assertEqual(res_val, exp_val)
# loc
# frame
res_df = df.loc["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.loc["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.loc[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.loc["j", "cats"]
self.assertEqual(res_val, exp_val)
# ix
# frame
# res_df = df.ix["j":"k",[0,1]] # doesn't work?
res_df = df.ix["j":"k", :]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
# row
res_row = df.ix["j", :]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
# col
res_col = df.ix[:, "cats"]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
# single value
res_val = df.ix["j", 0]
self.assertEqual(res_val, exp_val)
# iat
res_val = df.iat[2, 0]
self.assertEqual(res_val, exp_val)
# at
res_val = df.at["j", "cats"]
self.assertEqual(res_val, exp_val)
# fancy indexing
exp_fancy = df.iloc[[2]]
res_fancy = df[df["cats"] == "b"]
tm.assert_frame_equal(res_fancy, exp_fancy)
res_fancy = df[df["values"] == 3]
tm.assert_frame_equal(res_fancy, exp_fancy)
# get_value
res_val = df.get_value("j", "cats")
self.assertEqual(res_val, exp_val)
# i : int, slice, or sequence of integers
res_row = df.iloc[2]
tm.assert_series_equal(res_row, exp_row)
tm.assertIsInstance(res_row["cats"], compat.string_types)
res_df = df.iloc[slice(2, 4)]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[[2, 3]]
tm.assert_frame_equal(res_df, exp_df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_col = df.iloc[:, 0]
tm.assert_series_equal(res_col, exp_col)
self.assertTrue(com.is_categorical_dtype(res_col))
res_df = df.iloc[:, slice(0, 2)]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
res_df = df.iloc[:, [0, 1]]
tm.assert_frame_equal(res_df, df)
self.assertTrue(com.is_categorical_dtype(res_df["cats"]))
def test_slicing_doc_examples(self):
# GH 7918
cats = Categorical(
["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"])
idx = Index(["h", "i", "j", "k", "l", "m", "n", ])
values = [1, 2, 2, 2, 3, 4, 5]
df = DataFrame({"cats": cats, "values": values}, index=idx)
result = df.iloc[2:4, :]
expected = DataFrame(
{"cats": Categorical(
['b', 'b'], categories=['a', 'b', 'c']),
"values": [2, 2]}, index=['j', 'k'])
tm.assert_frame_equal(result, expected)
result = df.iloc[2:4, :].dtypes
expected = Series(['category', 'int64'], ['cats', 'values'])
tm.assert_series_equal(result, expected)
result = df.loc["h":"j", "cats"]
expected = Series(Categorical(['a', 'b', 'b'],
categories=['a', 'b', 'c']),
index=['h', 'i', 'j'], name='cats')
tm.assert_series_equal(result, expected)
result = df.ix["h":"j", 0:1]
expected = DataFrame({'cats': Series(
Categorical(
['a', 'b', 'b'], categories=['a', 'b', 'c']), index=['h', 'i',
'j'])})
tm.assert_frame_equal(result, expected)
def test_assigning_ops(self):
# systematically test the assigning operations:
# for all slicing ops:
# for value in categories and value not in categories:
# - assign a single value -> exp_single_cats_value
# - assign a complete row (mixed values) -> exp_single_row
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
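# (Illustration: "b" is already in categories=["a", "b"] so it can be
#  assigned through any indexer below, while "c" is not in the categories
#  and every such assignment is expected to raise ValueError.)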
cats = pd.Categorical(
["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = pd.DataFrame({"cats": cats, "values": values}, index=idx)
# the expected values
# changed single row
cats1 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = pd.DataFrame(
{"cats": cats1,
"values": values1}, index=idx1)
# changed multiple rows
cats2 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = pd.DataFrame(
{"cats": cats2,
"values": values2}, index=idx2)
# changed part of the cats column
cats3 = pd.Categorical(
["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = pd.DataFrame(
{"cats": cats3,
"values": values3}, index=idx3)
# changed single value in cats col
cats4 = pd.Categorical(
["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx4 = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
values4 = [1, 1, 1, 1, 1, 1, 1]
exp_single_cats_value = pd.DataFrame(
{"cats": cats4,
"values": values4}, index=idx4)
# iloc
# ###############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.iloc[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.iloc[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iloc[2, 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.iloc[2, :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.iloc[2, :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.iloc[2:4, :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.iloc[2:4, :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.iloc[2:4, 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.iloc[2:4, 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.iloc[2:4, 0] = ["c", "c"]
# loc
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.loc["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.loc[df.index == "j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.loc["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.loc["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.loc["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.loc["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.loc["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.loc["j":"k", "cats"] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.loc["j":"k", "cats"] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.loc["j":"k", "cats"] = ["c", "c"]
# ix
# ##############
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.ix["j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
df = orig.copy()
df.ix[df.index == "j", 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.ix["j", 0] = "c"
self.assertRaises(ValueError, f)
# - assign a complete row (mixed values) -> exp_single_row
df = orig.copy()
df.ix["j", :] = ["b", 2]
tm.assert_frame_equal(df, exp_single_row)
# - assign a complete row (mixed values) not in categories set
def f():
df = orig.copy()
df.ix["j", :] = ["c", 2]
self.assertRaises(ValueError, f)
# - assign multiple rows (mixed values) -> exp_multi_row
df = orig.copy()
df.ix["j":"k", :] = [["b", 2], ["b", 2]]
tm.assert_frame_equal(df, exp_multi_row)
def f():
df = orig.copy()
df.ix["j":"k", :] = [["c", 2], ["c", 2]]
self.assertRaises(ValueError, f)
# assign a part of a column with dtype == categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
# different categories -> not sure if this should fail or pass
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["b", "b"], categories=["a", "b", "c"])
with tm.assertRaises(ValueError):
# different values
df = orig.copy()
df.ix["j":"k", 0] = pd.Categorical(
["c", "c"], categories=["a", "b", "c"])
# assign a part of a column with dtype != categorical ->
# exp_parts_cats_col
df = orig.copy()
df.ix["j":"k", 0] = ["b", "b"]
tm.assert_frame_equal(df, exp_parts_cats_col)
with tm.assertRaises(ValueError):
df.ix["j":"k", 0] = ["c", "c"]
# iat
df = orig.copy()
df.iat[2, 0] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.iat[2, 0] = "c"
self.assertRaises(ValueError, f)
# at
# - assign a single value -> exp_single_cats_value
df = orig.copy()
df.at["j", "cats"] = "b"
tm.assert_frame_equal(df, exp_single_cats_value)
# - assign a single value not in the current categories set
def f():
df = orig.copy()
df.at["j", "cats"] = "c"
self.assertRaises(ValueError, f)
# fancy indexing
catsf = pd.Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"])
idxf = pd.Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = pd.DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
exp_fancy["cats"].cat.set_categories(["a", "b", "c"], inplace=True)
df[df["cats"] == "c"] = ["b", 2]
tm.assert_frame_equal(df, exp_multi_row)
# set_value
df = orig.copy()
df.set_value("j", "cats", "b")
tm.assert_frame_equal(df, exp_single_cats_value)
def f():
df = orig.copy()
df.set_value("j", "cats", "c")
self.assertRaises(ValueError, f)
# Assigning a Categorical to part of an int/... column uses the values of
# the Categorical
df = pd.DataFrame({"a": [1, 1, 1, 1, 1],
"b": ["a", "a", "a", "a", "a"]})
exp = pd.DataFrame({"a": [1, "b", "b", 1, 1],
"b": ["a", "a", "b", "b", "a"]})
df.loc[1:2, "a"] = pd.Categorical(["b", "b"], categories=["a", "b"])
df.loc[2:3, "b"] = pd.Categorical(["b", "b"], categories=["a", "b"])
tm.assert_frame_equal(df, exp)
# Series
orig = Series(pd.Categorical(["b", "b"], categories=["a", "b"]))
s = orig.copy()
s[:] = "a"
exp = Series(pd.Categorical(["a", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[1] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[s.index > 0] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s[[False, True]] = "a"
exp = Series(pd.Categorical(["b", "a"], categories=["a", "b"]))
tm.assert_series_equal(s, exp)
s = orig.copy()
s.index = ["x", "y"]
s["y"] = "a"
exp = Series(
pd.Categorical(["b", "a"],
categories=["a", "b"]), index=["x", "y"])
tm.assert_series_equal(s, exp)
# ensure that one can set something to np.nan
s = Series(Categorical([1, 2, 3]))
exp = Series(Categorical([1, np.nan, 3]))
s[1] = np.nan
tm.assert_series_equal(s, exp)
def test_comparisons(self):
tests_data = [(list("abc"), list("cba"), list("bbb")),
([1, 2, 3], [3, 2, 1], [2, 2, 2])]
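# each tuple is (data, the same categories listed in reverse order, a
# baseline of repeated middle values to compare against)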
for data, reverse, base in tests_data:
cat_rev = pd.Series(pd.Categorical(data, categories=reverse,
ordered=True))
cat_rev_base = pd.Series(pd.Categorical(base, categories=reverse,
ordered=True))
cat = pd.Series(pd.Categorical(data, ordered=True))
cat_base = pd.Series(pd.Categorical(
base, categories=cat.cat.categories, ordered=True))
s = Series(base)
a = np.array(base)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = Series([True, False, False])
tm.assert_series_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = Series([False, False, True])
tm.assert_series_equal(res_rev, exp_rev)
res = cat > cat_base
exp = Series([False, False, True])
tm.assert_series_equal(res, exp)
scalar = base[1]
res = cat > scalar
exp = Series([False, False, True])
exp2 = cat.values > scalar
tm.assert_series_equal(res, exp)
tm.assert_numpy_array_equal(res.values, exp2)
res_rev = cat_rev > scalar
exp_rev = Series([True, False, False])
exp_rev2 = cat_rev.values > scalar
tm.assert_series_equal(res_rev, exp_rev)
tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
# Only categoricals with the same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
# a categorical cannot be compared to a Series or a numpy array, and
# the reverse comparisons are not allowed either
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# ordering comparisons (<, >) should raise for unordered cats
cat = Series(Categorical(list("abc")))
def f():
cat > "b"
self.assertRaises(TypeError, f)
cat = Series(Categorical(list("abc"), ordered=False))
def f():
cat > "b"
self.assertRaises(TypeError, f)
# https://github.com/pydata/pandas/issues/9836#issuecomment-92123057
# and following: comparisons with scalars not in the categories should
# raise for ordering comps (<, >), but not for equality comps (==, !=)
cat = Series(Categorical(list("abc"), ordered=True))
self.assertRaises(TypeError, lambda: cat < "d")
self.assertRaises(TypeError, lambda: cat > "d")
self.assertRaises(TypeError, lambda: "d" < cat)
self.assertRaises(TypeError, lambda: "d" > cat)
self.assert_series_equal(cat == "d", Series([False, False, False]))
self.assert_series_equal(cat != "d", Series([True, True, True]))
# And test NaN handling...
cat = Series(Categorical(["a", "b", "c", np.nan]))
exp = Series([True, True, True, False])
res = (cat == cat)
tm.assert_series_equal(res, exp)
def test_cat_equality(self):
# GH 8938
# allow equality comparisons
a = Series(list('abc'), dtype="category")
b = Series(list('abc'), dtype="object")
c = Series(['a', 'b', 'cc'], dtype="object")
d = Series(list('acb'), dtype="object")
e = Categorical(list('abc'))
f = Categorical(list('acb'))
# vs scalar
self.assertFalse((a == 'a').all())
self.assertTrue(((a != 'a') == ~(a == 'a')).all())
self.assertFalse(('a' == a).all())
self.assertTrue((a == 'a')[0])
self.assertTrue(('a' == a)[0])
self.assertFalse(('a' != a)[0])
# vs list-like
self.assertTrue((a == a).all())
self.assertFalse((a != a).all())
self.assertTrue((a == list(a)).all())
self.assertTrue((a == b).all())
self.assertTrue((b == a).all())
self.assertTrue(((~(a == b)) == (a != b)).all())
self.assertTrue(((~(b == a)) == (b != a)).all())
self.assertFalse((a == c).all())
self.assertFalse((c == a).all())
self.assertFalse((a == d).all())
self.assertFalse((d == a).all())
# vs a cat-like
self.assertTrue((a == e).all())
self.assertTrue((e == a).all())
self.assertFalse((a == f).all())
self.assertFalse((f == a).all())
self.assertTrue(((~(a == e) == (a != e)).all()))
self.assertTrue(((~(e == a) == (e != a)).all()))
self.assertTrue(((~(a == f) == (a != f)).all()))
self.assertTrue(((~(f == a) == (f != a)).all()))
# non-equality is not comparable
self.assertRaises(TypeError, lambda: a < b)
self.assertRaises(TypeError, lambda: b < a)
self.assertRaises(TypeError, lambda: a > b)
self.assertRaises(TypeError, lambda: b > a)
def test_concat(self):
cat = pd.Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = pd.DataFrame({"cats": cat, "vals": vals})
cat2 = pd.Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = pd.DataFrame({"cats": cat2,
"vals": vals2}, index=pd.Index([0, 1, 0, 1]))
res = pd.concat([df, df])
tm.assert_frame_equal(exp, res)
# Concat should raise if the two categoricals do not have the same
# categories
cat3 = pd.Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_wrong_categories = pd.DataFrame({"cats": cat3, "vals": vals3})
def f():
pd.concat([df, df_wrong_categories])
self.assertRaises(ValueError, f)
# GH 7864
# make sure ordering is preserved
df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"raw_grade": ['a', 'b', 'b', 'a', 'a', 'e']})
df["grade"] = pd.Categorical(df["raw_grade"])
df['grade'].cat.set_categories(['e', 'a', 'b'])
df1 = df[0:3]
df2 = df[3:]
self.assert_numpy_array_equal(df['grade'].cat.categories,
df1['grade'].cat.categories)
self.assert_numpy_array_equal(df['grade'].cat.categories,
df2['grade'].cat.categories)
dfx = pd.concat([df1, df2])
dfx['grade'].cat.categories
self.assert_numpy_array_equal(df['grade'].cat.categories,
dfx['grade'].cat.categories)
def test_concat_preserve(self):
# GH 8641
# series concat not preserving category dtype
s = Series(list('abc'), dtype='category')
s2 = Series(list('abd'), dtype='category')
def f():
pd.concat([s, s2])
self.assertRaises(ValueError, f)
result = pd.concat([s, s], ignore_index=True)
expected = Series(list('abcabc')).astype('category')
tm.assert_series_equal(result, expected)
result = pd.concat([s, s])
expected = Series(
list('abcabc'), index=[0, 1, 2, 0, 1, 2]).astype('category')
tm.assert_series_equal(result, expected)
a = Series(np.arange(6, dtype='int64'))
b = Series(list('aabbca'))
df2 = DataFrame({'A': a,
'B': b.astype('category', categories=list('cab'))})
result = pd.concat([df2, df2])
expected = DataFrame({'A': pd.concat([a, a]),
                      'B': pd.concat([b, b]).astype(
                          'category', categories=list('cab'))})
tm.assert_frame_equal(result, expected)
from contextlib import nullcontext as does_not_raise
from functools import partial
import pandas as pd
from pandas.testing import assert_series_equal
from solarforecastarbiter import datamodel
from solarforecastarbiter.reference_forecasts import persistence
from solarforecastarbiter.conftest import default_observation
import pytest
def load_data_base(data, observation, data_start, data_end):
# slice doesn't care about closed or interval label
# so here we manually adjust start and end times
if 'instant' in observation.interval_label:
pass
elif observation.interval_label == 'ending':
data_start += pd.Timedelta('1s')
elif observation.interval_label == 'beginning':
data_end -= pd.Timedelta('1s')
return data[data_start:data_end]
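# Example of the boundary adjustment above (hedged illustration):
#   interval_label 'ending'   : data_start 12:00 -> 12:00:01, so the value
#                               stamped 12:00 (covering the prior interval)
#                               is excluded from the slice
#   interval_label 'beginning': data_end 13:00 -> 12:59:59, so the value
#                               stamped 13:00 is excluded
#   'instant' labels are used as-is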
@pytest.fixture
def powerplant_metadata():
"""1:1 AC:DC"""
modeling_params = datamodel.FixedTiltModelingParameters(
ac_capacity=200, dc_capacity=200, temperature_coefficient=-0.3,
dc_loss_factor=3, ac_loss_factor=0,
surface_tilt=30, surface_azimuth=180)
metadata = datamodel.SolarPowerPlant(
name='Albuquerque Baseline', latitude=35.05, longitude=-106.54,
elevation=1657.0, timezone='America/Denver',
modeling_parameters=modeling_params)
return metadata
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190404 1400'),
('ending', 'right', '20190404 1400'),
('instant', None, '20190404 1359')
])
def test_persistence_scalar(site_metadata, interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp(end, tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
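# persistence_scalar is expected to forecast a single value (the mean of the
# observations over the data window, a constant 100 here) repeated across
# the forecast index; this summary is inferred from the test expectations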
fx = persistence.persistence_scalar(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data=load_data)
expected_index = pd.date_range(
start='20190404 1300', end=end, freq='5min', tz=tz,
closed=closed)
expected = pd.Series(100., index=expected_index)
assert_series_equal(fx, expected)
@pytest.mark.parametrize('obs_interval_label', ('beginning', 'ending',
'instant'))
@pytest.mark.parametrize('interval_label,closed,end', [
('beginning', 'left', '20190406 0000'),
('ending', 'right', '20190406 0000'),
('instant', None, '20190405 2359')
])
def test_persistence_interval(site_metadata, obs_interval_label,
interval_label, closed, end):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label=obs_interval_label)
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
data = pd.Series(data_index.hour, index=data_index, dtype=float)
if obs_interval_label == 'ending':
# e.g. timestamp 12:00:00 should be equal to 11
data = data.shift(1).fillna(0)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
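# persistence_interval is expected to carry over each interval average from
# the corresponding time of day in the data window, so the 20190405 forecast
# reproduces the hourly means of 20190404 (0 through 23 in this synthetic data)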
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed=closed)
expected_vals = list(range(0, 24))
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
# handle permutations of parameters that should fail
if data_end.minute == 59 and obs_interval_label != 'instant':
expectation = pytest.raises(ValueError)
elif data_end.minute == 0 and obs_interval_label == 'instant':
expectation = pytest.raises(ValueError)
else:
expectation = does_not_raise()
with expectation:
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected)
def test_persistence_interval_missing_data(site_metadata):
# interval beginning obs
observation = default_observation(
site_metadata, interval_length='5min',
interval_label='ending')
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404T1200', end='20190406', freq='5min', tz=tz)
# each element of data is equal to the hour value of its label
end = '20190406 0000'
data = pd.Series(data_index.hour, index=data_index, dtype=float)
data = data.shift(1)
data_start = pd.Timestamp('20190404 0000', tz=tz)
data_end = pd.Timestamp(end, tz=tz) - pd.Timedelta('1d')
forecast_start = pd.Timestamp('20190405 0000', tz=tz)
interval_length = pd.Timedelta('60min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start='20190405 0000', end=end, freq='60min', tz=tz, closed='right')
expected_vals = [None] * 12 + list(range(12, 24))
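# the observations only start at 20190404T1200, so the first 12 forecast
# intervals have no backing data and are expected to come back as NaN
# rather than raising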
expected = pd.Series(expected_vals, index=expected_index, dtype=float)
fx = persistence.persistence_interval(
observation, data_start, data_end, forecast_start,
interval_length, 'ending', load_data)
assert_series_equal(fx, expected)
@pytest.fixture
def uniform_data():
tz = 'America/Phoenix'
data_index = pd.date_range(
start='20190404', end='20190406', freq='5min', tz=tz)
data = pd.Series(100., index=data_index)
return data
@pytest.mark.parametrize(
'interval_label,expected_index,expected_ghi,expected_ac,obsscale', (
('beginning',
['20190404 1300', '20190404 1330'],
[96.41150694741889, 91.6991546408236],
[96.60171202566896, 92.074796727846],
1),
('ending',
['20190404 1330', '20190404 1400'],
[96.2818141290749, 91.5132934827808],
[96.47816752344607, 91.89460837042301],
1),
# test clipped at 2x clearsky
('beginning',
['20190404 1300', '20190404 1330'],
[1926.5828549018618, 1832.4163238767312],
[383.1524464326973, 365.19729186262526],
50)
)
)
def test_persistence_scalar_index(
powerplant_metadata, uniform_data, interval_label,
expected_index, expected_ghi, expected_ac, obsscale):
# ac_capacity is 200 from above
observation = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning')
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label='beginning', variable='ac_power')
data = uniform_data * obsscale
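# persistence_scalar_index persists a clear-sky index (or AC power index)
# rather than the raw measurement, so the expected GHI/AC values follow the
# clear-sky curve scaled by the observed index; description hedged from the
# parametrized expectations above rather than from library documentation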
tz = data.index.tzinfo
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
load_data = partial(load_data_base, data)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
expected_index, tz=tz, freq=interval_length)
expected = pd.Series(expected_ghi, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected = pd.Series(expected_ac, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_instant_obs_fx(
site_metadata, powerplant_metadata, uniform_data):
# instantaneous obs and fx
interval_length = pd.Timedelta('30min')
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
data = uniform_data
tz = data.index.tzinfo
load_data = partial(load_data_base, data)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1259', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1359', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.59022431746838, 91.99405501672328]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_values = [96.77231379880752, 92.36198028963426]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
# instant obs and fx, but with offset added to starts instead of ends
data_start = pd.Timestamp('20190404 1201', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1301', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
expected_index = pd.DatetimeIndex(
['20190404 1300', '20190404 1330'], tz=tz, freq=interval_length)
expected_values = [96.55340033645147, 91.89662922267517]
expected = pd.Series(expected_values, index=expected_index)
assert_series_equal(fx, expected, check_names=False)
def test_persistence_scalar_index_invalid_times_instant(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_label = 'instant'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
# instant obs that cover the whole interval - not allowed!
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
interval_length = pd.Timedelta('30min')
with pytest.raises(ValueError):
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
@pytest.mark.parametrize('data_start,data_end,forecast_start,forecast_end', (
('20190404 1201', '20190404 1300', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1259', '20190404 1300', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1301', '20190404 1400'),
('20190404 1200', '20190404 1300', '20190404 1300', '20190404 1359'),
))
def test_persistence_scalar_index_invalid_times_interval(
site_metadata, interval_label, data_start, data_end, forecast_start,
forecast_end):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
# base times to mess with
data_start = pd.Timestamp(data_start, tz=tz)
data_end = pd.Timestamp(data_end, tz=tz)
forecast_start = pd.Timestamp(forecast_start, tz=tz)
forecast_end = pd.Timestamp(forecast_end, tz=tz)
# interval average obs with invalid starts/ends
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
errtext = "with interval_label beginning or ending"
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert errtext in str(excinfo.value)
def test_persistence_scalar_index_invalid_times_invalid_label(site_metadata):
data = pd.Series(100., index=[0])
load_data = partial(load_data_base, data)
tz = 'America/Phoenix'
interval_length = pd.Timedelta('30min')
interval_label = 'invalid'
observation = default_observation(
site_metadata, interval_length='5min')
object.__setattr__(observation, 'interval_label', interval_label)
data_start = pd.Timestamp('20190404 1200', tz=tz)
data_end = pd.Timestamp('20190404 1300', tz=tz)
forecast_start = pd.Timestamp('20190404 1300', tz=tz)
forecast_end = pd.Timestamp('20190404 1400', tz=tz)
with pytest.raises(ValueError) as excinfo:
persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert "invalid interval_label" in str(excinfo.value)
def test_persistence_scalar_index_low_solar_elevation(
site_metadata, powerplant_metadata):
interval_label = 'beginning'
observation = default_observation(
site_metadata, interval_length='5min', interval_label=interval_label)
observation_ac = default_observation(
powerplant_metadata, interval_length='5min',
interval_label=interval_label, variable='ac_power')
# at ABQ Baseline, solar apparent zenith for these points is
# 2019-05-13 12:00:00+00:00 91.62
# 2019-05-13 12:05:00+00:00 90.09
# 2019-05-13 12:10:00+00:00 89.29
# 2019-05-13 12:15:00+00:00 88.45
# 2019-05-13 12:20:00+00:00 87.57
# 2019-05-13 12:25:00+00:00 86.66
tz = 'UTC'
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
index = pd.date_range(start=data_start, end=data_end,
freq='5min', closed='left')
# clear sky 5 min avg (from 1 min avg) GHI is
# [0., 0.10932908, 1.29732454, 4.67585122, 10.86548521, 19.83487399]
# create data series that could produce obs / clear of
# 0/0, 1/0.1, -1/1.3, 5/5, 10/10, 20/20
# average without limits is (10 - 1 + 1 + 1 + 1) / 5 = 2.4
# average with element limits of [0, 2] = (2 + 0 + 1 + 1 + 1) / 5 = 1
data = pd.Series([0, 1, -1, 5, 10, 20.], index=index)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(
start=forecast_start, end=forecast_end, freq='5min', closed='left')
# clear sky 5 min avg GHI is
# [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected_vals = [31.2, 44.5, 59.4, 75.4, 92.4, 110.1]
expected = pd.Series(expected_vals, index=expected_index)
fx = persistence.persistence_scalar_index(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
expected = pd.Series([0.2, 0.7, 1.2, 1.6, 2., 2.5], index=expected_index)
fx = persistence.persistence_scalar_index(
observation_ac, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data)
assert_series_equal(fx, expected, check_less_precise=1, check_names=False)
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0, 0, 0, 20, 20, 20], 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0, 0, 0, 4, 4, 4], 'y', [50], [2]),
# invalid axis
pytest.param([0, 0, 0, 4, 4, 4], 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic(site_metadata, interval_label, obs_values,
axis, constant_values, expected_values):
tz = 'UTC'
interval_length = '5min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='5min',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0] * 11 + [20] * 11, 'x', [10, 20], [50, 100]),
([0] * 11 + [20] * 11, 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0] * 11 + [4] * 11, 'y', [50], [2]),
# invalid axis
pytest.param([0] * 11 + [4] * 11, 'percentile', [-1], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
# insufficient observation data
pytest.param([5.3, 7.3, 1.4] * 4, 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
pytest.param([], 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
pytest.param([None]*10, 'x', [50], None,
marks=pytest.mark.xfail(raises=ValueError, strict=True)),
])
def test_persistence_probabilistic_timeofday(site_metadata, obs_values, axis,
constant_values, expected_values):
tz = 'UTC'
interval_label = "beginning"
interval_length = '1h'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
# all observations at 9am each day
data_end = pd.Timestamp('20190513T0900', tz=tz)
data_start = data_end - pd.Timedelta("{}D".format(len(obs_values)))
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1D',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
# forecast 9am
forecast_start = pd.Timestamp('20190514T0900', tz=tz)
forecast_end = pd.Timestamp('20190514T1000', tz=tz)
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("data_end,forecast_start", [
# no timezone
(pd.Timestamp("20190513T0900"), pd.Timestamp("20190514T0900")),
# same timezone
(
pd.Timestamp("20190513T0900", tz="UTC"),
pd.Timestamp("20190514T0900", tz="UTC")
),
# different timezone
(
pd.Timestamp("20190513T0200", tz="US/Pacific"),
pd.Timestamp("20190514T0900", tz="UTC")
),
# obs timezone, but no fx timezone
(
pd.Timestamp("20190513T0900", tz="UTC"),
pd.Timestamp("20190514T0900")
),
# no obs timezone, but fx timezone
(
pd.Timestamp("20190513T0900"),
pd.Timestamp("20190514T0900", tz="UTC")
),
])
def test_persistence_probabilistic_timeofday_timezone(site_metadata, data_end,
forecast_start):
obs_values = [0] * 11 + [20] * 11
axis, constant_values, expected_values = 'x', [10, 20], [50, 100]
interval_label = "beginning"
interval_length = '1h'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
# all observations at 9am each day
data_start = data_end - pd.Timedelta("{}D".format(len(obs_values)))
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1D',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
# forecast 9am
forecast_end = forecast_start + pd.Timedelta("1h")
interval_length = pd.Timedelta('1h')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
# if forecast without timezone, then use obs timezone
if data.index.tzinfo is not None and forecast_start.tzinfo is None:
expected_index = expected_index.tz_localize(data.index.tzinfo)
forecasts = persistence.persistence_probabilistic_timeofday(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
@pytest.mark.parametrize("interval_label", [
'beginning', 'ending'
])
@pytest.mark.parametrize("obs_values,axis,constant_values,expected_values", [
# constant_values = variable values
# forecasts = percentiles [%]
([0] * 15 + [20] * 15, 'x', [10, 20], [50, 100]),
# constant_values = percentiles [%]
# forecasts = variable values
([0] * 15 + [4] * 15, 'y', [50], [2]),
([None] * 30, 'y', [50], [None]),
([0] * 10 + [None] * 10 + [20] * 10, 'x', [10, 20], [50, 100]),
([0] * 10 + [None] * 10 + [4] * 10, 'y', [50], [2]),
])
def test_persistence_probabilistic_resampling(
site_metadata,
interval_label,
obs_values, axis,
constant_values,
expected_values
):
tz = 'UTC'
interval_length = '1min'
observation = default_observation(
site_metadata,
interval_length=interval_length,
interval_label=interval_label
)
data_start = pd.Timestamp('20190513 1200', tz=tz)
data_end = pd.Timestamp('20190513 1230', tz=tz)
closed = datamodel.CLOSED_MAPPING[interval_label]
index = pd.date_range(start=data_start, end=data_end, freq='1min',
closed=closed)
data = pd.Series(obs_values, index=index, dtype=float)
forecast_start = pd.Timestamp('20190513 1230', tz=tz)
forecast_end = pd.Timestamp('20190513 1300', tz=tz)
interval_length = pd.Timedelta('5min')
load_data = partial(load_data_base, data)
expected_index = pd.date_range(start=forecast_start, end=forecast_end,
freq=interval_length, closed=closed)
forecasts = persistence.persistence_probabilistic(
observation, data_start, data_end, forecast_start, forecast_end,
interval_length, interval_label, load_data, axis, constant_values
)
assert isinstance(forecasts, list)
for i, fx in enumerate(forecasts):
pd.testing.assert_index_equal(fx.index, expected_index,
check_categorical=False)
pd.testing.assert_series_equal(
fx,
pd.Series(expected_values[i], index=expected_index, dtype=float)
)
# all observations 9-10 each day.
# This index is for (09:00, 10:00] (interval_label=ending), but subtract
# 30 minutes for [09:00, 10:00) (interval_label=beginning)
PROB_PERS_TOD_OBS_INDEX = pd.DatetimeIndex([
'2019-04-21 09:30:00+00:00', '2019-04-21 10:00:00+00:00',
'2019-04-22 09:30:00+00:00', '2019-04-22 10:00:00+00:00',
'2019-04-23 09:30:00+00:00', '2019-04-23 10:00:00+00:00',
'2019-04-24 09:30:00+00:00', '2019-04-24 10:00:00+00:00',
'2019-04-25 09:30:00+00:00', '2019-04-25 10:00:00+00:00',
'2019-04-26 09:30:00+00:00', '2019-04-26 10:00:00+00:00',
'2019-04-27 09:30:00+00:00', '2019-04-27 10:00:00+00:00',
'2019-04-28 09:30:00+00:00', '2019-04-28 10:00:00+00:00',
'2019-04-29 09:30:00+00:00', '2019-04-29 10:00:00+00:00',
'2019-04-30 09:30:00+00:00', '2019-04-30 10:00:00+00:00',
'2019-05-01 09:30:00+00:00', '2019-05-01 10:00:00+00:00',
'2019-05-02 09:30:00+00:00', '2019-05-02 10:00:00+00:00',
'2019-05-03 09:30:00+00:00', '2019-05-03 10:00:00+00:00',
'2019-05-04 09:30:00+00:00', '2019-05-04 10:00:00+00:00',
'2019-05-05 09:30:00+00:00', '2019-05-05 10:00:00+00:00',
'2019-05-06 09:30:00+00:00', '2019-05-06 10:00:00+00:00',
'2019-05-07 09:30:00+00:00', '2019-05-07 10:00:00+00:00',
'2019-05-08 09:30:00+00:00', '2019-05-08 10:00:00+00:00',
'2019-05-09 09:30:00+00:00', '2019-05-09 10:00:00+00:00',
'2019-05-10 09:30:00+00:00', '2019-05-10 10:00:00+00:00',
'2019-05-11 09:30:00+00:00', '2019-05-11 10:00:00+00:00',
'2019-05-12 09:30:00+00:00', '2019-05-12 10:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
@pytest.mark.parametrize('obs_interval_label_index', [
('beginning', PROB_PERS_TOD_OBS_INDEX - pd.Timedelta('30min')),
('ending', PROB_PERS_TOD_OBS_INDEX)
])
@pytest.mark.parametrize('fx_interval_label_index', [
('beginning', pd.DatetimeIndex(['20190514T0900Z'], freq='1h')),
##############################
## COVID_common.py ##
## <NAME> ##
## Version 2021.09.05 ##
##############################
import os
import sys
import warnings
import collections as clt
import calendar as cld
import datetime as dtt
import copy
import json
import numpy as np
import scipy as sp
import scipy.signal as signal
import pandas as pd
################################################################################
## Parameters
DATA_PATH = '/home/linc/21_Codes/COVID_breakdown/'
ISO_DATE_REF = '2020-01-01'
ISO_DATE_REF_VACC = '2021-03-01'
NB_LOOKBACK_DAYS = 90
PAGE_LATEST = 'latest'
PAGE_OVERALL = 'overall'
PAGE_2022 = '2022'
PAGE_2021 = '2021'
PAGE_2020 = '2020'
PAGE_LIST = [PAGE_LATEST, PAGE_OVERALL, PAGE_2021, PAGE_2020]
SYMPTOM_DICT = {
'sneezing': {'zh-tw': '鼻腔症狀', 'fr': 'éternuement'},
'cough': {'zh-tw': '咳嗽', 'fr': 'toux'},
'throatache': {'zh-tw': '喉嚨症狀', 'fr': 'mal de gorge'},
'earache': {'zh-tw': '耳朵痛', 'fr': 'otalgie'},
'dyspnea': {'zh-tw': '呼吸困難', 'fr': 'dyspnée'},
'bronchitis': {'zh-tw': '支氣管炎', 'fr': 'bronchite'},
'pneumonia': {'zh-tw': '肺炎', 'fr': 'pneumonie'},
'fever': {'zh-tw': '發燒', 'fr': 'fièvre'},
'chills': {'zh-tw': '畏寒', 'fr': 'frissons'},
'nausea': {'zh-tw': '噁心', 'fr': 'nausée'},
'vomiting': {'zh-tw': '嘔吐', 'fr': 'vomissement'},
'diarrhea': {'zh-tw': '腹瀉', 'fr': 'diarrhée'},
'headache': {'zh-tw': '頭痛', 'fr': 'mal de tête'},
'eyes sore': {'zh-tw': '眼痛', 'fr': 'mal aux yeux'},
'chest pain': {'zh-tw': '胸痛', 'fr': 'mal à la poitrine'},
'stomachache': {'zh-tw': '腹痛', 'fr': 'mal de ventre'},
'backache': {'zh-tw': '背痛', 'fr': 'mal de dos'},
'toothache': {'zh-tw': '牙痛', 'fr': 'mal de dents'},
'rash': {'zh-tw': '出疹', 'fr': 'rash'},
'fatigue': {'zh-tw': '倦怠', 'fr': 'fatigue'},
'soreness': {'zh-tw': '痠痛', 'fr': 'myalgie'},
'hypersomnia': {'zh-tw': '嗜睡', 'fr': 'hypersomnie'},
'insomnia': {'zh-tw': '失眠', 'fr': 'insomnie'},
'dysnosmia': {'zh-tw': '嗅覺異常', 'fr': 'dysosmie'},
'dysgeusia': {'zh-tw': '味覺異常', 'fr': 'dysgueusie'},
'tonsillitis': {'zh-tw': '淋巴腫脹', 'fr': 'adénopathie'},
'hypoglycemia': {'zh-tw': '低血糖', 'fr': 'hypoglycémie'},
'hypoxemia': {'zh-tw': '低血氧', 'fr': 'hypoxémie'},
'anorexia': {'zh-tw': '食慾不佳', 'fr': 'anorexie'},
'arrhythmia': {'zh-tw': '心律不整', 'fr': 'arythmie'},
'coma': {'zh-tw': '意識不清', 'fr': 'coma'},
'symptomatic': {'zh-tw': '有症狀', 'fr': 'symptomatique'},
'asymptomatic': {'zh-tw': '無症狀', 'fr': 'asymptomatique'}
}
TRAVEL_HISTORY_DICT = {
## Far-East Asia
'Bangladesh': {'zh-tw': '孟加拉', 'fr': 'Bangladesh'},
'Cambodia': {'zh-tw': '柬埔寨', 'fr': 'Cambodge'},
'China': {'zh-tw': '中國', 'fr': 'Chine'},
'Hong Kong': {'zh-tw': '香港', 'fr': 'Hong Kong'},
'Indonesia': {'zh-tw': '印尼', 'fr': 'Indonésie'},
'India': {'zh-tw': '印度', 'fr': 'Inde'},
'Japan': {'zh-tw': '日本', 'fr': 'Japon'},
'Korea': {'zh-tw': '韓國', 'fr': 'Corée'},
'Macao': {'zh-tw': '澳門', 'fr': 'Macao'},
'Malaysia': {'zh-tw': '馬來西亞', 'fr': 'Malaisie'},
'Mongolia': {'zh-tw': '蒙古', 'fr': 'Mongolie'},
'Myanmar': {'zh-tw': '緬甸', 'fr': 'Myanmar'},
'Nepal': {'zh-tw': '尼泊爾', 'fr': 'Népal'},
'Pakistan': {'zh-tw': '巴基斯坦', 'fr': 'Pakistan'},
'Philippines': {'zh-tw': '菲律賓', 'fr': 'Philippines'},
'Singapore': {'zh-tw': '新加坡', 'fr': 'Singapour'},
'Thailand': {'zh-tw': '泰國', 'fr': 'Thaïlande'},
'Vietnam': {'zh-tw': '越南', 'fr': 'Vietnam'},
## West & Central Asia
'Afghanistan': {'zh-tw': '阿富汗', 'fr': 'Afghanistan'},
'Iran': {'zh-tw': '伊朗', 'fr': 'Iran'},
'Kazakhstan': {'zh-tw': '哈薩克', 'fr': 'Kazakhstan'},
'Kyrgyzstan': {'zh-tw': '吉爾吉斯', 'fr': 'Kirghizistan'},
'Oman': {'zh-tw': '阿曼', 'fr': 'Oman'},
'Qatar': {'zh-tw': '卡達', 'fr': 'Qatar'},
'Saudi Arabia': {'zh-tw': '沙烏地阿拉伯', 'fr': 'Arabie saoudite'},
'Syria': {'zh-tw': '敘利亞', 'fr': 'Syrie'},
'Turkey': {'zh-tw': '土耳其', 'fr': 'Turquie'},
'UAE': {'zh-tw': '阿拉伯聯合大公國', 'fr': 'EAU'},
'Uzbekistan': {'zh-tw': '烏茲別克', 'fr': 'Ouzbékistan'},
## Europe
'Europe': {'zh-tw': '歐洲', 'fr': 'Europe'},
'Albania': {'zh-tw': '阿爾巴尼亞', 'fr': 'Albanie'},
'Armania': {'zh-tw': '亞美尼亞', 'fr': 'Arménie'},
'Austria': {'zh-tw': '奧地利', 'fr': 'Autriche'},
'Belarus': {'zh-tw': '白俄羅斯', 'fr': 'Biélorussie'},
'Belgium': {'zh-tw': '比利時', 'fr': 'Belgique'},
'Bulgaria': {'zh-tw': '保加利亞', 'fr': 'Bulgarie'},
'Croatia': {'zh-tw': '克羅埃西亞', 'fr': 'Croatie'},
'Czechia': {'zh-tw': '捷克', 'fr': 'Tchéquie'},
'Danmark': {'zh-tw': '丹麥', 'fr': 'Danemark'},
'Finland': {'zh-tw': '芬蘭', 'fr': 'Finlande'},
'France': {'zh-tw': '法國', 'fr': 'France'},
'Germany': {'zh-tw': '德國', 'fr': 'Allemagne'},
'Greece': {'zh-tw': '希臘', 'fr': 'Grèce'},
'Iceland': {'zh-tw': '冰島', 'fr': 'Islande'},
'Ireland': {'zh-tw': '愛爾蘭', 'fr': 'Irlande'},
'Italy': {'zh-tw': '義大利', 'fr': 'Italie'},
'Hungary': {'zh-tw': '匈牙利', 'fr': 'Hongrie'},
'Lithuania': {'zh-tw': '立陶宛', 'fr': 'Lituanie'},
'Luxemburg': {'zh-tw': '盧森堡', 'fr': 'Luxembourg'},
'Netherlands': {'zh-tw': '荷蘭', 'fr': 'Pays-Bas'},
'Poland': {'zh-tw': '波蘭', 'fr': 'Pologne'},
'Portugal': {'zh-tw': '葡萄牙', 'fr': 'Portugal'},
'Romania': {'zh-tw': '羅馬尼亞', 'fr': 'Roumanie'},
'Russia': {'zh-tw': '俄羅斯', 'fr': 'Russie'},
'Serbia': {'zh-tw': '塞爾維亞', 'fr': 'Serbie'},
'Slovakia': {'zh-tw': '斯洛伐克', 'fr': 'Slovaquie'},
'Spain': {'zh-tw': '西班牙', 'fr': 'Espagne'},
'Sweden': {'zh-tw': '瑞典', 'fr': 'Suède'},
'Switzerland': {'zh-tw': '瑞士', 'fr': 'Suisse'},
'UK': {'zh-tw': '英國', 'fr': 'Royaume-Uni'},
'Ukraine': {'zh-tw': '烏克蘭', 'fr': 'Ukraine'},
## Africa
'Africa': {'zh-tw': '非洲', 'fr': 'Afrique'},
'Burkina Faso': {'zh-tw': '布吉納法索', 'fr': 'Burkina Faso'},
'Cameroon': {'zh-tw': '喀麥隆', 'fr': 'Cameroun'},
'Eswatini': {'zh-tw': '史瓦帝尼', 'fr': 'Eswatini'},
'Egypt': {'zh-tw': '埃及', 'fr': 'Égypte'},
'Ethiopia': {'zh-tw': '衣索比亞', 'fr': 'Éthiopie'},
'Gambia': {'zh-tw': '甘比亞', 'fr': 'Gambie'},
'Ghana': {'zh-tw': '迦納', 'fr': 'Ghana'},
'Kenya': {'zh-tw': '肯亞', 'fr': 'Kenya'},
'Lesotho': {'zh-tw': '賴索托', 'fr': 'Lesotho'},
'Mauritania': {'zh-tw': '茅利塔尼亞', 'fr': 'Mauritanie'},
'Morocco': {'zh-tw': '摩洛哥', 'fr': 'Maroc'},
'Nigeria': {'zh-tw': '奈及利亞', 'fr': 'Nigéria'},
'Senegal': {'zh-tw': '塞內加爾', 'fr': 'Sénégal'},
'Somaliland': {'zh-tw': '索馬利蘭', 'fr': 'Somaliland'},
'South Africa': {'zh-tw': '南非', 'fr': 'Afrique du Sud'},
'Tunisia': {'zh-tw': '突尼西亞', 'fr': 'Tunisie'},
'Uganda': {'zh-tw': '烏干達', 'fr': 'Ouganda'},
## North & South America
'Argentina': {'zh-tw': '阿根廷', 'fr': 'Argentine'},
'Belize': {'zh-tw': '貝里斯', 'fr': 'Belize'},
'Bolivia': {'zh-tw': '玻利維亞', 'fr': 'Bolivie'},
'Brazil': {'zh-tw': '巴西', 'fr': 'Brésil'},
'Canada': {'zh-tw': '加拿大', 'fr': 'Canada'},
'Chile': {'zh-tw': '智利', 'fr': 'Chili'},
'Dominican Republic': {'zh-tw': '多明尼加', 'fr': 'République dominicaine'},
'Guatemala': {'zh-tw': '瓜地馬拉', 'fr': 'Guatemala'},
'Haiti': {'zh-tw': '海地', 'fr': 'Haïti'},
'Honduras': {'zh-tw': '宏都拉斯', 'fr': 'Honduras'},
'Latin America': {'zh-tw': '中南美洲', 'fr': 'Amérique latine'},
'Mexico': {'zh-tw': '墨西哥', 'fr': 'Mexique'},
'Nicaragua': {'zh-tw': '尼加拉瓜', 'fr': 'Nicaragua'},
'Paraguay': {'zh-tw': '巴拉圭', 'fr': 'Paraguay'},
'Peru': {'zh-tw': '秘魯', 'fr': 'Pérou'},
'USA': {'zh-tw': '美國', 'fr': 'États-Unis'},
## Oceania
'Australia': {'zh-tw': '澳洲', 'fr': 'Australie'},
'Marshall Islands': {'zh-tw': '馬紹爾', 'fr': 'Îles Marshall'},
'New Zealand': {'zh-tw': '紐西蘭', 'fr': 'Nouvelle-Zélande'},
'Palau': {'zh-tw': '帛琉', 'fr': 'Palaos'},
## Others
'Antarctica': {'zh-tw': '南極', 'fr': 'Antartique'},
'Coral Princess': {'zh-tw': '珊瑚公主號', 'fr': 'Coral Princess'},
'Diamond Princess': {'zh-tw': '鑽石公主號', 'fr': 'Diamond Princess'},
'Pan-Shi': {'zh-tw': '磐石艦', 'fr': 'Pan-Shi'},
'local': {'zh-tw': '無', 'fr': 'local'}
}
AGE_DICT = {
'0s': {'zh-tw': '0-9歲', 'fr': '0-9 ans'},
'10s': {'zh-tw': '10-19歲', 'fr': '10aine'},
'20s': {'zh-tw': '20-29歲', 'fr': '20aine'},
'30s': {'zh-tw': '30-39歲', 'fr': '30aine'},
'40s': {'zh-tw': '40-49歲', 'fr': '40aine'},
'50s': {'zh-tw': '50-59歲', 'fr': '50aine'},
'60s': {'zh-tw': '60-69歲', 'fr': '60aine'},
'70s': {'zh-tw': '70-79歲', 'fr': '70aine'},
'80s': {'zh-tw': '80-89歲', 'fr': '80aine'},
'90s': {'zh-tw': '90-99歲', 'fr': '90aine'},
'100+': {'zh-tw': '100+歲', 'fr': '100+'},
}
AGE_DICT_2 = {
'label': {
'0-4': {'zh-tw': '0-4歲', 'fr': '0-4 ans', 'en': '0-4 yo'},
'5-9': {'zh-tw': '5-9歲', 'fr': '5-9 ans', 'en': '5-9 yo'},
'10-14': {'zh-tw': '10-14歲', 'fr': '10-14 ans', 'en': '10-14 yo'},
'15-19': {'zh-tw': '15-19歲', 'fr': '15-19 ans', 'en': '15-19 yo'},
'20-24': {'zh-tw': '20-24歲', 'fr': '20-24 ans', 'en': '20-24 yo'},
'25-29': {'zh-tw': '25-29歲', 'fr': '25-29 ans', 'en': '25-29 yo'},
'30-34': {'zh-tw': '30-34歲', 'fr': '30-34 ans', 'en': '30-34 yo'},
'35-39': {'zh-tw': '35-39歲', 'fr': '35-39 ans', 'en': '35-39 yo'},
'40-44': {'zh-tw': '40-44歲', 'fr': '40-44 ans', 'en': '40-44 yo'},
'45-49': {'zh-tw': '45-49歲', 'fr': '45-49 ans', 'en': '45-49 yo'},
'50-54': {'zh-tw': '50-54歲', 'fr': '50-54 ans', 'en': '50-54 yo'},
'55-59': {'zh-tw': '55-59歲', 'fr': '55-59 ans', 'en': '55-59 yo'},
'60-64': {'zh-tw': '60-64歲', 'fr': '60-64 ans', 'en': '60-64 yo'},
'65-69': {'zh-tw': '65-69歲', 'fr': '65-69 ans', 'en': '65-69 yo'},
'70+': {'zh-tw': '70+歲', 'fr': '70+ ans', 'en': '70+ yo'},
'total': {'zh-tw': '所有年齡', 'fr': 'Tous âges', 'en': 'All ages'},
},
'2019': {
'0-4': 975801, '5-9': 1019322, '10-14': 1015228, '15-19': 1254141, '20-24': 1514105, '25-29': 1609454,
'30-34': 1594132, '35-39': 1964945, '40-44': 1974288, '45-49': 1775328, '50-54': 1814146, '55-59': 1827585,
'60-64': 1657519, '65-69': 1379517, '70-74': 800166, '75-79': 609634, '80-84': 426615, '85-89': 250664,
'90-94': 111099, '95-99': 25626, '100+': 3806,
},
'2020': {
'0-4': 925302, '5-9': 1064186, '10-14': 973908, '15-19': 1154426, '20-24': 1494883, '25-29': 1597613,
'30-34': 1583943, '35-39': 1894274, '40-44': 2016609, '45-49': 1760217, '50-54': 1806643, '55-59': 1824832,
'60-64': 1677085, '65-69': 1445839, '70-74': 902349, '75-79': 588493, '80-84': 445423, '85-89': 255428,
'90-94': 117104, '95-99': 28437, '100+': 4242,
},
}
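## Illustrative note (not in the original file): the yearly population dicts above keep
## 5-year bins up to '100+', while 'label' stops at '70+', so the older bins presumably
## have to be summed before matching the labels, e.g.
## >>> sum(v for k, v in AGE_DICT_2['2020'].items()
## ...     if k in ['70-74', '75-79', '80-84', '85-89', '90-94', '95-99', '100+'])
## 2341476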
COUNTY_DICT = {
'00000': dict( ## Total
tag = 'total',
label = ['Nationwide', 'National', '全國'],
population = 23588597,
),
## Metropole
'63000': dict( ## Taipei
tag = 'Taipei',
label = ['Taipei', 'Taipei', '台北'],
population = 2635286,
),
'64000': dict( ## Kaohsiung
tag = 'Kaohsiung',
label = ['Kaohsiung', 'Kaohsiung', '高雄'],
population = 2773984,
),
'65000': dict( ## New_Taipei
tag = 'New_Taipei',
label = ['New Taipei', 'Nouveau Taipei', '新北'],
population = 4023620,
),
'66000': dict( ## Taichung
tag = 'Taichung',
label = ['Taichung', 'Taichung', '台中'],
population = 2816667,
),
'67000': dict( ## Tainan
tag = 'Tainan',
label = ['Tainan', 'Tainan', '台南'],
population = 1879115,
),
'68000': dict( ## Taoyuan
tag = 'Taoyuan',
label = ['Taoyuan', 'Taoyuan', '桃園'],
population = 2254363,
),
## County
'10002': dict( ## Yilan
tag = 'Yilan',
label = ['Yilan', 'Yilan', '宜蘭'],
population = 453951,
),
'10004': dict( ## Hsinchu
tag = 'Hsinchu',
label = ['Hsinchu County', 'Comté de Hsinchu', '竹縣'],
population = 565272,
),
'10005': dict( ## Miaoli
tag = 'Miaoli',
label = ['Miaoli', 'Miaoli', '苗栗'],
population = 544762,
),
'10007': dict( ## Changhua
tag = 'Changhua',
label = ['Changhua', 'Changhua', '彰化'],
population = 1271015,
),
'10008': dict( ## Nantou
tag = 'Nantou',
label = ['Nantou', 'Nantou', '南投'],
population = 493403,
),
'10009': dict( ## Yunlin
tag = 'Yunlin',
label = ['Yunlin', 'Yunlin', '雲林'],
population = 680050,
),
'10010': dict( ## Chiayi
tag = 'Chiayi',
label = ['Chiayi County', 'Comté de Chiayi', '嘉縣'],
population = 502007,
),
'10013': dict( ## Pingtung
tag = 'Pingtung',
label = ['Pingtung', 'Pingtung', '屏東'],
population = 817193,
),
'10014': dict( ## Taitung
tag = 'Taitung',
label = ['Taitung', 'Taitung', '台東'],
population = 216308,
),
'10015': dict( ## Hualien
tag = 'Hualien',
label = ['Hualien', 'Hualien', '花蓮'],
population = 325706,
),
'10016': dict( ## Penghu
tag = 'Penghu',
label = ['Penghu', 'Penghu', '澎湖'],
population = 105117,
),
## City
'10017': dict( ## Keelung
tag = 'Keelung',
label = ['Keelung', 'Keelung', '基隆'],
population = 371878,
),
'10018': dict( ## Hsinchu_C
tag = 'Hsinchu_C',
label = ['Hsinchu City', 'Ville de Hsinchu', '竹市'],
population = 448207,
),
'10020': dict( ## Chiayi_C
tag = 'Chiayi_C',
label = ['Chiayi City', 'Ville de Chiayi', '嘉市'],
population = 270254,
),
## 09
'09007': dict( ## Matsu
tag = 'Matsu',
label = ['Matsu', 'Matsu', '馬祖'],
population = 12716,
),
'09020': dict( ## Kinmen
tag = 'Kinmen',
label = ['Kinmen', 'Kinmen', '金門'],
population = 127723,
),
}
COUNTY_DICT_2 = {
'基隆市': 'Keelung',
'台北市': 'Taipei',
'臺北市': 'Taipei',
'新北市': 'New_Taipei',
'桃園市': 'Taoyuan',
'新竹縣': 'Hsinchu',
'新竹市': 'Hsinchu_C',
'苗栗縣': 'Miaoli',
'台中市': 'Taichung',
'臺中市': 'Taichung',
'彰化縣': 'Changhua',
'南投縣': 'Nantou',
'雲林縣': 'Yunlin',
'嘉義縣': 'Chiayi',
'嘉義市': 'Chiayi_C',
'台南市': 'Tainan',
'臺南市': 'Tainan',
'高雄市': 'Kaohsiung',
'屏東縣': 'Pingtung',
'宜蘭縣': 'Yilan',
'花蓮縣': 'Hualien',
'台東縣': 'Taitung',
'臺東縣': 'Taitung',
'澎湖縣': 'Penghu',
'金門縣': 'Kinmen',
'連江縣': 'Matsu',
'空值': 'unknown',
}
BRAND_DICT = {
'ALL': 'total',
'Oxford/AstraZeneca': 'AZ',
'Moderna': 'Moderna',
'高端': 'Medigen',
'BNT': 'Pfizer',
}
DELIVERY_LIST = [
## brand, source, quantity, delivery_date, available_date, delivery_news, available_news
[ 'AZ', 'AZ', 116500, '2021-03-03', '2021-03-22', 'https://www.cna.com.tw/news/firstnews/202103035003.aspx', 'https://www.cna.com.tw/news/firstnews/202103225002.aspx'],
[ 'AZ', 'COVAX', 198600, '2021-04-04', '2021-04-13', 'https://www.cna.com.tw/news/firstnews/202104040008.aspx', 'https://www.cna.com.tw/news/firstnews/202104120047.aspx'],
[ 'AZ', 'COVAX', 409800, '2021-05-19', '2021-05-27', 'https://www.cna.com.tw/news/firstnews/202105190224.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600326'],
['Moderna', 'Moderna', 148800, '2021-05-28', '2021-06-08', 'https://www.cna.com.tw/news/firstnews/202105285010.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600375'],
[ 'AZ', 'Japan', 1237860, '2021-06-04', '2021-06-12', 'https://www.cna.com.tw/news/firstnews/202106045008.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600389'],
['Moderna', 'Moderna', 239400, '2021-06-18', '2021-06-26', 'https://www.cna.com.tw/news/firstnews/202106180294.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600409'],
['Moderna', 'USA', 2498440, '2021-06-20', '2021-06-29', 'https://www.cna.com.tw/news/firstnews/202106205005.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600416'],
['Moderna', 'Moderna', 409800, '2021-06-30', '2021-07-08', 'https://www.cna.com.tw/news/firstnews/202106305007.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600434'],
[ 'AZ', 'AZ', 625900, '2021-07-07', '2021-07-15', 'https://www.cna.com.tw/news/firstnews/202107070181.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600446'],
[ 'AZ', 'Japan', 1131780, '2021-07-08', '2021-07-16', 'https://www.cna.com.tw/news/firstnews/202107085007.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600448'],
[ 'AZ', 'Japan', 973480, '2021-07-15', '2021-07-23', 'https://www.cna.com.tw/news/firstnews/202107155011.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600455'],
[ 'AZ', 'AZ', 560100, '2021-07-15', '2021-07-23', 'https://www.cna.com.tw/news/firstnews/202107150245.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600455'],
['Moderna', 'Moderna', 349200, '2021-07-15', '2021-07-23', 'https://www.cna.com.tw/news/firstnews/202107150215.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600455'],
['Medigen', 'Medigen', 265528, '', '2021-07-31', '', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600469'],
[ 'AZ', 'AZ', 581400, '2021-07-27', '2021-08-04', 'https://www.cna.com.tw/news/firstnews/202107270203.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600479'],
['Medigen', 'Medigen', 86910, '', '2021-08-05', '', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600479'],
[ 'AZ', 'Lithuania', 19400, '2021-07-31', '2021-08-10', 'https://www.cna.com.tw/news/firstnews/202107310085.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600484'],
['Moderna', 'Moderna', 99000, '2021-08-08', '2021-08-17', 'https://www.cna.com.tw/news/firstnews/202108090129.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600498'],
['Medigen', 'Medigen', 261766, '', '2021-08-17', '', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600498'],
[ 'AZ', 'AZ', 524200, '2021-08-12', '2021-08-20', 'https://www.cna.com.tw/news/firstnews/202108120201.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600505'],
['Moderna', 'Moderna', 249000, '2021-08-15', '2021-08-24', 'https://www.cna.com.tw/news/firstnews/202108155005.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600513'],
['Medigen', 'Medigen', 263586, '', '2021-08-24', '', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600513'],
[ 'AZ', 'AZ', 264400, '2021-08-27', '2021-09-04', 'https://www.cna.com.tw/news/firstnews/202108275002.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600542'],
['Moderna', 'Czechia', 28800, '2021-08-29', '2021-09-07', 'https://www.cna.com.tw/news/firstnews/202108290099.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600550'],
[ 'AZ', 'AZ', 594900, '2021-08-31', '2021-09-07', 'https://www.cna.com.tw/news/firstnews/202108310186.aspx', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600550'],
['Medigen', 'Medigen', 86935, '', '2021-09-08', '', 'https://www.fda.gov.tw/TC/newsContent.aspx?cid=4&id=t600550'],
[ 'AZ', 'Poland', 400000, '2021-09-05', '', 'https://www.cna.com.tw/news/firstnews/202109050008.aspx', 'https://www.fda.gov.tw/TC/includes/GetFile.ashx?id=f637679223404230332'],
[ 'AZ', 'COVAX', 409800, '2021-09-05', '', 'https://www.cna.com.tw/news/firstnews/202109055005.aspx', 'https://www.fda.gov.tw/TC/includes/GetFile.ashx?id=f637679223404230332'],
[ 'AZ', 'Japan', 64000, '2021-09-07', '', 'https://www.cna.com.tw/news/firstnews/202109075007.aspx', 'https://www.fda.gov.tw/TC/includes/GetFile.ashx?id=f637679223404230332'],
[ 'Pfizer', 'Pfizer', 933072, '2021-09-02', '2021-09-17', 'https://www.cna.com.tw/news/firstnews/202109025001.aspx', 'https://www.cna.com.tw/news/firstnews/202109160346.aspx'],
[ 'Pfizer', 'Pfizer', 909672, '2021-09-09', '2021-09-17', 'https://www.cna.com.tw/news/firstnews/202109090011.aspx', 'https://www.cna.com.tw/news/firstnews/202109160346.aspx'],
[ 'AZ', 'AZ', 458000, '2021-09-10', '', 'https://www.cna.com.tw/news/firstnews/202109105008.aspx', 'https://www.fda.gov.tw/TC/includes/GetFile.ashx?id=f637679223404230332'],
['Medigen', 'Medigen', 171681, '', '2021-09-18', '', 'https://www.fda.gov.tw/TC/includes/GetFile.ashx?id=f637679223404230332'],
[ 'AZ', 'AZ', 640000, '2021-09-17', '', 'https://www.cna.com.tw/news/firstnews/202109175007.aspx', ''],
['Moderna', 'Moderna', 1080000, '2021-09-17', '', 'https://www.cna.com.tw/news/firstnews/202109180003.aspx', ''],
['Medigen', 'Medigen', 560000, '', '2021-09-30', '', 'https://newtalk.tw/news/view/2021-09-30/644088'],
[ 'AZ', 'Japan', 500000, '2021-09-25', '', 'https://www.cna.com.tw/news/firstnews/202109250088.aspx', ''],
[ 'Pfizer', 'Pfizer', 550000, '2021-09-30', '', 'https://www.cna.com.tw/news/firstnews/202109300013.aspx', ''],
[ 'AZ', 'AZ', 656000, '2021-09-30', '', 'https://www.cna.com.tw/news/firstnews/202109305005.aspx', ''],
[ 'Pfizer', 'Pfizer', 670000, '2021-10-01', '', 'https://www.cna.com.tw/news/firstnews/202110010026.aspx', ''],
[ 'Pfizer', 'Pfizer', 270000, '2021-10-04', '', 'https://www.cna.com.tw/news/firstnews/202110040005.aspx', ''],
[ 'Pfizer', 'Pfizer', 889200, '2021-10-07', '', 'https://www.cna.com.tw/news/firstnews/202110070168.aspx', ''],
[ 'Pfizer', 'Pfizer', 800000, '2021-10-08', '', 'https://www.cna.com.tw/news/firstnews/202110080023.aspx', ''],
['Moderna', 'Moderna', 1132100, '2021-10-09', '', 'https://www.cna.com.tw/news/firstnews/202110090004.aspx', ''],
[ 'AZ', 'Lithuania', 235900, '2021-10-09', '', 'https://www.cna.com.tw/news/firstnews/202110095008.aspx', ''],
[ 'AZ', 'AZ', 1360000, '2021-10-13', '', 'https://www.cna.com.tw/news/firstnews/202110135010.aspx', ''],
[ 'Pfizer', 'Pfizer', 827000, '2021-10-14', '', 'https://www.cna.com.tw/news/firstnews/202110140028.aspx', ''],
]
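## Illustrative sketch (not in the original file): one way to inspect DELIVERY_LIST as a
## table, using the column names from the comment at the top of the list.
## >>> delivery_df = pd.DataFrame(
## ...     DELIVERY_LIST,
## ...     columns=['brand', 'source', 'quantity', 'delivery_date',
## ...              'available_date', 'delivery_news', 'available_news'])
## >>> delivery_df.groupby('brand')['quantity'].sum()  ## total doses delivered per brand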
################################################################################
## Global variables
README_DICT = {}
################################################################################
## Functions - files I/O
def loadCsv(name, verbose=True, **kwargs):
data = pd.read_csv(name, dtype=object, skipinitialspace=True, **kwargs)
if verbose:
print('Loaded \"%s\"' % name)
return data
def saveCsv(name, data, verbose=True):
data.to_csv(name, index=False)
if verbose:
print('Saved \"%s\"' % name)
return
def loadJson(name, verbose=True):
file_ = open(name, 'r')
data = json.load(file_)
file_.close()
if verbose:
print('Loaded \"%s\"' % name)
return data
################################################################################
## Functions - date
def ISODateToOrd(iso):
ord_ = dtt.date.fromisoformat(iso).toordinal()
return ord_
def ordDateToISO(ord_):
return dtt.date.fromordinal(ord_).isoformat()
def getTodayOrdinal():
today = dtt.datetime.today()
delta = dtt.timedelta(hours=12)
ord_today = (today - delta).toordinal() + 1
return ord_today
def numMonthToAbbr(num):
return cld.month_abbr[num]
################################################################################
## Functions - other general utilities
def normalizeBoolArr(bool_arr):
bool_arr = bool_arr.astype(float)
bool_arr -= bool_arr.mean()
norm = np.sqrt(np.sum(bool_arr**2))
with warnings.catch_warnings(): ## Avoid division by zero
warnings.simplefilter("ignore")
bool_arr /= norm
return bool_arr
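## Illustrative example (not in the original file); the interpretation is an assumption:
## mean-centring followed by L2 normalisation means the dot product of two such arrays
## behaves like a correlation score between boolean columns.
## >>> normalizeBoolArr(np.array([True, True, False]))
## array([ 0.40824829,  0.40824829, -0.81649658])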
def centerOfBins(bins, area=False):
bins = np.array(bins, dtype=float)
left = bins[:-1]
right = bins[1:]
if area is True:
return np.sqrt(0.5 * (left**2 + right**2))
return 0.5 * (left + right)
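## Illustrative example (not in the original file):
## >>> centerOfBins([0, 2, 4])
## array([1., 3.])
## >>> centerOfBins([0, 2, 4], area=True)  ## quadratic mean, sqrt((left**2 + right**2) / 2)
## array([1.41421356, 3.16227766])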
def makeHist(data, bins, wgt=None, factor=1.0, pdf=False):
"""
Make the histogram such that the output can be plotted directly
Parameters
----------
data : array-like
bins : (1, N) float array
bin edges
factor : float, optional
rescaling factor for the histogram
pdf : bool, optional
make the output a pdf, i.e. normalized by the binwidth & the total counts
Returns
-------
n_arr : (1, N) float array
number counts, could be rescaled
ctr_bins : (1, N) float array
center of the bins
n_arr & ctr_bins have the same size.
"""
n_arr, bins = np.histogram(data, bins, weights=wgt)
ctr_bins = centerOfBins(bins)
if pdf == True:
n_arr = n_arr.astype(float) / (float(sum(n_arr)) * (bins[1:] - bins[:-1]))
else:
n_arr = n_arr.astype(float) * factor
return n_arr, ctr_bins
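## Illustrative example (not in the original file):
## >>> n_arr, ctr_bins = makeHist([0.5, 1.5, 1.7, 2.2], bins=[0, 1, 2, 3])
## >>> n_arr      ## counts per bin
## array([1., 2., 1.])
## >>> ctr_bins   ## bin centers
## array([0.5, 1.5, 2.5])
## >>> makeHist([0.5, 1.5, 1.7, 2.2], bins=[0, 1, 2, 3], pdf=True)[0].sum()  ## unit-width bins
## 1.0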
def sevenDayMovingAverage(value_arr):
value_arr = np.array(value_arr, dtype=float)
kernel = [1/7] * 7 + [0.0] * 6 ## Mean
value_arr = signal.convolve(value_arr, kernel[::-1], mode='same')
return value_arr
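## Clarifying note (not in the original file): the six trailing zeros in the kernel shift
## the window so that, with mode='same', each output value is the *trailing* average of the
## current day and the 6 previous days, not a centred average, e.g. (up to float rounding)
## >>> sevenDayMovingAverage([0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0])
## ## -> approximately [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0]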
def itpFromCumul(begin, end, length):
if length == 1:
return [end-begin]
q = (end - begin) // length
r = (end - begin) % length
list_ = [q] * length
for i in range(r):
list_[i] += 1
return list_
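## Worked example (not in the original file): the cumulative increment is spread evenly
## over `length` days, with the remainder given to the earliest days.
## >>> itpFromCumul(10, 17, 3)  ## 17 - 10 = 7 spread over 3 days
## [3, 2, 2]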
################################################################################
## Functions - utilities specific to this file
def initializeStock_dailyCounts(col_tag_list):
ord_today = getTodayOrdinal()
date_list = [ordDateToISO(ord_) for ord_ in range(ISODateToOrd(ISO_DATE_REF), ord_today)]
stock = {'date': date_list}
stock.update({col_tag: np.zeros(len(date_list), dtype=int) for col_tag in col_tag_list})
return stock
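## Illustrative example (not in the original file):
## >>> stock = initializeStock_dailyCounts(['total', 'imported'])
## >>> sorted(stock.keys())
## ['date', 'imported', 'total']
## >>> len(stock['date']) == len(stock['total'])  ## one row per day since ISO_DATE_REF
## True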
def initializeStockDict_general(stock):
return {page: copy.deepcopy(stock) for page in PAGE_LIST}
def indexForLatest(iso):
ord_today = getTodayOrdinal()
ind = ISODateToOrd(iso) - ord_today + NB_LOOKBACK_DAYS
if ind < 0 or ind >= NB_LOOKBACK_DAYS:
return np.nan
return ind
def indexForOverall(iso):
ord_begin_overall = ISODateToOrd(ISO_DATE_REF)
ind = ISODateToOrd(iso) - ord_begin_overall
if ind < 0:
return np.nan
return ind
def indexFor2021(iso):
ord_begin_2021 = ISODateToOrd('2021-01-01')
ind = ISODateToOrd(iso) - ord_begin_2021
if ind < 0 or ind >= 365:
return np.nan
return ind
def indexFor2020(iso):
ord_begin_2020 = ISODateToOrd('2020-01-01')
ind = ISODateToOrd(iso) - ord_begin_2020
if ind < 0 or ind >= 366:
return np.nan
return ind
def makeIndexList(iso):
ind_latest = indexForLatest(iso)
ind_overall = indexForOverall(iso)
ind_2021 = indexFor2021(iso)
ind_2020 = indexFor2020(iso)
return [ind_latest, ind_overall, ind_2021, ind_2020]
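## Illustrative example (not in the original file): each ISO date is mapped to one index per
## page; dates outside a page's range give NaN, and the 'latest' index depends on the current
## date, so it is omitted here.
## >>> makeIndexList('2021-03-01')[1:]  ## [overall, 2021, 2020]
## [425, 59, nan]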
def makeMovingAverage(value_arr):
avg_arr = sevenDayMovingAverage(value_arr)
avg_arr = np.around(avg_arr, decimals=4)
return avg_arr
def adjustDateRange(data):
ord_ref = ISODateToOrd(ISO_DATE_REF)
ord_begin = ISODateToOrd(data['date'].values[0])
ord_end = ISODateToOrd(data['date'].values[-1]) + 1
ord_today = getTodayOrdinal()
zero = [0] * (len(data.columns) - 1)
nan = [np.nan] * (len(data.columns) - 1)
stock1 = []
stock2 = []
for ord_ in range(ord_ref, ord_begin):
iso = ordDateToISO(ord_)
stock1.append([iso] + zero)
for ord_ in range(ord_end, ord_today):
iso = ordDateToISO(ord_)
stock2.append([iso] + nan)
if ord_ref > ord_begin:
data = data[ord_ref-ord_begin:]
data1 = pd.DataFrame(stock1, columns=data.columns)
data2 = pd.DataFrame(stock2, columns=data.columns)
## Assumed continuation (the source is truncated at this point): pad before and after, then return
data = pd.concat([data1, data, data2])
return data
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If it's an offset then this will be the time period of each window. Each
window will be a variable size based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type, return the window
we have already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
    Apply an arbitrary function to each %(name)s window.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
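    Examples
    --------
    A minimal example using a NumPy-level reduction with ``raw=True``;
    the window sums are easy to verify by hand.
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.rolling(2).apply(lambda x: x.sum(), raw=True)
    0    NaN
    1    3.0
    2    5.0
    3    7.0
    dtype: float64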
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
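    Examples
    --------
    A minimal example of a rolling maximum with a window size of 3,
    shown on a small, hand-checkable series.
    >>> s = pd.Series([4, 3, 5, 2, 6])
    >>> s.rolling(3).max()
    0    NaN
    1    NaN
    2    5.0
    3    5.0
    4    6.0
    dtype: float64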
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
    **kwargs
        For compatibility with other %(name)s methods. Has no effect
        on the computed minimum.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
    The default ``ddof`` of 1 used in :meth:`Series.std` is different from the
    default ``ddof`` of 0 in :func:`numpy.std`.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
    **kwargs
        For compatibility with other %(name)s methods. Has no effect
        on the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
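    Examples
    --------
    A minimal example: the covariance of a series with itself over a
    window of two consecutive integers is 0.5 with the default ``ddof=1``.
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s.rolling(2).cov(s)
    0    NaN
    1    0.5
    2    0.5
    3    0.5
    dtype: float64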
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
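            # mean(X*Y) - mean(X)*mean(Y) is the biased covariance; rescaling
            # by count / (count - ddof) applies the requested ddof adjustment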
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
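        # the correlation is formed from the rolling covariance divided by the
        # product of the two rolling standard deviations over the same window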
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show a rolling calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> import scipy.stats
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
3.999946
>>> s = pd.Series(arr)
>>> s.rolling(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 3.999946
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="rolling")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["corr"])
def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provide a rolling groupby implementation.
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super()._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
Validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level.
"""
pass
class Expanding(_Rolling_and_Expanding):
"""
Provide expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : bool, default False
Set the labels at the center of the window.
axis : int or str, default 0
Returns
-------
a Window sub-classed for the particular operation
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
Examples
--------
    >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
    >>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
"""
_attributes = ["min_periods", "center", "axis"]
def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
"""
Get the window length over which to perform some operation.
Parameters
----------
other : object, default None
The other object that is involved in the operation.
Such an object is involved for operations like covariance.
Returns
-------
window : int
The window length.
"""
axis = self.obj._get_axis(self.axis)
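        # an expanding window spans the whole axis; when another object is
        # involved (e.g. expanding cov/corr), allow for the combined length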
length = len(axis) + (other is not None) * len(axis)
other = self.min_periods or -1
return max(length, other)
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.expanding.aggregate
DataFrame.rolling.aggregate
DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
    >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
    >>> df
       A  B  C
    0  1  4  7
    1  2  5  8
    2  3  6  9
    >>> df.expanding(2).sum()
         A     B     C
    0  NaN   NaN   NaN
    1  3.0   9.0  15.0
    2  6.0  15.0  24.0
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="expanding")
@Appender(_shared_docs["count"])
def count(self, **kwargs):
return super().count(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_expanding_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_expanding_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_expanding_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show an expanding calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> import scipy.stats
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr, bias=False)))
4.999874
>>> s = pd.Series(arr)
>>> s.expanding(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 4.999874
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="expanding")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="expanding")
    @Appender(_shared_docs["quantile"])
    def quantile(self, quantile, interpolation="linear", **kwargs):
        return super().quantile(
            quantile=quantile, interpolation=interpolation, **kwargs
        )
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
        mask = lambda x: isinstance(x, (DatetimeIndex, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(
['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H',
'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second',
'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with tm.assertRaises(TypeError):
rng + other
with tm.assertRaises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
result = rng + delta
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with tm.assertRaises(TypeError):
rng - other
with tm.assertRaises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
                result = rng - delta
            with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
                rng -= delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_comp_nat(self):
left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT,
pd.Period('2011-01-03')])
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00',
'2011-01-01 16:00', '2011-01-01 15:00',
'2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00',
'2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H',
periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
freq='H')
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012',
'2011'], name='pidx', freq='A')
pexpected = PeriodIndex(
['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp,
check_dtype=False)
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx',
freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'],
name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(
['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertEqual(ordered.freq, 'D')
def test_nat_new(self):
idx = pd.period_range('2011-01', freq='M', periods=5, name='x')
result = idx._nat_new()
exp = pd.PeriodIndex([pd.NaT] * 5, freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.PeriodIndex([], name='xxx', freq='H')
with tm.assertRaises(TypeError):
# period shift doesn't accept freq
idx.shift(1, freq='H')
tm.assert_index_equal(idx.shift(0), idx)
tm.assert_index_equal(idx.shift(3), idx)
        idx = pd.PeriodIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                              '2011-01-01 12:00'], name='xxx', freq='H')
        tm.assert_index_equal(idx.shift(0), idx)
        exp = pd.PeriodIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                              '2011-01-01 15:00'], name='xxx', freq='H')
        tm.assert_index_equal(idx.shift(3), exp)
        exp = pd.PeriodIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                              '2011-01-01 09:00'], name='xxx', freq='H')
        tm.assert_index_equal(idx.shift(-3), exp)
def test_repeat(self):
index = pd.period_range('2001-01-01', periods=2, freq='D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], freq='D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.period_range('2001-01-01', periods=2, freq='2D')
exp = pd.PeriodIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], freq='2D')
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
index = pd.PeriodIndex(['2001-01', 'NaT', '2003-01'], freq='M')
exp = pd.PeriodIndex(['2001-01', '2001-01', '2001-01',
'NaT', 'NaT', 'NaT',
'2003-01', '2003-01', '2003-01'], freq='M')
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
def test_nat(self):
self.assertIs(pd.PeriodIndex._na_value, pd.NaT)
self.assertIs(pd.PeriodIndex([], freq='M')._na_value, pd.NaT)
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.PeriodIndex(['2011-01-01', 'NaT'], freq='D')
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for freq in ['D', 'M']:
idx = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq=freq)
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.PeriodIndex(['2011-01-01', '2011-01-02', 'NaT'],
freq='H')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal values, different freq
idx3 = pd.PeriodIndex._simple_new(idx.asi8, freq='H')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestPeriodIndexSeriesMethods(tm.TestCase):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assertRaisesRegexp(TypeError, msg):
obj + ng
with tm.assertRaises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assertRaisesRegexp(TypeError, msg):
obj - ng
with tm.assertRaises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
self.assertIs(np.add(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.add(ng, obj)
with tm.assertRaises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
self.assertIs(np.subtract(ng, obj), NotImplemented)
else:
with tm.assertRaises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj + offsets.Hour(2)
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
offsets.Hour(2) + obj
with tm.assertRaisesRegexp(period.IncompatibleFrequency, msg):
obj - offsets.Hour(2)
def test_pi_sub_period(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, -11, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period('2012-01', freq='M'))
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
exp = pd.Index([12, 11, 10, 9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(pd.Period('2012-01', freq='M'), idx)
if _np_version_under1p10:
self.assertIs(result, NotImplemented)
else:
tm.assert_index_equal(result, exp)
exp = pd.TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name='idx')
tm.assert_index_equal(idx - pd.Period('NaT', freq='M'), exp)
tm.assert_index_equal(pd.Period('NaT', freq='M') - idx, exp)
def test_pi_sub_pdnat(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
exp = pd.TimedeltaIndex([pd.NaT] * 4, name='idx')
tm.assert_index_equal(pd.NaT - idx, exp)
| tm.assert_index_equal(idx - pd.NaT, exp) | pandas.util.testing.assert_index_equal |
import argparse
import pandas
def mean_std_table(
datasets,
dataset_labels,
metrics,
metric_labels,
model_order,
model_labels,
all_data,
output_file):
# set output_file
output_file = open(output_file, "w")
# stats
stats = ["Mean", "Median", "Peak"]
# keep relevant columns
all_data = all_data[["model", "dataset", metrics[0], metrics[1]]]
# write table headers
output_file.write("\\begin{table}[h!]\n")
output_file.write(" \\begin{center}\n")
output_file.write(" \\begin{tabular}{\n")
output_file.write(" l\n")
output_file.write(" p{1cm}\n")
output_file.write(" c@{\hspace{0.5cm}}\n")
output_file.write(" c@{\hspace{0.5cm}}\n")
output_file.write(" c@{\hspace{0.5cm}}\n")
output_file.write(" c@{\hspace{0.5cm}}\n")
output_file.write(" c\n")
output_file.write(" }\n")
output_file.write(" \\toprule\n")
output_file.write(" & & {")
for model in model_order:
if model == model_order[-1]:
output_file.write(model_labels[model] + "} \\\\ \midrule\n")
else:
output_file.write(model_labels[model] + "} & {")
# get data
mean = all_data.groupby(["model", "dataset"], as_index=False).mean()
std = all_data.groupby(["model", "dataset"]).std().reset_index()
# populate table
for dataset in range(len(dataset_labels)):
output_file.write(" \\multirow{5}*{\\begin{turn}{90}\\emph{" + dataset_labels[dataset] + "}\\end{turn}}\n")
output_file.write(" \\\\[0.2cm]\n")
for metric in range(len(metrics)):
output_file.write(" & " + metric_labels[metric] + " & ")
for model in model_order:
entry_mean = mean.loc[(mean["model"] == model) & (mean["dataset"] == datasets[dataset])]
entry_std = std.loc[(std["model"] == model) & (std["dataset"] == datasets[dataset])]
output_file.write("{:.1f}$\\pm${:.1f}".format(
round(entry_mean[metrics[metric]].item()*100, 1),
round(entry_std[metrics[metric]].item()*100, 1))
)
if model == model_order[-1]:
output_file.write(" \\\\")
if metrics[metric] == metrics[-1]:
if datasets[dataset] == datasets[-1]:
output_file.write("\n \\\\[0.2cm] \n \\bottomrule\n")
else:
output_file.write("\n \\\\[0.2cm] \n \\midrule\n")
else:
output_file.write("\n")
else:
output_file.write(" & ")
# write end of table
output_file.write(" \\end{tabular}\n")
output_file.write(" \\end{center}\n")
output_file.write(" \\caption{Mean and standard deviation of the performance on validation data of the best performing models.}\n")
output_file.write("\\end{table}\n\n")
def hyperparameters_table(
datasets,
dataset_labels,
model_order,
model_labels,
hyperparameters,
hyperparameter_labels,
metric,
metric_label,
all_trials,
best_trials,
output_file,
appendix,
table_label,
bayes):
# take either all Bayes trials, or none
if bayes:
all_trials = all_trials.loc[all_trials['folder'].str.contains("-bo")]
else:
all_trials = all_trials.loc[~all_trials['folder'].str.contains("-bo")]
# set printable values of some hyperparameters
# floats that have printable values are not treated as plain floats, so a delta will still be shown for them
print_values = {}
print_values["train_type"] = {
"negative_sampling": "NegSamp",
"1vsAll": "1vsAll",
"KvsAll": "KvsAll",
}
print_values["reciprocal"] = {
0: "No",
1: "Yes"
}
print_values["emb_regularize_p"] = {
0.0: "None",
1.0: "L1",
2.0: "L2",
3.0: "L3"
}
print_values["emb_regularize_weighted"] = {
0: "No",
1: "Yes",
}
print_values["transe_l_norm"] = {
1: "L1",
2: "L2",
}
print_values["transe_normalize_e"] = {
-1.0: "No",
1.0: "L1",
2.0: "L2",
}
print_values["transe_normalize_r"] = {
-1.0: "No",
1.0: "L1",
2.0: "L2",
}
print_values["emb_initialize"] = {
"normal_": "Normal",
"uniform_": "Unif.",
"xavier_normal_": "XvNorm",
"xavier_uniform_": "XvUnif"
}
print_values["train_loss"] = {
"kl": "CE",
"bce": "BCE",
"margin_ranking": "MR"
}
# set rounding for floats (defaults to 2 if not determined here)
# Not a pretty solution, couldn't quickly parameterize {:.2f}
round_5 = ["train_lr", "emb_initialize_normal_std"]
round_0 = ["num_negs_s", "num_negs_o"]
scientific = ["emb_e_regularize_weight", "emb_r_regularize_weight"]
# set compatibility between hyperparameters (determines when a hyperparameter should be printed)
# {attribute_1: (attribute_2, [list of values])}
# Show attribute_1 iff value of attribute_2 is in list of values
compatibility = {
"num_negs_s":("train_type", ["negative_sampling"]),
"num_negs_o":("train_type", ["negative_sampling"]),
"label_smoothing":("train_type", ["KvsAll"]),
"margin":("train_loss", ["margin_ranking"]),
"transe_l_norm":("model", ["transe"]),
"transe_normalize_e":("model", ["transe"]),
"transe_normalize_r":("model", ["transe"]),
"conve_projection_dropout":("model", ["conve"]),
"conve_feature_map_dropout":("model", ["conve"]),
"emb_initialize_normal_std":("emb_initialize", ["normal_"]),
"emb_initialize_uniform_interval":("emb_initialize", ["uniform_"]),
"emb_e_regularize_weight":("emb_regularize_p", [1, 2, 3]),
"emb_r_regularize_weight":("emb_regularize_p", [1, 2, 3])
}
# set hyperparameters on the far left if table from appendix
far_left_params = [
"emb_e_dim",
"train_type",
"train_loss",
"train_optimizer",
"emb_regularize_p",
"emb_initialize",
]
# set hyperparameters that trigger a multicolumn row before them
multicol_params = {
"emb_e_dropout":"Dropout",
"transe_normalize_e":"Embedding normalization (TransE)"
}
# open output_file
output_file = open(output_file, "w")
# write table headers
if appendix and not bayes:
output_file.write("\\begin{sidewaystable}[h!]\n")
else:
output_file.write("\\begin{table}[t]\n")
output_file.write(" \\begin{center}\n")
output_file.write(" \\begin{tabular}{\n")
if appendix:
if not bayes:
output_file.write(" l@{\hspace{0.2cm}}\n")
output_file.write(" l@{\hspace{-0.2cm}}\n")
output_file.write(" r@{\hspace{0.2cm}}\n")
output_file.write(" c@{\hspace{0.2cm}}\n")
output_file.write(" r@{\hspace{0.2cm}}\n")
output_file.write(" c@{\hspace{0.2cm}}\n")
output_file.write(" r@{\hspace{0.2cm}}\n")
output_file.write(" c@{\hspace{0.2cm}}\n")
output_file.write(" r@{\hspace{0.2cm}}\n")
output_file.write(" c@{\hspace{0.2cm}}\n")
output_file.write(" r@{\hspace{0.2cm}}\n")
output_file.write(" c")
else:
output_file.write(" l@{\hspace{0.2cm}}\n")
output_file.write(" l@{\hspace{-0.2cm}}\n")
output_file.write(" r@{\hspace{0.1cm}}\n")
output_file.write(" c@{\hspace{0.1cm}}\n")
output_file.write(" r@{\hspace{0.1cm}}\n")
output_file.write(" c@{\hspace{0.1cm}}\n")
output_file.write(" r@{\hspace{0.1cm}}\n")
output_file.write(" c@{\hspace{0.1cm}}\n")
output_file.write(" r@{\hspace{0.1cm}}\n")
output_file.write(" c@{\hspace{0.1cm}}\n")
output_file.write(" r@{\hspace{0.1cm}}\n")
output_file.write(" c")
else:
output_file.write(" l@{\hspace{0.2cm}}\n")
output_file.write(" l@{\hspace{-0.05cm}}\n")
output_file.write(" r@{\hspace{0.1cm}}\n")
output_file.write(" c@{\hspace{0.2cm}}\n")
output_file.write(" r@{\hspace{0.1cm}}\n")
output_file.write(" c@{\hspace{0.2cm}}\n")
output_file.write(" r@{\hspace{0.1cm}}\n")
output_file.write(" c@{\hspace{0.2cm}}\n")
output_file.write(" r@{\hspace{0.1cm}}\n")
output_file.write(" c@{\hspace{0.2cm}}\n")
output_file.write(" r@{\hspace{0.1cm}}\n")
output_file.write(" c\n")
output_file.write("}\n")
output_file.write(" \\toprule\n")
if not bayes:
output_file.write("& & \multicolumn{2}{c}{")
else:
output_file.write("& & \multicolumn{1}{c}{")
for model in model_order:
if model == model_order[-1]:
output_file.write(model_labels[model] + "} \\\\ \\midrule\n")
else:
if not bayes:
output_file.write(model_labels[model] + "} & \multicolumn{2}{c}{")
else:
output_file.write(model_labels[model] + "} & & \multicolumn{1}{c}{")
# populate table
model_means = {}
for dataset in range(len(dataset_labels)):
if not bayes:
output_file.write(" \\multirow{" + str((len(hyperparameters) + len(multicol_params) + 1)) + "}*{\\begin{turn}{90}\\emph{" + dataset_labels[dataset] + "}\\end{turn}}\n")
else:
output_file.write(" \\multirow{" + str((len(hyperparameters))) + "}*{\\begin{turn}{90}\\emph{" + dataset_labels[dataset] + "}\\end{turn}}\n")
# write mean MRR of each model
if not bayes:
mean = best_trials.groupby(["model", "dataset"], as_index=False).mean()
count = all_trials.groupby(["model", "dataset"], as_index=False).count()
output_file.write("& " + metric_label + " & & \\emph{")
for model in model_order:
entry_mean = mean.loc[(mean["model"] == model) & (mean["dataset"] == datasets[dataset])]
model_means[model] = round(entry_mean[metric].item()*100, 1)
output_file.write("{:.1f}".format(model_means[model]))
if model == model_order[-1]:
output_file.write("} \\\\\n")
else:
output_file.write("} & & \emph{")
# hyperparameter rows
for i in range(len(hyperparameters)):
hp = hyperparameters[i]
show_delta = not bayes
# insert multicolumn row if necessary
if appendix and hp in multicol_params and not bayes:
output_file.write("&\multicolumn{6}{l}{" + multicol_params[hp] + "} \\\\\n")
# hyperparameter name
if appendix and hp in far_left_params and not bayes:
output_file.write("& " + hyperparameter_labels[i] + " & ")
else:
output_file.write("& $\\quad$" + hyperparameter_labels[i] + " & ")
for model in model_order:
# get model trials
model_trials = all_trials.loc[
(all_trials['model'] == model) &
(all_trials['dataset'] == datasets[dataset])
]
# value
max_entry = model_trials.loc[model_trials[metric] == model_trials[metric].max()]
value = max_entry[hp].item()
# check compatibility (whether it should be printed or not)
compatible = True
if hp in compatibility and max_entry[compatibility[hp][0]].item() not in compatibility[hp][1]:
compatible = False
show_delta = False
if compatible:
if isinstance(value, float) and hp not in print_values:
show_delta = False
if hp == "emb_initialize_uniform_interval":
output_file.write("[{:.2f}, {:.2f}] & ".format(round(value, 2), round(value, 2) * -1))
else:
if hp in round_5:
output_file.write("{:.5f} & ".format(round(value, 5)))
elif hp in round_0:
output_file.write("{:.0f} & ".format(round(value, 0)))
elif hp in scientific:
output_file.write(scientific_notation(value) + " & ")
else:
output_file.write("{:.2f} & ".format(round(value, 2)))
else:
if hp in print_values:
printable_value = print_values[hp][max_entry[hp].item()]
else:
printable_value = value
output_file.write("{} & ".format(printable_value))
if show_delta:
output_file.write("\emph{")
# delta
if show_delta:
delta_trials = model_trials.loc[model_trials[hp] != value]
if len(delta_trials.index):
max_entry = delta_trials.loc[delta_trials[metric] == delta_trials[metric].max()]
delta = round((max_entry[metric].item() * 100) - model_means[model], 1)
output_file.write("({:.1f})".format(delta))
else:
output_file.write("--")
output_file.write("}")
else:
output_file.write("-- & ")
# close line
if model == model_order[-1]:
output_file.write(" \\\\")
if hp == hyperparameters[-1]:
if datasets[dataset] == datasets[-1]:
output_file.write("\n \\bottomrule\n")
else:
output_file.write("\n \\midrule\n")
else:
output_file.write("\n")
else:
output_file.write(" & ")
# write end of table
output_file.write(" \\end{tabular}\n")
output_file.write(" \\end{center}\n")
output_file.write(" \\caption{Insert caption here}\n")
output_file.write(" \\label{" + table_label + "}\n")
if appendix and not bayes:
output_file.write("\\end{sidewaystable}\n")
else:
output_file.write("\\end{table}\n")
def scientific_notation(number):
number_str = "{:.2E}".format(number).split("E")
return r"$" + number_str[0] + r"^{" + number_str[1] + "}$"
if __name__ == '__main__':
# parse args
parser = argparse.ArgumentParser()
parser.add_argument(
'--all_trials',
type=str,
required=True,
help="csvs of all trials created with dump command, comma separated")
parser.add_argument(
'--best_trials',
type=str,
required=True,
help="csvs of best trials created with dump command, comma separated")
args, _ = parser.parse_known_args()
# load input CSVs
csvs = []
for input_file in args.all_trials.split(","):
csvs.append(pandas.read_csv(input_file))
all_trials = pandas.concat(csvs)
csvs = []
for input_file in args.best_trials.split(","):
csvs.append( | pandas.read_csv(input_file) | pandas.read_csv |
import dhlab.nbtext as nb
import requests
import pandas as pd
from IPython.display import HTML
# HMMM
# extra function for word frequencies
def word_frequencies(word_list):
""" Find frequency of words global for digibok """
params = {'words':word_list}
r = requests.post("https://api.nb.no/ngram/word_frequencies", json = params)
return dict(r.json())
# get a frame
def frame(something, name = None):
"""Try to make a frame out of something and name columns according to name, which should be a string or a list of strings,
one for each column. Mismatch in numbers is taken care of."""
if isinstance(something, dict):
res = | pd.DataFrame.from_dict(something, orient='index') | pandas.DataFrame.from_dict |
import pandas as pd
from tqdm import tqdm
from config import visit_plan_raw_data_path, agent_replacements_raw_data_path
from config import date_analysis_raw_data_drop_cols, credit_requests_raw_data_drop_cols
from config import sr_loading_raw_data_drop_cols, sr_unloading_raw_data_drop_cols
from config import take_shipment_data_cols
from config import take_future_visit_plan_data_cols, take_pos_visit_plan_data_cols
from config import take_date_analysis_data_cols, take_credit_req_data_cols
from config import shipment_raw_data_path, pos_master_raw_data_path
from config import sr_loading_raw_data_path, sr_unloading_raw_data_path
from config import stock_collection_raw_data_path
from config import esgo_date_analysis_raw_data_path, credit_requests_raw_data_path
tqdm.pandas()
# Reconcile agent replacements, holiday adjustments in visit plan data
def __visit_plan_reconciliation(visit_plan_data: pd.DataFrame,
weekday_holiday_dates: list) -> pd.DataFrame:
visit_plan_df: pd.DataFrame = visit_plan_data.reset_index()
visit_plan_df = visit_plan_df.sort_values(['visit_date'])
visit_plan_df.loc[
visit_plan_df['visit_date'].isin(weekday_holiday_dates),
'weekday_holiday'] = True
visit_plan_df['weekday_holiday'] = visit_plan_df['weekday_holiday'].fillna(False)
def __create_new_markers(pos_plan_df: pd.DataFrame) -> pd.DataFrame:
current_pos = pos_plan_df['pos_code'].iloc[0]
# print(current_pos)
pos_plan_df['type_lead_1'] = pos_plan_df['type'].shift(-1)
pos_plan_df.loc[
pos_plan_df['type_lead_1'] == 'Holiday without replacement',
'pre_nonreplacement_holiday'] = True
pos_plan_df['pre_nonreplacement_holiday'] = \
pos_plan_df['pre_nonreplacement_holiday'].fillna(False)
pos_plan_df['double_sell'] = pos_plan_df['weekday_holiday'].shift(-1).fillna(False)
pos_plan_df['weekday_holiday(-2)'] = pos_plan_df['weekday_holiday'].shift(-2).fillna(False)
pos_plan_df['triple_sell'] = (pos_plan_df['double_sell']
& pos_plan_df['weekday_holiday(-2)'])
pos_plan_df.loc[pos_plan_df['triple_sell'], 'double_sell'] = False
return pos_plan_df.drop(columns=['type_lead_1', 'weekday_holiday(-2)'])
visit_plan_df = visit_plan_df.groupby(['pos_code']).progress_apply(
__create_new_markers)
# Holiday without replacement means no agent actually went there
visit_plan_df = visit_plan_df.loc[
visit_plan_df['type'] != 'Holiday without replacement']
# Take only non-weekday-holiday dates
visit_plan_df = visit_plan_df.loc[~visit_plan_df['weekday_holiday']]
visit_plan_df.loc[visit_plan_df['type'] == 'Replacement', 'agent_code'] = \
visit_plan_df.loc[visit_plan_df['type'] == 'Replacement',
'backup_agent_code'].astype(int)
return visit_plan_df
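# Illustrative sketch (not from the original module): the double/triple-sell markers
# above come from shifting each POS's weekday_holiday flag backwards, so a visit
# "sees" whether the next one or two planned visits fall on a weekday holiday.
# A minimal, self-contained version of that idea (the helper name is hypothetical):
def _demo_shift_markers(flags: pd.Series) -> pd.DataFrame:
    """flags: boolean Series of weekday_holiday per planned visit, in date order."""
    out = pd.DataFrame({"weekday_holiday": flags})
    out["double_sell"] = flags.shift(-1).fillna(False)
    next_two = flags.shift(-2).fillna(False)
    out["triple_sell"] = out["double_sell"] & next_two
    out.loc[out["triple_sell"], "double_sell"] = False
    return out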
# Add derived columns as required to date_analysis_data
def __date_analysis_additions(date_analysis_data: pd.DataFrame) -> pd.DataFrame:
# manual correction according to seen data
date_analysis_data.loc[
[pd.Timestamp(day=7, month=12, year=2018)],
'IsWorkingDate'] = 0
date_analysis_data['WeekdayHoliday'] = ((date_analysis_data['DayNumberOfWeek'] <= 5)
& (date_analysis_data['IsWorkingDate'] == 0))
date_analysis_data['PriceChgPeriod'] = (date_analysis_data['PricelistChgAnn']
| date_analysis_data['PricelistChg'])
date_analysis_data['PriceChgPeriod'] = date_analysis_data['PriceChgPeriod'].cumsum()
date_analysis_data['PriceChgEffect'] = \
date_analysis_data['PriceChgPeriod'].apply(lambda x: x % 2 != 0)
date_analysis_data['PriceChgPeriod'] = (date_analysis_data['PriceChgEffect']
| date_analysis_data['PricelistChg'])
# Convert to sparse then query index to find block locations
temp_ts: pd.SparseSeries = date_analysis_data['PriceChgPeriod'].to_sparse(
fill_value=False)
block_locs = zip(temp_ts.sp_index.blocs, temp_ts.sp_index.blengths)
# Map the sparse blocks back to the dense timeseries
block_infos = [(date_analysis_data['PriceChgPeriod'].iloc[start:(start + length)],
length)
for (start, length) in block_locs]
for series_block, length in block_infos:
values = range(length)
date_analysis_data.loc[series_block.index, 'daysSincePriceChgAnn'] = values
date_analysis_data.loc[series_block.index, 'daysFromPriceChg'] = values[::-1]
date_analysis_data['daysSincePriceChgAnn'] = \
date_analysis_data['daysSincePriceChgAnn'].fillna(-1).astype(int)
date_analysis_data['daysFromPriceChg'] = \
date_analysis_data['daysFromPriceChg'].fillna(-1).astype(int)
return date_analysis_data.drop(columns=['PricelistChgAnn',
'PriceChgPeriod',
'PriceChgEffect'])
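# Illustrative sketch (not from the original module): the sparse-block trick above
# labels every date inside a contiguous True run of 'PriceChgPeriod' with its
# position counted from the start and from the end of that run. The same result
# can be computed with a plain groupby over run ids (the helper name is hypothetical):
def _demo_run_positions(flag: pd.Series) -> pd.DataFrame:
    """flag: boolean Series; -1 outside True runs, run positions inside them."""
    run_id = (flag != flag.shift()).cumsum()       # id of each contiguous run
    grouped = flag.groupby(run_id)
    pos = grouped.cumcount()                       # 0, 1, 2, ... within the run
    rev = grouped.transform("size") - 1 - pos      # same, counted from the end
    return pd.DataFrame({"daysSincePriceChgAnn": pos.where(flag, -1),
                         "daysFromPriceChg": rev.where(flag, -1)})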
# prepare shipment data into consumable format
def __prep_consumable_shipments_data(shipment_data: pd.DataFrame,
train_period_shipments: pd.DataFrame) -> pd.DataFrame:
shipment_records: pd.DataFrame = shipment_data.loc[shipment_data['return'] == 0].copy()
shipments_train_agg: pd.DataFrame = train_period_shipments.groupby(
['invoice_date', 'product_code']).agg({'quantity': 'sum'})
shipments_train_agg = shipments_train_agg.reset_index().groupby(
['product_code']).agg({'quantity': 'mean'})
shipments_train_agg['contribution%'] = (shipments_train_agg['quantity']
/ shipments_train_agg['quantity'].sum())*100
shipments_train_agg = shipments_train_agg.sort_values(['contribution%'])
shipments_train_agg['contribution%_cumsum'] = shipments_train_agg['contribution%'].cumsum()
# take products which make up 5% or a little more than 5% of the total quantity
num_small_products = len(shipments_train_agg.loc[
shipments_train_agg['contribution%_cumsum'] < 5])
small_products = shipments_train_agg.index[:num_small_products+1]
# pd.Series(small_products).to_csv('small_products_BW.csv')
num_medium_products = len(shipments_train_agg.loc[
(shipments_train_agg['contribution%_cumsum'] >= 5)
& (shipments_train_agg['contribution%_cumsum'] < 20)])
medium_products = shipments_train_agg.index[num_small_products+1
: num_small_products+num_medium_products+1]
# pd.Series(medium_products).to_csv('medium_products_BW.csv')
shipment_records = shipment_records.groupby(['invoice_date', 'pos_code',
'product_code']).agg({'agent_code': 'first',
'quantity': 'sum',
'promo_id': 'first'})
return_records: pd.DataFrame = shipment_data.loc[shipment_data['return'] == 1].copy()
return_records = return_records.groupby(['invoice_date', 'pos_code',
'product_code']).agg({'agent_code': 'first',
'quantity': 'sum',
'return': 'first'})
return_records['return'] = return_records['return'].astype('bool')
shipment_records['return'] = return_records['return']
shipment_records['return'] = shipment_records['return'].fillna(False)
shipment_records = shipment_records.reset_index()
shipment_records.loc[
shipment_records['product_code'].isin(small_products), 'product_cat'] = 'small'
shipment_records['product_cat'] = shipment_records['product_cat'].fillna('large')
shipment_records = shipment_records.set_index('invoice_date')
return shipment_records.sort_index()
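# Illustrative sketch (not from the original module): products are bucketed by their
# cumulative share of mean daily quantity -- roughly the bottom ~5% of volume is
# "small" and the next ~15% "medium". Reduced to its core (helper name hypothetical):
def _demo_contribution_buckets(mean_qty: pd.Series, small_cut=5.0, medium_cut=20.0):
    """mean_qty: mean daily quantity indexed by product_code."""
    share = mean_qty.sort_values() * 100.0 / mean_qty.sum()
    cumulative = share.cumsum()
    small = cumulative.index[cumulative < small_cut]
    medium = cumulative.index[(cumulative >= small_cut) & (cumulative < medium_cut)]
    return small, medium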
# flatten credit request data into a timeseries
def __prep_credit_req_data(credit_req_data: pd.DataFrame):
credit_req_components: list = []
def __flatten_credit_req_days(row: pd.DataFrame) -> None:
start_date = row["PlannedStartDate"]
end_date = row["PlannedClosedDate"]
ret = pd.DataFrame(index=pd.date_range(start_date, end_date))
ret.loc[:, 'request_type'] = row['ReqType']
ret.loc[:, 'increment_coeff'] = row['IncrCoef']
ret.loc[:, 'agent_code'] = row['AgentCode']
credit_req_components.append(ret)
credit_req_data.progress_apply(__flatten_credit_req_days, axis=1)
processed_credit_req_data = pd.concat(credit_req_components)
processed_credit_req_data['agent_code'] = processed_credit_req_data[
'agent_code'].astype('int').astype('str')
processed_credit_req_data.index.name = 'visit_date'
processed_credit_req_data = processed_credit_req_data.reset_index()
processed_credit_req_data = processed_credit_req_data.drop_duplicates().reset_index(drop=True)
return processed_credit_req_data
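# Illustrative sketch (not from the original module): each credit request spans
# [PlannedStartDate, PlannedClosedDate] and is expanded into one record per calendar
# day. The core expansion for a single request looks like this (name hypothetical):
def _demo_expand_request(start, end, agent_code, increment_coeff):
    days = pd.date_range(start, end)
    return pd.DataFrame({"visit_date": days,
                         "agent_code": str(agent_code),
                         "increment_coeff": increment_coeff})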
# main method which loads the data and provides them for consumption
# by the project, in appropriate formats
def load_data(visit_plan_path: str,
shipment_data_path: str,
date_analysis_data_path: str,
shipment_split_data_path: str,
sr_loading_data_path: str,
sr_unloading_data_path: str,
stock_collection_data_path: str,
credit_requests_data_path: str,
pre_easter_effect_data_path: str,
pred_start_date: pd.Timestamp) -> tuple:
shipment_data: pd.DataFrame = pd.read_pickle(shipment_data_path)
shipment_data = shipment_data[take_shipment_data_cols]
shipment_data['agent_code'] = shipment_data['agent_code'].astype(str)
shipment_data['pos_code'] = shipment_data['pos_code'].astype(str)
products_to_consider = shipment_data.loc[
shipment_data['invoice_date']
>= pd.Timestamp(day=1, month=1, year=2018)]['product_code'].unique()
products_to_consider = [product for product in products_to_consider if 'RLGC' not in product]
shipment_data = shipment_data.loc[
shipment_data['product_code'].isin(products_to_consider)]
shipment_records = __prep_consumable_shipments_data(shipment_data,
shipment_data[
(shipment_data['invoice_date']
< pd.Timestamp(day=1,
month=1,
year=2019))
])
print('Completed loading shipment data.')
date_analysis_data: pd.DataFrame = pd.read_pickle(date_analysis_data_path)
date_analysis_data = date_analysis_data.set_index('Date')
date_analysis_data = date_analysis_data[take_date_analysis_data_cols]
date_analysis_data = __date_analysis_additions(date_analysis_data)
print('Completed loading different date features data.')
visit_plan_data: pd.DataFrame = pd.read_pickle(visit_plan_path)
weekday_holiday_dates = date_analysis_data[date_analysis_data['WeekdayHoliday']].index.tolist()
visit_plan_data = __visit_plan_reconciliation(visit_plan_data, weekday_holiday_dates)
visit_plan_data['agent_code'] = visit_plan_data['agent_code'].astype(str)
visit_plan_data['pos_code'] = visit_plan_data['pos_code'].astype(str)
# remove any weekend dates, if any, from visit plan
visit_plan_data = visit_plan_data[
visit_plan_data['visit_date'].isin(
date_analysis_data[date_analysis_data['IsWorkingDate'] == 1].index)]
future_visit_plan_data = visit_plan_data.loc[
visit_plan_data['visit_date'] >= pred_start_date,
take_future_visit_plan_data_cols].copy()
future_visit_plan_data.set_index(['visit_date', 'agent_code', 'pos_code'],
inplace=True)
future_visit_plan_data.sort_index(inplace=True)
pos_visit_plan_data = visit_plan_data
pos_visit_plan_data = pos_visit_plan_data[take_pos_visit_plan_data_cols]
pos_visit_plan_data.set_index(['pos_code'], inplace=True)
print('Completed loading visit plan related data.')
shipment_split_data: pd.DataFrame = pd.read_pickle(shipment_split_data_path)
shipment_split_data['agent_code'] = shipment_split_data['agent_code'].astype(str)
shipment_split_data.set_index(['visit_date', 'agent_code', 'product_code'],
inplace=True)
print('Completed loading ER, non-ER shipment split data.')
sr_loading_data: pd.DataFrame = pd.read_pickle(sr_loading_data_path)
sr_loading_data['agent_code'] = sr_loading_data['agent_code'].astype(str)
sr_loading_data.set_index(['visit_date', 'agent_code', 'product_code'],
inplace=True)
print('Completed loading SR loading data.')
sr_unloading_data: pd.DataFrame = pd.read_pickle(sr_unloading_data_path)
sr_unloading_data['agent_code'] = sr_unloading_data['agent_code'].astype(str)
sr_unloading_data.set_index(['visit_date', 'agent_code', 'product_code'],
inplace=True)
print('Completed loading SR unloading data.')
stock_collection_data: pd.DataFrame = pd.read_pickle(stock_collection_data_path)
stock_collection_data.rename(columns={'stock_date': 'invoice_date'}, inplace=True)
stock_collection_data['pos_code'] = stock_collection_data['pos_code'].astype(str)
print('Completed loading stock collection data.')
credit_requests_data: pd.DataFrame = pd.read_pickle(credit_requests_data_path)
credit_requests_data = credit_requests_data[take_credit_req_data_cols]
credit_requests_data = __prep_credit_req_data(credit_requests_data)
print('Completed loading credit requests data.')
pre_easter_effect_data: pd.DataFrame = | pd.read_pickle(pre_easter_effect_data_path) | pandas.read_pickle |
import re
from datetime import datetime, timedelta
import numpy as np
import pandas.compat as compat
import pandas as pd
from pandas.compat import u, StringIO
from pandas.core.base import FrozenList, FrozenNDArray, DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex
from pandas import _np_version_under1p7
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container)
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
raise nose.SkipTest('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container)
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# pass whatever functions you normally would to assertRaises (after the Exception kind)
assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem(): self.container[0] = 5
self.check_mutable_error(setitem)
def setslice(): self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem(): del self.container[0]
self.check_mutable_error(delitem)
def delslice(): del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert_isinstance(result, klass)
self.assertEqual(result, expected)
class TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_inplace(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# other shouldn't be mutated
self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('put', 'itemset', 'fill')
unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [3, 5, 7, -2]
self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
def test_shallow_copying(self):
original = self.container.copy()
assert_isinstance(self.container.view(), FrozenNDArray)
self.assertFalse(isinstance(self.container.view(np.ndarray), FrozenNDArray))
self.assertIsNot(self.container.view(), self.container)
self.assert_numpy_array_equal(self.container, original)
# shallow copy should be the same too
assert_isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container): container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
self.assert_numpy_array_equal(original, vals)
self.assertIsNot(original, vals)
vals[0] = n
self.assert_numpy_array_equal(self.container, original)
self.assertEqual(vals[0], n)
class Ops(tm.TestCase):
def setUp(self):
self.int_index = tm.makeIntIndex(10)
self.float_index = tm.makeFloatIndex(10)
self.dt_index = tm.makeDateIndex(10)
self.dt_tz_index = tm.makeDateIndex(10).tz_localize(tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10)
self.string_index = tm.makeStringIndex(10)
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index)
self.float_series = Series(arr, index=self.int_index)
self.dt_series = Series(arr, index=self.dt_index)
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index)
self.string_series = Series(arr, index=self.string_index)
types = ['int','float','dt', 'dt_tz', 'period','string']
self.objs = [ getattr(self,"{0}_{1}".format(t,f)) for t in types for f in ['index','series'] ]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index,op),index=o.index)
else:
expected = getattr(o,op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o,op)
# these could be series, arrays or scalars
if isinstance(result,Series) and isinstance(expected,Series):
tm.assert_series_equal(result,expected)
elif isinstance(result,Index) and isinstance(expected,Index):
tm.assert_index_equal(result,expected)
elif isinstance(result,np.ndarray) and isinstance(expected,np.ndarray):
self.assert_numpy_array_equal(result,expected)
else:
self.assertEqual(result, expected)
# freq raises AttributeError on an Int64Index because it's not defined
# we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError, otherwise
# an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
self.assertRaises(TypeError, lambda : getattr(o,op))
else:
self.assertRaises(AttributeError, lambda : getattr(o,op))
class TestIndexOps(Ops):
def setUp(self):
super(TestIndexOps, self).setUp()
self.is_valid_objs = [ o for o in self.objs if o._allow_index_ops ]
self.not_valid_objs = [ o for o in self.objs if not o._allow_index_ops ]
def test_ops(self):
tm._skip_if_not_numpy17_friendly()
for op in ['max','min']:
for o in self.objs:
result = getattr(o,op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(ordinal=getattr(o.values, op)(), freq=o.freq)
try:
self.assertEqual(result, expected)
except ValueError:
# comparing tz-aware series with np.array results in ValueError
expected = expected.astype('M8[ns]').astype('int64')
self.assertEqual(result.value, expected)
def test_nanops(self):
# GH 7261
for op in ['max','min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
self.assertEqual(getattr(obj, op)(), 2.0)
obj = klass([np.nan])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
def test_value_counts_unique_nunique(self):
for o in self.objs:
klass = type(o)
values = o.values
# create repeated values; the 'n'th element is repeated n+1 times
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq ambiguous
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
expected_s = Series(range(10, 0, -1), index=values[::-1], dtype='int64')
tm.assert_series_equal(o.value_counts(), expected_s)
if isinstance(o, DatetimeIndex):
# DatetimeIndex.unique returns DatetimeIndex
self.assertTrue(o.unique().equals(klass(values)))
else:
self.assert_numpy_array_equal(o.unique(), values)
self.assertEqual(o.nunique(), len(np.unique(o.values)))
for null_obj in [np.nan, None]:
for o in self.objs:
klass = type(o)
values = o.values
if o.values.dtype == 'int64':
# skip int64 because it cannot hold nan or None
continue
if o.values.dtype == 'datetime64[ns]' and _np_version_under1p7:
# Unable to assign None
continue
# special assign to the numpy array
if o.values.dtype == 'datetime64[ns]':
values[0:2] = pd.tslib.iNaT
else:
values[0:2] = null_obj
# create repeated values; the 'n'th element is repeated n+1 times
if isinstance(o, PeriodIndex):
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
if isinstance(o, DatetimeIndex):
expected_s_na = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1])
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1])
else:
expected_s_na = Series(list(range(10, 2, -1)) +[3], index=values[9:0:-1], dtype='int64')
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1], dtype='int64')
tm.assert_series_equal(o.value_counts(dropna=False), expected_s_na)
tm.assert_series_equal(o.value_counts(), expected_s)
# numpy_array_equal cannot compare arrays that include nan
result = o.unique()
self.assert_numpy_array_equal(result[1:], values[2:])
if isinstance(o, DatetimeIndex):
self.assertTrue(result[0] is pd.NaT)
else:
self.assertTrue(pd.isnull(result[0]))
self.assertEqual(o.nunique(), 8)
self.assertEqual(o.nunique(dropna=False), 9)
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.unique(s_values))
self.assertEqual(s.nunique(), 4)
# don't sort, have to sort after the fact as not sorting is platform-dep
hist = s.value_counts(sort=False)
hist.sort()
expected = Series([3, 1, 4, 2], index=list('acbd'))
expected.sort()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(hist, expected)
# bins
self.assertRaises(TypeError, lambda bins: s.value_counts(bins=bins), 1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({0.998: 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({0.998: 1.0})
tm.assert_series_equal(res1n, exp1n)
self.assert_numpy_array_equal(s1.unique(), np.array([1, 2, 3]))
self.assertEqual(s1.nunique(), 3)
res4 = s1.value_counts(bins=4)
exp4 = Series({0.998: 2, 1.5: 1, 2.0: 0, 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = | Series({0.998: 0.5, 1.5: 0.25, 2.0: 0.0, 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0]) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import argparse
from misc import *
import pandas as pd
DEFPATH = "/home/bakirillov/HDD/weights/fasttext/aligned/wiki.en.align.vec"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-v", "--vectors",
dest="vectors",
action="store",
help="Path to file with aligned vectors",
default=DEFPATH
)
parser.add_argument(
"-s", "--study",
dest="study",
action="store",
help="Path to study data file",
default="en_study.pkl"
)
parser.add_argument(
"-p", "--participant",
dest="participant",
action="store",
default="all",
help="the participant id"
)
parser.add_argument(
"-o", "--output",
dest="output",
action="store",
help="set the path of output file"
)
parser.add_argument(
"-w", "--what",
dest="what",
action="store",
choices=["wv", "1hot"],
default="wv",
help="set the type of output"
)
args = parser.parse_args()
study = Study.load_from_file(args.study)
if args.participant == "all":
word_aucs = study.compute_word_aucs()
words = word_aucs.index
else:
words = study[int(args.participant)][1][2]
if args.what == "wv":
if "vec" in args.vectors:
data = load_vectors(
args.vectors, words
)
else:
data = pd.read_csv(args.vectors, index_col=0).T[0:-1]
elif args.what == "1hot":
if "_1hot_" not in args.output:
data = {a: Study.onehot(a) for a in words}
else:
data = {a: [a] for a in words}
if args.participant == "all":
pd.DataFrame(data).T.join(word_aucs).to_csv(args.output)
else:
real_answers = study[int(args.participant)][1][5].values
given = study[int(args.participant)][1]["answers"].values
answers = pd.DataFrame(
{
"answers": np.array(
[int(a == b) for a, b in zip(real_answers, given)]
),
}
)
answers.index = words.values
| pd.DataFrame(data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 27, 2022
SHREAD Dash Snow Plot
Script for running the snow plot in the dashboard (shread_dash.py)
@author: buriona, tclarkin (2020-2022)
"""
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from plot_lib.utils import import_snotel,import_csas_live
from database import snotel_sites
from database import csas_gages
from plot_lib.utils import screen_spatial,ba_stats_all,ba_stats_std,screen_csas,screen_snotel
from plot_lib.utils import ba_min_plot, ba_max_plot, ba_mean_plot, ba_median_plot
from plot_lib.utils import shade_forecast
def get_basin_stats(snodas_df,stype="swe"):
dates = snodas_df["Date"].unique()
last_date = dates.max()
snodas_unique = snodas_df[snodas_df["Date"]==last_date]
mean_el = round(snodas_unique["elev_ft"].mean(),0)
points = len(snodas_unique)
area = round(points * 0.386102, 0)
if stype=="swe":
mean_ft = snodas_unique["mean"].mean()/12
vol_af = round(mean_ft*area*640,0)
stats = (
f'Volume: ~{vol_af:,.0f} acre-feet | '
f'Mean Elevation: {mean_el:,.0f} feet & Area: {area:,.0f} sq.mi. | '
f'(approximated by {points} points)'
)
else:
stats = (
f'Mean Elevation: {mean_el:,.0f} feet & Area: {area:,.0f} sq.mi. | '
f'(approximated by {points} points)'
)
return stats
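# Unit-conversion notes for get_basin_stats (descriptive, inferred from the code
# above): each SNODAS point is treated as one ~1 km^2 cell (0.386102 sq. mi.), so
# area_sq_mi = n_points * 0.386102. With SWE reported in inches,
# volume_af = (mean_swe_in / 12 ft per in) * area_sq_mi * 640 acres per sq. mi.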
def get_snow_plot(basin, stype, elrange, aspects, slopes, start_date,
end_date, dtype,snotel_sel,csas_sel,forecast_sel,plot_albedo,
offline=True):
"""
:description: this function updates the snowplot
:param basin: the selected basins (checklist)
:param stype: the snow type (swe/snowdepth)
:param elrange: the range of elevations ([min,max])
:param aspects: the range of aspects ([min,max])
:param slopes: the range of slopes ([min,max])
:param start_date: start date (from date selector)
:param end_date: end date (from date selector)
:param snotel_sel: list of selected snotel sites ([])
:param albedo: boolean
:return: update figure
"""
# Set dtype:
dtype = "dv"
# Create date axis
dates = | pd.date_range(start_date, end_date, freq="D", tz='UTC') | pandas.date_range |
"""
.. module:: reporters
:platform: Unix, Windows
:synopsis: a module for defining OpenMM reporter classes.
.. moduleauthor:: <NAME> <<EMAIL>>
.. _pandas.DataFrame: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
.. _StateDataReporter: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.app.statedatareporter.StateDataReporter.html
.. _CustomIntegrator: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomIntegrator.html
.. _CustomCVForce: docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomCVForce.html
"""
import sys
import numpy as np
import pandas as pd
from simtk import openmm
from simtk import unit
from simtk.openmm import app
from .computers import PressureComputer
from .computers import _MoleculeTotalizer
from .utils import InputError
class _MultiStream:
def __init__(self, outputs):
self._outputs = list()
for output in outputs:
self._outputs.append(open(output, 'w') if isinstance(output, str) else output)
def __del__(self):
for output in self._outputs:
if output != sys.stdout and output != sys.stderr:
output.close()
def write(self, message):
for output in self._outputs:
output.write(message)
def flush(self):
for output in self._outputs:
output.flush()
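# Illustrative usage sketch (not part of the module); the file name is a placeholder.
# _MultiStream fans a report out to several targets at once; entries may be open
# file objects or path strings (strings are opened for writing and closed when the
# stream object is deleted):
#
#     stream = _MultiStream([sys.stdout, 'report.csv'])
#     stream.write('step,energy\n')
#     stream.flush()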
class _AtomsMM_Reporter():
"""
Base class for reporters.
"""
def __init__(self, file, reportInterval, **kwargs):
self._reportInterval = reportInterval
self._requiresInitialization = True
self._needsPositions = False
self._needsVelocities = False
self._needsForces = False
self._needEnergy = False
extraFile = kwargs.pop('extraFile', None)
if extraFile is None:
self._out = open(file, 'w') if isinstance(file, str) else file
else:
self._out = _MultiStream([file, extraFile])
self._separator = kwargs.pop('separator', ',')
def _initialize(self, simulation, state):
pass
def _generateReport(self, simulation, state):
pass
def describeNextReport(self, simulation):
"""
Get information about the next report this object will generate.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
Returns
-------
tuple
A five element tuple. The first element is the number of steps
until the next report. The remaining elements specify whether
that report will require positions, velocities, forces, and
energies respectively.
"""
steps = self._reportInterval - simulation.currentStep % self._reportInterval
return (steps, self._needsPositions, self._needsVelocities, self._needsForces, self._needEnergy)
def report(self, simulation, state):
"""
Generate a report.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
state : State
The current state of the simulation
"""
if self._requiresInitialization:
self._initialize(simulation, state)
self._requiresInitialization = False
self._generateReport(simulation, state)
class ExtendedStateDataReporter(app.StateDataReporter):
"""
An extension of OpenMM's StateDataReporter_ class, which outputs information about a simulation,
such as energy and temperature, to a file.
All original functionalities of StateDataReporter_ are preserved and the following ones are
included:
1. Report the Coulomb contribution of the potential energy (keyword: `coulombEnergy`):
This contribution includes both real- and reciprocal-space terms.
2. Report the atomic virial of a fully-flexible system (keyword: `atomicVirial`):
Considering full scaling of atomic coordinates in a box volume change (i.e. without any
distance constraints), the internal virial of the system is given by
.. math::
W = -\\sum_{i,j} r_{ij} E^\\prime(r_{ij}),
where :math:`E^\\prime(r)` is the derivative of the pairwise interaction potential as a
function of the distance between to atoms. Such interaction includes van der Waals, Coulomb,
and bond-stretching contributions. Bond-bending and dihedral angles are not considered
because they are invariant to full volume-scaling of atomic coordinates.
3. Report the nonbonded contribution of the atomic virial (keyword: `nonbondedVirial`):
The nonbonded virial is given by
.. math::
W_\\mathrm{nb} = -\\sum_{i,j} r_{ij} E_\\mathrm{nb}^\\prime(r_{ij}),
where :math:`E_\\mathrm{nb}^\\prime(r)` is the derivative of the nonbonded pairwise
potential, which comprises van der Waals and Coulomb interactions only.
4. Report the atomic pressure of a fully-flexible system (keyword: `atomicPressure`):
.. math::
P = \\frac{2 K + W}{3 V},
where :math:`K` is the kinetic energy sum for all atoms in the system. If keyword
`bathTemperature` is employed (see below), the instantaneous kinetic energy is substituted
by its equipartition-theorem average
:math:`\\left\\langle K \\right\\rangle = 3 N_\\mathrm{atoms} k_B T/2`,
where :math:`T` is the heat-bath temperature.
5. Report the molecular virial of a system (keyword: `molecularVirial`):
To compute the molecular virial, only the center-of-mass coordinates of the molecules are
considered to scale in a box volume change, while the internal molecular structure is kept
unaltered. The molecular virial is computed from the nonbonded part of the atomic virial by
using the formulation of Ref. :cite:`Hunenberger_2002`:
.. math::
W_\\mathrm{mol} = W - \\sum_{i} (\\mathbf{r}_i - \\mathbf{r}_i^\\mathrm{cm}) \\cdot \\mathbf{F}_i,
where :math:`\\mathbf{r}_i` is the coordinate of atom i, :math:`\\mathbf{F}_i` is the
resultant pairwise force acting on it (excluding bond-bending and dihedral angles), and
:math:`\\mathbf{r}_i^\\mathrm{cm}` is the center-of-mass coordinate of its containing
molecule.
6. Report the molecular pressure of a system (keyword: `molecularPressure`):
.. math::
P = \\frac{2 K_\\mathrm{mol} + W_\\mathrm{mol}}{3 V},
where :math:`K_\\mathrm{mol}` is the center-of-mass kinetic energy summed for all molecules
in the system. If keyword `bathTemperature` is employed (see below), the instantaneous
kinetic energy is substituted by its equipartition-theorem average
:math:`\\left\\langle K_\\mathrm{mol} \\right\\rangle = 3 N_\\mathrm{mols} k_B T/2`,
where :math:`T` is the heat-bath temperature.
7. Report the center-of-mass kinetic energy (keyword: `molecularKineticEnergy`):
.. math::
K_\\mathrm{mol} = \\frac{1}{2} \\sum_{i=1}^{N_\\mathrm{mol}} M_i v_{\\mathrm{cm}, i}^2,
where :math:`N_\\mathrm{mol}` is the number of molecules in the system, :math:`M_i` is the
total mass of molecule `i`, and :math:`v_{\\mathrm{cm}, i}` is the center-of-mass velocity
of molecule `i`.
8. Report potential energies at multiple global parameter states (keyword: `globalParameterStates`):
Computes and reports the potential energy of the system at a number of provided global
parameter states.
9. Report global parameter values (keyword: `globalParameters`):
Reports the values of specified global parameters.
10. Report derivatives of energy with respect to global parameters (keyword: `energyDerivatives`):
Computes and reports derivatives of the potential energy of the system at the current
state with respect to specified global parameters.
11. Report values of collective variables (keyword: `collectiveVariables`)
Report the values of a set of collective variables.
12. Allow specification of an extra file for reporting (keyword: `extraFile`).
This can be used for replicating a report simultaneously to `sys.stdout` and to a file
using a unique reporter.
Keyword Args
------------
coulombEnergy : bool, optional, default=False
Whether to write the Coulomb contribution of the potential energy to the file.
atomicVirial : bool, optional, default=False
Whether to write the total atomic virial to the file.
nonbondedVirial : bool, optional, default=False
Whether to write the nonbonded contribution to the atomic virial to the file.
atomicPressure : bool, optional, default=False
Whether to write the internal atomic pressure to the file.
molecularVirial : bool, optional, default=False
Whether to write the molecular virial to the file.
molecularPressure : bool, optional, default=False
Whether to write the internal molecular pressure to the file.
molecularKineticEnergy : bool, optional, default=False
Whether to write the molecular center-of-mass kinetic energy to the file.
globalParameterStates : pandas.DataFrame_, optional, default=None
A DataFrame containing context global parameters (column names) and sets of values
thereof. If it is provided, then the potential energy will be reported for every state
these parameters define.
globalParameters : list(str), optional, default=None
A list of global parameter names. If it is provided, then the values of these parameters
will be reported.
energyDerivatives : list(str), optional, default=None
A list of global parameter names. If it is provided, then the derivatives of the
total potential energy with respect to these parameters will be reported. It is
necessary that the calculation of these derivatives has been activated beforehand
(see, for instance, CustomIntegrator_).
collectiveVariables : list(openmm.CustomCVForce), optional, default=None
A list of CustomCVForce_ objects. If it is provided, then the values of all collective
variables associated with these objects will be reported.
pressureComputer : :class:`~atomsmm.computers.PressureComputer`, optional, default=None
A computer designed to determine pressures and virials. This is mandatory if any keyword
related to virial or pressure is set as `True`.
extraFile : str or file, optional, default=None
Extra file to write to, specified as a file name or a file object.
"""
def __init__(self, file, reportInterval, **kwargs):
self._coulombEnergy = kwargs.pop('coulombEnergy', False)
self._atomicVirial = kwargs.pop('atomicVirial', False)
self._nonbondedVirial = kwargs.pop('nonbondedVirial', False)
self._atomicPressure = kwargs.pop('atomicPressure', False)
self._molecularVirial = kwargs.pop('molecularVirial', False)
self._molecularPressure = kwargs.pop('molecularPressure', False)
self._molecularKineticEnergy = kwargs.pop('molecularKineticEnergy', False)
self._globalParameterStates = kwargs.pop('globalParameterStates', None)
self._globalParameters = kwargs.pop('globalParameters', None)
self._energyDerivatives = kwargs.pop('energyDerivatives', None)
self._collectiveVariables = kwargs.pop('collectiveVariables', None)
self._pressureComputer = kwargs.pop('pressureComputer', None)
extra = kwargs.pop('extraFile', None)
if extra is None:
super().__init__(file, reportInterval, **kwargs)
else:
super().__init__(_MultiStream([file, extra]), reportInterval, **kwargs)
self._computing = any([self._coulombEnergy,
self._atomicVirial,
self._nonbondedVirial,
self._atomicPressure,
self._molecularVirial,
self._molecularPressure,
self._molecularKineticEnergy])
if self._computing:
if self._pressureComputer is not None and not isinstance(self._pressureComputer, PressureComputer):
raise InputError('keyword "pressureComputer" requires a PressureComputer instance')
self._needsPositions = True
self._needsForces = any([self._needsForces,
self._molecularVirial,
self._molecularPressure])
self._needsVelocities = any([self._needsVelocities,
self._molecularPressure,
self._atomicPressure,
self._molecularKineticEnergy])
self._backSteps = -sum([self._speed, self._elapsedTime, self._remainingTime])
def _add_item(self, lst, item):
if self._backSteps == 0:
lst.append(item)
else:
lst.insert(self._backSteps, item)
def _constructHeaders(self):
headers = super()._constructHeaders()
if self._coulombEnergy:
self._add_item(headers, 'Coulomb Energy (kJ/mole)')
if self._atomicVirial:
self._add_item(headers, 'Atomic Virial (kJ/mole)')
if self._nonbondedVirial:
self._add_item(headers, 'Nonbonded Virial (kJ/mole)')
if self._atomicPressure:
self._add_item(headers, 'Atomic Pressure (atm)')
if self._molecularVirial:
self._add_item(headers, 'Molecular Virial (kJ/mole)')
if self._molecularPressure:
self._add_item(headers, 'Molecular Pressure (atm)')
if self._molecularKineticEnergy:
self._add_item(headers, 'Molecular Kinetic Energy (kJ/mole)')
if self._globalParameterStates is not None:
for index in self._globalParameterStates.index:
self._add_item(headers, 'Energy[{}] (kJ/mole)'.format(index))
if self._globalParameters is not None:
for name in self._globalParameters:
self._add_item(headers, name)
if self._energyDerivatives is not None:
for name in self._energyDerivatives:
self._add_item(headers, 'diff(E,{})'.format(name))
if self._collectiveVariables is not None:
for force in self._collectiveVariables:
for index in range(force.getNumCollectiveVariables()):
name = force.getCollectiveVariableName(index)
self._add_item(headers, name)
return headers
def _constructReportValues(self, simulation, state):
values = super()._constructReportValues(simulation, state)
if self._computing:
computer = self._pressureComputer
computer.import_configuration(state)
atomicVirial = computer.get_atomic_virial().value_in_unit(unit.kilojoules_per_mole)
if self._coulombEnergy:
coulombVirial = computer.get_coulomb_virial()
self._add_item(values, coulombVirial.value_in_unit(unit.kilojoules_per_mole))
if self._atomicVirial:
self._add_item(values, atomicVirial)
if self._nonbondedVirial:
nonbondedVirial = computer.get_dispersion_virial() + computer.get_coulomb_virial()
self._add_item(values, nonbondedVirial.value_in_unit(unit.kilojoules_per_mole))
if self._atomicPressure:
atomicPressure = computer.get_atomic_pressure()
self._add_item(values, atomicPressure.value_in_unit(unit.atmospheres))
if self._molecularVirial or self._molecularPressure:
forces = state.getForces(asNumpy=True)
if self._molecularVirial:
molecularVirial = computer.get_molecular_virial(forces)
self._add_item(values, molecularVirial.value_in_unit(unit.kilojoules_per_mole))
if self._molecularPressure:
molecularPressure = computer.get_molecular_pressure(forces)
self._add_item(values, molecularPressure.value_in_unit(unit.atmospheres))
if self._molecularKineticEnergy:
molKinEng = computer.get_molecular_kinetic_energy()
self._add_item(values, molKinEng.value_in_unit(unit.kilojoules_per_mole))
if self._globalParameterStates is not None:
original = dict()
for name in self._globalParameterStates.columns:
original[name] = simulation.context.getParameter(name)
latest = original.copy()
for index, row in self._globalParameterStates.iterrows():
for name, value in row.items():
if value != latest[name]:
simulation.context.setParameter(name, value)
latest[name] = value
energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
self._add_item(values, energy.value_in_unit(unit.kilojoules_per_mole))
for name, value in original.items():
if value != latest[name]:
simulation.context.setParameter(name, value)
if self._globalParameters is not None:
for name in self._globalParameters:
self._add_item(values, simulation.context.getParameter(name))
if self._energyDerivatives is not None:
mystate = simulation.context.getState(getParameterDerivatives=True)
derivative = mystate.getEnergyParameterDerivatives()
for name in self._energyDerivatives:
self._add_item(values, derivative[name])
if self._collectiveVariables is not None:
for force in self._collectiveVariables:
for cv in force.getCollectiveVariableValues(simulation.context):
self._add_item(values, cv)
return values
class XYZReporter(_AtomsMM_Reporter):
"""
Outputs to an XYZ-format file a series of frames containing the coordinates, velocities,
momenta, or forces on all atoms in a Simulation.
.. note::
Coordinates are expressed in nanometers, velocities in nanometer/picosecond, momenta in
dalton*nanometer/picosecond, and forces in dalton*nanometer/picosecond^2.
To use this reporter, create an XYZReporter object and append it to the Simulation's list of
reporters.
Keyword Args
------------
output : str, default='positions'
Which kind of info to report. Valid options are 'positions', 'velocities', 'momenta' and
'forces'.
groups : set(int), default=None
Which force groups to consider in the force calculations. If this is `None`, then all
force groups will be evaluated.
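Example
-------
A minimal usage sketch (the file name, the 1000-step interval and the `simulation`
object are assumptions for illustration):
>>> reporter = XYZReporter('velocities.xyz', 1000, output='velocities')
>>> simulation.reporters.append(reporter)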
"""
def __init__(self, file, reportInterval, **kwargs):
self._output = kwargs.get('output', 'positions')
self._groups = kwargs.get('groups', None)
if self._output == 'positions':
self._unit = unit.angstroms
elif self._output == 'velocities':
self._unit = unit.angstroms/unit.picoseconds
elif self._output == 'momenta':
self._unit = unit.dalton*unit.angstroms/unit.picoseconds
elif self._output == 'forces':
self._unit = unit.dalton*unit.angstroms/unit.picoseconds**2
else:
raise InputError('Unrecognizable keyword value')
super().__init__(file, reportInterval, **kwargs)
self._needsPositions = self._output == 'positions'
self._needsVelocities = self._output in ['velocities', 'momenta']
self._needsForces = self._output == 'forces'
def _initialize(self, simulation, state):
self._symbols = [atom.element.symbol for atom in simulation.topology.atoms()]
sys = simulation.system
self._N = sys.getNumParticles()
if self._output == 'momenta':
mass = [sys.getParticleMass(i).value_in_unit(unit.dalton) for i in range(self._N)]
self._mass = np.vstack([mass, mass, mass]).transpose()*unit.dalton
def _get_values(self, simulation, state):
if self._output == 'positions':
values = state.getPositions(asNumpy=True)
elif self._output == 'velocities':
values = state.getVelocities(asNumpy=True)
elif self._output == 'momenta':
values = self._mass*state.getVelocities(asNumpy=True)
elif self._groups is None:
values = state.getForces(asNumpy=True)
else:
new_state = simulation.context.getState(getForces=True, groups=self._groups)
values = new_state.getForces(asNumpy=True)
return values.value_in_unit(self._unit)
def _write(self, step, N, names, values):
print(N, file=self._out)
| pd.DataFrame(index=names, data=values) | pandas.DataFrame |
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
import dash_color_picker as dcp
import dash_daq as daq
import pandas as pd
import numpy as np
import h5py
import dash_colorscales as dcs
import random
import json
import math
import statistics
import os
from flask import request
import sys
filePath = ''
groupPath = ''
if not len(sys.argv) > 1:
print("Expecting link argument.")
else:
print("in p1.py link is " + sys.argv[1] + ' ' + sys.argv[2])
filePath = sys.argv[1]
groupPath = sys.argv[2]
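# Expected invocation (the paths below are illustrative assumptions): the first
# argument is the SimBA-generated HDF5 feature file and the second the groups CSV,
# e.g.
#   python p1.py /path/to/features.h5 /path/to/groups.csv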
external_stylesheets = ['dash_simba_base.css', '//use.fontawesome.com/releases/v5.0.7/css/all.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
hf = h5py.File(filePath, 'r')
# Input: string path of group
# Output: returns list of groups, dictionary of group and the videos in each group, and if groups exist
def get_groups(path):
g_list = []
g_dict = {}
is_file = os.path.isfile(path)
if is_file:
df_groups = pd.read_csv(path)
for group in df_groups.keys():
g_list.append(group)
g_dict[group] = df_groups[group].dropna().tolist()
else:
pass
return g_list, g_dict, is_file
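# Sketch of the groups CSV that get_groups() expects (group and video names below
# are made up): one column per group, one video name per row.
#
#   Group_1,Group_2
#   Video1,Video3
#   Video2,Video4
#
# g_list -> ['Group_1', 'Group_2']; g_dict['Group_1'] -> ['Video1', 'Video2']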
# Input: List of videos, behaviors, categories, hdf5 file path, and hdf5 file object
# Output: Returns complete dictionary of all behaviors, categories, and features
def get_feature_data(videos, behaviors, g_categories, path, h5file):
dict_features = {}
for b in behaviors:
dict_features[b] = {}
for category in g_categories:
if category == 'VideoData':
video = pd.read_hdf(path, key=category + '/' + videos[0])
dict_features[b][category] = \
{column: get_probability_data(column, videos, path) for column in video.columns if '_'+b in column}
else:
for sub_c in h5file.get(category).keys():
video = pd.read_hdf(path, key=category + '/' + sub_c)
if b == 'Attack':
dict_features[b][sub_c] = {column: video[column] for column in video.columns}
else:
dict_features[b][sub_c] = {column: video[column] for column in video.columns if b in column}
return dict_features
# Input: Current feature name, list of videos, and path of hdf5 file
# Output: Returns pandas dataframe for each video with probability features
def get_probability_data(feature, list_videos, path_name):
feature_data = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
self.assertEqual(ival_Q.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
self.assertEqual(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
self.assertEqual(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
self.assertEqual(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
self.assertEqual(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
self.assertEqual(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
self.assertEqual(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
self.assertEqual(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
self.assertEqual(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
self.assertEqual(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
self.assertEqual(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
self.assertEqual(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
self.assertEqual(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
self.assertEqual(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
self.assertEqual(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
self.assertEqual(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
self.assertEqual(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
self.assertEqual(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
self.assertEqual(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
self.assertEqual(ival_M.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
self.assertEqual(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
self.assertEqual(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
self.assertEqual(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
self.assertEqual(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
self.assertEqual(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
self.assertEqual(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
self.assertEqual(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
self.assertEqual(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
self.assertEqual(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
self.assertEqual(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
self.assertEqual(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
self.assertEqual(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='W', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='W', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='W', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007, quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7, hour=23,
minute=59, second=59)
self.assertEqual(ival_W.asfreq('A'), ival_W_to_A)
self.assertEqual(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
self.assertEqual(ival_W.asfreq('Q'), ival_W_to_Q)
self.assertEqual(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
self.assertEqual(ival_W.asfreq('M'), ival_W_to_M)
self.assertEqual(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
self.assertEqual(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
self.assertEqual(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
self.assertEqual(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
self.assertEqual(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
self.assertEqual(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
self.assertEqual(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
self.assertEqual(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
self.assertEqual(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
self.assertEqual(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
self.assertEqual(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
self.assertEqual(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
self.assertEqual(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
self.assertEqual(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
self.assertEqual(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
self.assertEqual(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
self.assertEqual(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
self.assertEqual(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
self.assertEqual(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
self.assertEqual(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
self.assertEqual(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
self.assertEqual(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
self.assertEqual(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
self.assertEqual(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
self.assertEqual(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
self.assertEqual(ival_W.asfreq('W'), ival_W)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
ival_W.asfreq('WK')
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK', year=2007, month=1, day=1)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-SAT', year=2007, month=1, day=6)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-FRI', year=2007, month=1, day=5)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-THU', year=2007, month=1, day=4)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-WED', year=2007, month=1, day=3)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-TUE', year=2007, month=1, day=2)
with self.assertRaisesRegexp(ValueError, msg):
Period(freq='WK-MON', year=2007, month=1, day=1)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='W', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1, hour=23,
minute=59, second=59)
self.assertEqual(ival_B.asfreq('A'), ival_B_to_A)
self.assertEqual(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
self.assertEqual(ival_B.asfreq('Q'), ival_B_to_Q)
self.assertEqual(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
self.assertEqual(ival_B.asfreq('M'), ival_B_to_M)
self.assertEqual(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
self.assertEqual(ival_B.asfreq('W'), ival_B_to_W)
self.assertEqual(ival_B_end_of_week.asfreq('W'), ival_B_to_W)
self.assertEqual(ival_B.asfreq('D'), ival_B_to_D)
self.assertEqual(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
self.assertEqual(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
self.assertEqual(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
self.assertEqual(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
self.assertEqual(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
self.assertEqual(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
self.assertEqual(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
# frequency conversion tests: from Daily Frequency
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
# TODO: unused?
# ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = | Period(freq="Q-JAN", year=2007, quarter=4) | pandas.Period |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 14:18:10 2017
@author: massimo
Straight import of exiobase data
"""
import pandas as pd
import numpy as np
def importing(filename, celltype):
'''
Args:
'filename' [string] name of the file...
'celltype' [type of file], three values allowed:
'single': text file (maybe generated from a simple Excel file) format:
row 1: text, anything, e.g. "CountryCode_ActivityTypeName"
column 1: text, anything, e.g. "CountryCode_ProductTypeName"
'multi': text file (maybe generated from a complex Excel file) where:
row 2: float, total output per sector
row 4: text, CountryCode
row 5: text, ActivityTypeName
column 1: text, CountryCode
column 2: text, ProductTypeName
column 5: text, UnitCode
'exiobase': text file in exiobase format (used for SUP, USE, FD, emissions, resources):
row 1: text, CountryCode
row 2: text, ActivityTypeName
column 1: text, CountryCode (or Compartment)
column 2: text, ProductTypeName (or Substance)
column 3: text, UnitCode
Setting any other value allows importing the
exiobase format used for factors and materials, which is:
row 1: text, CountryCode
row 2: text, ActivityTypeName
column 2: text, PhysicalTypeName (or FactorInputTypeName)
column 3: text, UnitCode
The factor name will be listed in the first index level
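Example (a usage sketch; the file name below is an assumption for illustration):
    use_table = importing('exiobase_use.txt', 'exiobase')
    use_table.head()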
'''
if celltype == "single" and filename[-3:] == "txt":
print('Importing singletxt...')
MRtable = pd.read_table(filename, header=0, index_col=0)
MRtable = MRtable.astype(float)
print('Done, this is NOT a multi-index pd.DataFrame object')
elif celltype == "single" and filename[-3:] == "csv":
print('Importing singlecsv...')
MRtable = | pd.read_csv(filename, header=0, index_col=0, sep=';') | pandas.read_csv |
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
#from .functions import tokenize
class LengthExtractor(BaseEstimator, TransformerMixin):
def get_length(self, text):
return len(text)
def fit(self, X, y=None):
return self
def transform(self, X):
X_w_length = pd.Series(X).apply(self.get_length)
return | pd.DataFrame(X_w_length) | pandas.DataFrame |
def Cosiner(params : dict):
def Column_correction(table):
drop_col = [i for i in table.columns if "Unnamed" in i]
table.drop(drop_col, axis = 1, inplace = True)
return table
def Samplewise_export(neg_csv_file, pos_csv_file, out_path, merged_edge_table, merged_node_table) :
print("Exporting sample-wise tables...")
neg_csv = pd.read_csv(neg_csv_file, index_col ="row ID")
pos_csv = pd.read_csv(pos_csv_file, index_col ="row ID")
neg_csv = Column_correction(neg_csv)
pos_csv = Column_correction(pos_csv)
neg_csv.columns = neg_csv.columns.str.replace(".mzXML Peak area", "", regex = False).str.replace('NEG_', '', regex = False)
pos_csv.columns = pos_csv.columns.str.replace(".mzXML Peak area", "", regex = False).str.replace('POS_', '', regex = False)
neg_csv.drop(["row m/z", "row retention time"], axis = 1, inplace = True)
pos_csv.drop(["row m/z", "row retention time"], axis = 1, inplace = True)
samples = list(set(list(neg_csv.columns) + list(pos_csv.columns)))
samples.sort()
nodes_neg = node_table[node_table['ion_mode'] == "NEG"]
nodes_neg = nodes_neg['feature_id'][nodes_neg['status_universal'] != "neutral"].astype(int).tolist()
neg_csv = neg_csv.loc[nodes_neg]
nodes_pos = node_table[node_table['ion_mode'] == "POS"]
nodes_pos = nodes_pos['feature_id'][nodes_pos['status_universal'] != "neutral"].astype(int).tolist()
pos_csv = pos_csv.loc[nodes_pos]
for sample in tqdm(samples):
#sample = samples[0]
ion_ids_neg = neg_csv.index[neg_csv[sample] > 0.0]
ion_ids_pos = pos_csv.index[pos_csv[sample] > 0.0]
#convert feature_ids to the new indexes
tmp_table = merged_node_table[merged_node_table['status'] != "neg_neutral"]
tmp_table = tmp_table[tmp_table['status'] != "pos_neutral"]
tmp_table = tmp_table[tmp_table['status'] != "mix_neutral"]
tmp_table_pos = tmp_table[tmp_table['ion_mode'] == "POS"]
tmp_table_neg = tmp_table[tmp_table['ion_mode'] == "NEG"]
ion_idx_neg = pd.Series(tmp_table_neg.index, index = tmp_table_neg['feature_id'])
ion_idx_neg = list(ion_idx_neg[ion_ids_neg])
ion_idx_pos = pd.Series(tmp_table_pos.index, index = tmp_table_pos['feature_id'])
ion_idx_pos = list(ion_idx_pos[ion_ids_pos])
ion_idx_mix = ion_idx_neg + ion_idx_pos
# Get sample neutrals
neutral_edges = merged_edge_table.loc[merged_edge_table["Adnotation"].dropna().index]
kept_edges = [i for i in neutral_edges.index if neutral_edges.loc[i, "node_2"] in ion_idx_mix]
# Get ion edges
ion_edges = merged_edge_table[merged_edge_table['status'] != "neg_add_edge"]
ion_edges = ion_edges[ion_edges['status'] != "pos_add_edge"]
for i in ion_edges.index:
if ion_edges.loc[i, "node_1"] in ion_idx_mix:
if ion_edges.loc[i, "node_2"] in ion_idx_mix:
kept_edges.append(i)
kept_edges.sort()
sample_edges = merged_edge_table.loc[kept_edges]
sample_edges.sort_values('node_1', inplace = True)
sample_edges.reset_index(inplace = True, drop = True)
kept_nodes = list(set(list(sample_edges['node_1']) + list(sample_edges['node_2'])))
kept_nodes.sort()
sample_nodes = merged_node_table.loc[kept_nodes].copy()
sample_nodes.drop( | pd.Series(samples) | pandas.Series |
# -*- coding: utf-8 -*-
"""Functions for the input and output of data and results.
todo: This file will be removed in version 0.10 and functionality moved to
datasets/_data_io.py
"""
import itertools
import os
import textwrap
from warnings import warn
import numpy as np
import pandas as pd
from sktime.datatypes._panel._convert import _make_column_names, from_long_to_nested
from sktime.transformations.base import BaseTransformer
from sktime.utils.validation.panel import check_X, check_X_y
class TsFileParseException(Exception):
"""Should be raised when parsing a .ts file and the format is incorrect."""
pass
class LongFormatDataParseException(Exception):
"""Should be raised when parsing a .csv file with long-formatted data."""
pass
def load_from_tsfile_to_dataframe(
full_file_path_and_name,
return_separate_X_and_y=True,
replace_missing_vals_with="NaN",
):
"""Load data from a .ts file into a Pandas DataFrame.
Parameters
----------
full_file_path_and_name: str
The full pathname of the .ts file to read.
return_separate_X_and_y: bool
true if X and Y values should be returned as separate Data Frames (
X) and a numpy array (y), false otherwise.
This is only relevant for data that has class labels.
replace_missing_vals_with: str
The value that missing values in the text file should be replaced
with prior to parsing.
Returns
-------
DataFrame, ndarray
If return_separate_X_and_y then a tuple containing a DataFrame and a
numpy array containing the relevant time-series and corresponding
class values.
DataFrame
If not return_separate_X_and_y then a single DataFrame containing
all time-series and (if relevant) a column "class_vals" containing the
associated class values.
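Examples
--------
A minimal sketch; the file name is an assumption for illustration.
>>> X, y = load_from_tsfile_to_dataframe("BasicMotions_TRAIN.ts")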
"""
warn(
"This function has moved to datasets/_data_io, this version will be removed "
"in V0.10",
FutureWarning,
)
# Initialize flags and variables used when parsing the file
metadata_started = False
data_started = False
has_problem_name_tag = False
has_timestamps_tag = False
has_univariate_tag = False
has_class_labels_tag = False
has_data_tag = False
previous_timestamp_was_int = None
prev_timestamp_was_timestamp = None
num_dimensions = None
is_first_case = True
instance_list = []
class_val_list = []
line_num = 0
# Parse the file
# print(full_file_path_and_name)
with open(full_file_path_and_name, "r", encoding="utf-8") as file:
for line in file:
# Strip white space from start/end of line and change to
# lowercase for use below
line = line.strip().lower()
# Empty lines are valid at any point in a file
if line:
# Check if this line contains metadata
# Please note that even though metadata is stored in this
# function it is not currently published externally
if line.startswith("@problemname"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(" ")
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException(
"problemname tag requires an associated value"
)
# problem_name = line[len("@problemname") + 1:]
has_problem_name_tag = True
metadata_started = True
elif line.startswith("@timestamps"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(" ")
token_len = len(tokens)
if token_len != 2:
raise TsFileParseException(
"timestamps tag requires an associated Boolean " "value"
)
elif tokens[1] == "true":
timestamps = True
elif tokens[1] == "false":
timestamps = False
else:
raise TsFileParseException("invalid timestamps value")
has_timestamps_tag = True
metadata_started = True
elif line.startswith("@univariate"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(" ")
token_len = len(tokens)
if token_len != 2:
raise TsFileParseException(
"univariate tag requires an associated Boolean " "value"
)
elif tokens[1] == "true":
# univariate = True
pass
elif tokens[1] == "false":
# univariate = False
pass
else:
raise TsFileParseException("invalid univariate value")
has_univariate_tag = True
metadata_started = True
elif line.startswith("@classlabel"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(" ")
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException(
"classlabel tag requires an associated Boolean " "value"
)
if tokens[1] == "true":
class_labels = True
elif tokens[1] == "false":
class_labels = False
else:
raise TsFileParseException("invalid classLabel value")
# Check if we have any associated class values
if token_len == 2 and class_labels:
raise TsFileParseException(
"if the classlabel tag is true then class values "
"must be supplied"
)
has_class_labels_tag = True
class_label_list = [token.strip() for token in tokens[2:]]
metadata_started = True
# Check if this line contains the start of data
elif line.startswith("@data"):
if line != "@data":
raise TsFileParseException(
"data tag should not have an associated value"
)
if data_started and not metadata_started:
raise TsFileParseException("metadata must come before data")
else:
has_data_tag = True
data_started = True
# If the 'data tag has been found then metadata has been
# parsed and data can be loaded
elif data_started:
# Check that a full set of metadata has been provided
if (
not has_problem_name_tag
or not has_timestamps_tag
or not has_univariate_tag
or not has_class_labels_tag
or not has_data_tag
):
raise TsFileParseException(
"a full set of metadata has not been provided "
"before the data"
)
# Replace any missing values with the value specified
line = line.replace("?", replace_missing_vals_with)
# Check if we dealing with data that has timestamps
if timestamps:
# We're dealing with timestamps so cannot just split
# line on ':' as timestamps may contain one
has_another_value = False
has_another_dimension = False
timestamp_for_dim = []
values_for_dimension = []
this_line_num_dim = 0
line_len = len(line)
char_num = 0
while char_num < line_len:
# Move through any spaces
while char_num < line_len and str.isspace(line[char_num]):
char_num += 1
# See if there is any more data to read in or if
# we should validate that read thus far
if char_num < line_len:
# See if we have an empty dimension (i.e. no
# values)
if line[char_num] == ":":
if len(instance_list) < (this_line_num_dim + 1):
instance_list.append([])
instance_list[this_line_num_dim].append(
pd.Series(dtype="object")
)
this_line_num_dim += 1
has_another_value = False
has_another_dimension = True
timestamp_for_dim = []
values_for_dimension = []
char_num += 1
else:
# Check if we have reached a class label
if line[char_num] != "(" and class_labels:
class_val = line[char_num:].strip()
if class_val not in class_label_list:
raise TsFileParseException(
"the class value '"
+ class_val
+ "' on line "
+ str(line_num + 1)
+ " is not "
"valid"
)
class_val_list.append(class_val)
char_num = line_len
has_another_value = False
has_another_dimension = False
timestamp_for_dim = []
values_for_dimension = []
else:
# Read in the data contained within
# the next tuple
if line[char_num] != "(" and not class_labels:
raise TsFileParseException(
"dimension "
+ str(this_line_num_dim + 1)
+ " on line "
+ str(line_num + 1)
+ " does "
"not "
"start "
"with a "
"'('"
)
char_num += 1
tuple_data = ""
while (
char_num < line_len
and line[char_num] != ")"
):
tuple_data += line[char_num]
char_num += 1
if (
char_num >= line_len
or line[char_num] != ")"
):
raise TsFileParseException(
"dimension "
+ str(this_line_num_dim + 1)
+ " on line "
+ str(line_num + 1)
+ " does "
"not end"
" with a "
"')'"
)
# Read in any spaces immediately
# after the current tuple
char_num += 1
while char_num < line_len and str.isspace(
line[char_num]
):
char_num += 1
# Check if there is another value or
# dimension to process after this tuple
if char_num >= line_len:
has_another_value = False
has_another_dimension = False
elif line[char_num] == ",":
has_another_value = True
has_another_dimension = False
elif line[char_num] == ":":
has_another_value = False
has_another_dimension = True
char_num += 1
# Get the numeric value for the
# tuple by reading from the end of
# the tuple data backwards to the
# last comma
last_comma_index = tuple_data.rfind(",")
if last_comma_index == -1:
raise TsFileParseException(
"dimension "
+ str(this_line_num_dim + 1)
+ " on line "
+ str(line_num + 1)
+ " contains a tuple that has "
"no comma inside of it"
)
try:
value = tuple_data[last_comma_index + 1 :]
value = float(value)
except ValueError:
raise TsFileParseException(
"dimension "
+ str(this_line_num_dim + 1)
+ " on line "
+ str(line_num + 1)
+ " contains a tuple that does "
"not have a valid numeric "
"value"
)
# Check the type of timestamp that
# we have
timestamp = tuple_data[0:last_comma_index]
try:
timestamp = int(timestamp)
timestamp_is_int = True
timestamp_is_timestamp = False
except ValueError:
timestamp_is_int = False
if not timestamp_is_int:
try:
timestamp = timestamp.strip()
timestamp_is_timestamp = True
except ValueError:
timestamp_is_timestamp = False
# Make sure that the timestamps in
# the file (not just this dimension
# or case) are consistent
if (
not timestamp_is_timestamp
and not timestamp_is_int
):
raise TsFileParseException(
"dimension "
+ str(this_line_num_dim + 1)
+ " on line "
+ str(line_num + 1)
+ " contains a tuple that "
"has an invalid timestamp '"
+ timestamp
+ "'"
)
if (
previous_timestamp_was_int is not None
and previous_timestamp_was_int
and not timestamp_is_int
):
raise TsFileParseException(
"dimension "
+ str(this_line_num_dim + 1)
+ " on line "
+ str(line_num + 1)
+ " contains tuples where the "
"timestamp format is "
"inconsistent"
)
if (
prev_timestamp_was_timestamp is not None
and prev_timestamp_was_timestamp
and not timestamp_is_timestamp
):
raise TsFileParseException(
"dimension "
+ str(this_line_num_dim + 1)
+ " on line "
+ str(line_num + 1)
+ " contains tuples where the "
"timestamp format is "
"inconsistent"
)
# Store the values
timestamp_for_dim += [timestamp]
values_for_dimension += [value]
# If this was our first tuple then
# we store the type of timestamp we
# had
if (
prev_timestamp_was_timestamp is None
and timestamp_is_timestamp
):
prev_timestamp_was_timestamp = True
previous_timestamp_was_int = False
if (
previous_timestamp_was_int is None
and timestamp_is_int
):
prev_timestamp_was_timestamp = False
previous_timestamp_was_int = True
# See if we should add the data for
# this dimension
if not has_another_value:
if len(instance_list) < (
this_line_num_dim + 1
):
instance_list.append([])
if timestamp_is_timestamp:
timestamp_for_dim = pd.DatetimeIndex(
timestamp_for_dim
)
instance_list[this_line_num_dim].append(
pd.Series(
index=timestamp_for_dim,
data=values_for_dimension,
)
)
this_line_num_dim += 1
timestamp_for_dim = []
values_for_dimension = []
elif has_another_value:
raise TsFileParseException(
"dimension " + str(this_line_num_dim + 1) + " on "
"line "
+ str(line_num + 1)
+ " ends with a ',' that "
"is not followed by "
"another tuple"
)
elif has_another_dimension and class_labels:
raise TsFileParseException(
"dimension " + str(this_line_num_dim + 1) + " on "
"line "
+ str(line_num + 1)
+ " ends with a ':' while "
"it should list a class "
"value"
)
elif has_another_dimension and not class_labels:
if len(instance_list) < (this_line_num_dim + 1):
instance_list.append([])
instance_list[this_line_num_dim].append(
| pd.Series(dtype=np.float32) | pandas.Series |
# This Source Code Form is subject to the terms of the MPL
# License. If a copy of the same was not distributed with this
# file, You can obtain one at
# https://github.com/akhilpandey95/altpred/blob/master/LICENSE.
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from datetime import datetime
from altmetric_api import add_pubdate
# function for preparing the 2014 altmetrics dataset
def data_load(file_path):
"""
Load the processed 2014 altmetrics dataset
Parameters
----------
file_path : str
    Path to the processed 2014 altmetrics CSV file
Returns
-------
Dataframe
pandas.DataFrame
"""
try:
# read the dataset first
data = | pd.read_csv(file_path, low_memory=False) | pandas.read_csv |
import copy
import gc
import os
from datetime import datetime
import numpy as np
import pandas as pd
import tifffile as tif
from tifffile import TiffWriter
from .adaptive_estimation import AdaptiveShiftEstimation
from .image_positions import load_necessary_xml_tags, get_image_sizes_scan_auto, get_image_sizes_scan_manual, \
get_path_for_each_plane_and_field_per_channel
from .image_processing import stitch_z_projection, create_z_projection_for_fov, stitch_plane, stitch_images
from .ome_tags import create_ome_metadata, get_channel_metadata
from .saving_loading import load_parameters, save_parameters
class ImageStitcher:
def __init__(self):
# user input
self._img_dir = ''
self._xml_path = None
self._out_dir = ''
self._reference_channel = ''
self._stitch_only_ch = ['all']
self._scan = ''
self._stitching_mode = ''
self._ill_cor_ch = ['none']
self._is_adaptive = True
self._make_preview = True
self._save_param = ''
self._load_param_path = 'none'
self._img_name = ''
self._fovs = None
self._extra_meta = None
# working variables
self._channel_names = []
self._nchannels = 0
self._dtype = np.uint16
self._measurement_time = ''
self._ome_meta = ''
self._preview_ome_meta = ''
self._channel_ids = {}
self._y_pos = None
self._default_img_shape = tuple()
def stitch(self):
st = datetime.now()
print('\nstarted', st)
self.check_dir_exist()
self.check_scan_modes()
tag_Images, field_path_list, plane_path_list = self.load_metadata()
self._default_img_shape = (int(tag_Images[0].find('ImageSizeY').text), int(tag_Images[0].find('ImageSizeX').text))
ids, x_size, y_size = self.estimate_image_sizes(tag_Images, field_path_list)
self.generate_ome_meta(self._channel_ids, x_size, y_size, tag_Images, plane_path_list)
self.perform_stitching(ids, x_size, y_size, plane_path_list, field_path_list, self._ome_meta)
self.write_separate_ome_xml()
fin = datetime.now()
print('\nelapsed time', fin - st)
def check_dir_exist(self):
# check if input and output directories exist
if not os.path.isdir(self._img_dir):
raise ValueError('img_dir does not exist')
if not os.path.exists(self._out_dir):
os.makedirs(self._out_dir)
if not self._out_dir.endswith('/'):
self._out_dir = self._out_dir + '/'
if not self._img_dir.endswith('/'):
self._img_dir = self._img_dir + '/'
if self._xml_path is None:
self._xml_path = self._img_dir + 'Index.idx.xml'
def check_scan_modes(self):
available_scan_modes = ('auto', 'manual')
if self._scan not in available_scan_modes:
raise ValueError('Incorrect scan mode. Available scan modes ' + ', '.join(available_scan_modes))
available_stitching_modes = ('stack', 'maxz')
if self._stitching_mode not in available_stitching_modes:
raise ValueError(
'Incorrect stitching mode. Available stitching modes ' + ', '.join(available_stitching_modes))
def load_metadata(self):
tag_Images, tag_Name, tag_MeasurementStartTime = load_necessary_xml_tags(self._xml_path)
if self._fovs is not None:
self._fovs = [int(f) for f in self._fovs.split(',')]
plane_path_list, field_path_list = get_path_for_each_plane_and_field_per_channel(tag_Images, self._img_dir, self._fovs)
nchannels = len(plane_path_list.keys())
channel_names = list(plane_path_list.keys())
channel_ids = {ch: i for i, ch in enumerate(channel_names)}
if isinstance(self._stitch_only_ch, str):
self._stitch_only_ch = [self._stitch_only_ch]
if self._stitch_only_ch == ['all']:
self._stitch_only_ch = channel_names
if self._reference_channel == 'none':
self._reference_channel = channel_names[0]
elif self._stitch_only_ch != ['all']:
# if user specified custom number of channels check if they are correct
for i in self._stitch_only_ch:
if i not in channel_names:
raise ValueError('There is no channel with name ' + i + ' in the XML file. ' +
'Available channels ' + ', '.join(channel_names))
if self._reference_channel == 'none':
self._reference_channel = self._stitch_only_ch[0]
nchannels = len(self._stitch_only_ch)
channel_names = self._stitch_only_ch
if isinstance(self._ill_cor_ch, str):
self._ill_cor_ch = [self._ill_cor_ch]
if self._ill_cor_ch == ['all']:
self._ill_cor_ch = {ch: True for ch in channel_names}
elif self._ill_cor_ch == ['none']:
self._ill_cor_ch = {ch: False for ch in channel_names}
else:
self._ill_cor_ch = {ch: (True if ch in self._ill_cor_ch else False) for ch in channel_names}
self._channel_ids = {k: v for k, v in channel_ids.items() if k in channel_names}
self._channel_names = channel_names
self._nchannels = nchannels
self._measurement_time = tag_MeasurementStartTime
if self._img_name == '':
self._img_name = tag_Name
if not self._img_name.endswith(('.tif', '.tiff')):
self._img_name += '.tif'
return tag_Images, field_path_list, plane_path_list
def estimate_image_sizes(self, tag_Images, field_path_list):
if self._load_param_path == 'none':
if self._scan == 'auto':
ids, x_size, y_size, ids_in_clusters, self._y_pos = get_image_sizes_scan_auto(tag_Images, self._reference_channel, self._fovs)
elif self._scan == 'manual':
ids, x_size, y_size = get_image_sizes_scan_manual(tag_Images, self._reference_channel, self._fovs)
if self._is_adaptive == False:
ids = pd.DataFrame(ids)
x_size = pd.DataFrame(x_size)
y_size = | pd.DataFrame(y_size) | pandas.DataFrame |
import pandas as pd
import numpy as np
import streamlit as st
import math
from utilityfunctions import loadPowerCurve, binWindResourceData, searchSorted, preProcessing, getAEP, checkConstraints
from shapely.geometry import Point # Imported for constraint checking
from shapely.geometry.polygon import Polygon
import plotly.express as px
from geneticalgorithm import geneticalgorithm as ga
import randomsearch as rs
import pyswarms as ps
import random
import warnings
warnings.filterwarnings("ignore")
def optimizer(wind_data, powercurve_data):
st.write('''## Optimizer Result''')
st.write('Uses Genetic Algorithm to converge to optimal x and y coordinates')
if wind_data is not None and powercurve_data is not None :
power_curve_data = loadPowerCurve(powercurve_data)
st.success("Powerdata loaded successfully")
wind_data = binWindResourceData(wind_data)
st.success("winddata loaded sucessfully")
# Turbine Specifications.
st.write('''## Turbine Specifications''')
global turb_diam, turb_height
turb_diam = st.number_input("Turbine Diameter (in m)",min_value= 60, max_value=120, value=100, step=1)
turb_height = st.number_input("Turbine Height (in m)",min_value= 80, max_value=140, value=100, step=1)
turb_specs = {
'Name': '<NAME>',
'Vendor': 'Anon Vendor',
'Type': 'Anon Type',
'Dia (m)': turb_diam,
'Rotor Area (m2)': 7853,
'Hub Height (m)': turb_height,
'Cut-in Wind Speed (m/s)': 3.5,
'Cut-out Wind Speed (m/s)': 25,
'Rated Wind Speed (m/s)': 15,
'Rated Power (MW)': 3
}
turb_diam = turb_specs['Dia (m)']
turb_rad = turb_diam/2
power_curve = power_curve_data
wind_inst_freq = wind_data
st.write('''## Field Specifications''')
global n
n = st.number_input("Number of turbines, n",min_value= 10, max_value=60, value=40, step=1)
side = st.slider("side length (in m)", min_value = 100, max_value = 10000, value = 4000) # in m , 100 - 10,000
st.write('''## Constraints''')
peri_constr = st.number_input("Perimeter constraint (in m)",min_value= 10, max_value=100, value=50, step=1) # 10 - 100
prox_constr = st.number_input("Proximity constraint (in m)",min_value= 250, max_value=600, value=400, step=1) # 250-800
n_wind_instances, cos_dir, sin_dir, wind_sped_stacked, C_t = preProcessing(power_curve, n)
st.write('''## Select the Algorithms to use''')
if st.checkbox('Genetic Algorithm', value=False):
col1, col2 = st.beta_columns([0.5, 9.5])
col2.subheader("Using Genetic Algorithm for optimization")
max_iter = col2.slider("Max Number of Iterations", min_value = 10, max_value = 1000, value = 50)
population_size = col2.number_input("Population size",min_value= 10, max_value=100, value= 30, step=1)
var_bound = np.array([[peri_constr,side - peri_constr]]*(2*n))
algorithm_param = {'max_num_iteration':max_iter,\
'population_size':population_size,\
'mutation_probability':0.1,\
'elit_ratio': 0.2,\
'crossover_probability': 0.5,\
'parents_portion': 0.3,\
'crossover_type':'uniform',\
'max_iteration_without_improv':150}
col2.write('If the values are set, click Run')
if col2.button('Run'):
def f(z):
pen = 0
for i in range(n):
for j in range(i):
dist = math.sqrt((z[i]-z[j])**2+(z[n+i]-z[n+j])**2)
if dist>prox_constr:
pen = pen + 600 + 1000*dist
data_dict = {'x':list(z[0:n]),'y':list(z[n:2*n])}
df1 = | pd.DataFrame(data_dict) | pandas.DataFrame |
"""
This function gets all the features for online processing.
"""
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import pandas as pd
from gensim.corpora.dictionary import Dictionary
from gensim.models.ldamodel import LdaModel
"""
Common text processing functionalities.
"""
def general_text_processing(data):
regex_list = [('\\S*@\\S*\\s?', ''),
('\\s+', ' '),
("\\'", ""),
("\\d+", "")
]
for regex_text in regex_list:
data = re.sub(regex_text[0], regex_text[1], data)
return data
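# Usage sketch (the sample string is an invented example): e-mail-like tokens and
# digits are removed, apostrophes dropped and whitespace collapsed.
#
#     cleaned = general_text_processing("mail me at someone@example.com, it's 2021")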
"""
Parallelize stopwords
"""
import multiprocessing as mp # cpu_count, Parallel, Pool
import numpy as np
cores = mp.cpu_count() # Number of CPU cores on your system
partitions = cores # Define as many partitions as you want
def get_split(data, n):
size = data.shape[0]
ret = []
k = int((size + n) / n)
for i in range(1, size + 1):
ret.append(data[(i - 1) * k: min(size, i * k)])
return ret
def parallelize(data, func):
data_split = get_split(data, cores)
pool = mp.Pool(cores)
data = pd.concat(pool.map(func, data_split))
pool.close()
pool.join()
return data
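# Usage sketch (an assumption, not from the source): the callable passed to
# parallelize() receives a DataFrame chunk and must return a DataFrame, because the
# chunks are recombined with pd.concat.
#
#     def clean_chunk(chunk):
#         chunk = chunk.copy()
#         chunk['tweet'] = chunk['tweet'].map(clean_tweet)
#         return chunk
#
#     df = parallelize(df, clean_chunk)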
stop_words = set(stopwords.words('english'))
"""
clean tweet function.
Standard refered from the website.
"""
def clean_tweet(text):
# Create a string form of our list of text
if pd.isnull(text) or pd.isna(text):
return ""
global stop_words
raw_string = text
#raw_string = ''.join(text)
no_links = re.sub(r'http\S+', '', raw_string)
no_unicode = re.sub(r"\\[a-z][a-z]?[0-9]+", '', no_links)
no_special_characters = re.sub('[^A-Za-z ]+', '', no_unicode)
words = no_special_characters.split(" ")
words = [w for w in words if len(w) > 2]
words = [w.lower() for w in words]
words = [w for w in words if w not in stop_words]
# ret = ' '.join(words)
return words
"""
Remove stopwords
"""
def remove_stop_words(text):
valid_words = [x for x in re.split('^[a-zA-Z]', text) if x not in stop_words]
valid_words = [x for x in valid_words if len(x) != 0]
### Empty
if (len(valid_words) == 0):
return ""
return " ".join(valid_words)
"""
Fill dictionary
"""
def fill_lda_result(df, lda_model, dictionary, topic_count):
values = df['tweet'].values.tolist()
doc2_corupus = [dictionary.doc2bow(text.split()) for
text in values]
predicted_values = [lda_model[vec] for vec in doc2_corupus]
"""
append to column
"""
for i in range(len(predicted_values)):
temp = [0 for x in range(topic_count)]
for ele in predicted_values[i]:
temp[ele[0]] = ele[1]
predicted_values[i] = temp
for index in range(topic_count):
col_name = "topic_" + str(index)
df[col_name] = [x[index] for x in predicted_values]
return df
def fill_lda_result_2(df, lda_model, dictionary, topic_count):
values = df['tweet'].values.tolist()
doc2_corupus = [dictionary.doc2bow(text) for
text in values]
predicted_values = [lda_model[vec] for vec in doc2_corupus]
"""
append to column
"""
for i in range(len(predicted_values)):
temp = [0 for x in range(topic_count)]
for ele in predicted_values[i]:
temp[ele[0]] = ele[1]
predicted_values[i] = temp
for index in range(topic_count):
col_name = "topic_" + str(index)
df[col_name] = [x[index] for x in predicted_values]
return df
import os
"""
Topic modeling features.
Pass cached=False in case you don't want to use the earlier data split.
"""
def topic_model(df_train, df_test, topic_count=10, cached=True):
lda_train_save_file = '../data/lsa_train.csv'
lda_test_save_file = '../data/lsa_test.csv'
if (os.path.exists(lda_train_save_file) and cached):
        return pd.read_csv(lda_train_save_file), pd.read_csv(lda_test_save_file)
### cleanup
#parallel_proces(test_src,'../data/training_user_tweet_processed.csv')
## general remove text
#df_train['tweet'] = df_train['tweet'].fillna("")
#df_test['tweet'] = df_test['tweet'].fillna("")
# df_train['tweet'] = df_train['tweet'].map(general_text_processing)
# df_test['tweet'] = df_test['tweet'].map(general_text_processing)
"""
Parallel tweet.
"""
# df_test['tweet'] = parallelize(df_test, clean_tweet)
# df_train['tweet'] = parallelize(df_train, clean_tweet)
#df_train['tweet'] = df_train['tweet'].map(clean_tweet)
#df_test['tweet'] = df_test['tweet'].map(clean_tweet)
## remove stop words
# df_train['tweet'] = df_train['tweet'].map(remove_stop_words)
# df_test['tweet'] = df_test['tweet'].map(remove_stop_words)
## gensim lda
# dictionary = Dictionary()
# for t in df_train.tweet.values.tolist():
# #print(t)
# dictionary.add_documents([t.split()])
dictionary = Dictionary()
for t in df_train.tweet.values.tolist():
# print(t)
dictionary.add_documents([t])
# for t in df_test['tweet'].values.tolist() :
# print(t)
# print(t[0].split())
# print(dictionary.doc2bow(t.split()))
train_doc2_corupus = [dictionary.doc2bow(text) for text in df_train['tweet'].values.tolist()]
# train_doc2_corupus = [dictionary.doc2bow(text.split()) for
# text in df_train['tweet'].values.tolist()]
# print(train_doc2_corupus)
print("Started LDA")
lda_model = LdaModel(train_doc2_corupus, num_topics=topic_count, iterations=30)
print("Completed LDA")
"""
fill topics
"""
df_test = fill_lda_result_2(df_test, lda_model, dictionary,
topic_count)
df_train = fill_lda_result_2(df_train, lda_model, dictionary,
topic_count)
"""
Save the file
"""
df_train.to_csv(lda_train_save_file, index=False)
df_test.to_csv(lda_test_save_file, index=False)
"""
return
"""
print('LDA Completed')
return df_train, df_test
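# Editor's illustration (assumes both frames carry a tokenised 'tweet' column):
# df_train, df_test = topic_model(df_train, df_test, topic_count=10, cached=False)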
"""
Load the glove 2 vec
"""
def load_glov_vec(glove_file):
mappings = {}
with open(glove_file) as file:
for line in file:
splits = line.split()
mappings[splits[0]] = splits[1:]
return mappings
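# Editor's note (assumption, standard GloVe text format): each line of glove_file
# reads "<token> v1 v2 ... vN", e.g. "the 0.418 0.24968 -0.41242 ...", so
# mappings maps a token to the list of its vector components (still as strings).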
"""
GloVe average word encoding.
@input: dataframe with column tweet
@output: dataframe with average GloVe embedding columns
"""
def glove_encode(df, glove_file, dims=27):
glove_model = load_glov_vec(glove_file)
## create representation
tweets = df['tweet'].values.tolist()
mappings = []
"""
Get the tweet
"""
for t in tweets:
cur = [0 for x in range(dims)]
size = 0
for word in t.split():
word = word.lower()
if word in glove_model:
temp_vec = glove_model[word]
# print(temp_vec)
for i in range(dims):
cur[i] += float(temp_vec[i])
size += 1
if size != 0:
for i in range(dims):
cur[i] /= size
mappings.append(cur)
"""
append dataframe
"""
for i in range(dims):
col_name = 'glove_' + str(i)
df[col_name] = [x[i] for x in mappings]
return df
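# Editor's illustration (file path is an assumption):
# df = glove_encode(df, '../data/glove_embeddings.txt', dims=27)
# adds columns glove_0 ... glove_26 holding each tweet's average word embedding.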
def text_process_split(input):
input_file, start, end, out_folder = input
out_file = os.path.join(out_folder, 'part-{}.csv'.format(start))
df = pd.read_csv(input_file)
df = df[start:end]
df['tweet'] = df.tweet.map(clean_tweet)
df.to_csv(out_file)
return True
def parallel_proces(input_file, out_folder):
df = | pd.read_csv(input_file) | pandas.read_csv |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import pandas as pd
class ScreenipFunctions(object):
"""
Function class for screenip.
"""
def __init__(self):
"""Class representing the functions for screenip"""
super(ScreenipFunctions, self).__init__()
def fw_bird(self):
"""
For birds, the daily water intake rate is calculated using the equation below.
This equation is representative of passerine birds, which represent the majority
of bird species visiting agricultural areas and which have higher daily water flux
requirements than other birds. As a result, the equations represent the most
conservative estimate of pesticide concentrations in water. The resulting daily
water intake rate for the 20 g bird is 0.0162 L.
Flux(water) = (1.180 * BW^0.874) / 1000
where: BW = 20 g
"""
"""
Using fixed value to correctly handle floating point decimals as compared to spreadsheet implementation
"""
fw_bird = 0.0162
self.out_fw_bird = pd.Series([fw_bird for x in range(self.no_of_runs)])
return self.out_fw_bird
# Daily water intake rate for mammals
def fw_mamm(self):
"""
For mammals, the daily water intake rate is calculated using the equation below.
This equation is representative of eutherian herbivore mammals, which have higher
daily water flux requirements compared to other mammals that visit agricultural areas.
The only equation that would generate higher estimates of daily water flux corresponds
to marsupial carnivores, which are not considered to be representative of the majority
of mammals that visit agricultural areas. The resulting daily water intake rate for a
1000 g mammal is 0.172 L.
Flux(water) = (0.708 * BW^0.795) / 1000
where: BW = 1000 g
"""
"""
Using fixed value to correctly handle floating point decimals as compared to spreadsheet implementation
"""
fw_mamm = 0.172
self.out_fw_mamm = pd.Series([fw_mamm for x in range(self.no_of_runs)])
return self.out_fw_mamm
# Upper bound estimate of exposure for birds
def dose_bird(self):
"""
The model calculates the upper bound estimate of exposure in drinking water
(dose-based; units in mg/kg-bw) by multiplying the daily water intake rate (L)
by the chemical solubility (mg/L) and then dividing by the body weight (in kg)
of the assessed animal (See equation below). In cases where water characteristics
(e.g., pH) influence the solubility of a chemical in water, the user should select
the highest available water solubility for use in SIP.
Dose = (Flux(water) * solubility) / BW
where: BW = body weight (kg) of the assessed bird (e.g. mallard duck, bobtail quail, other)
"""
conv = 1000.0
self.out_dose_bird = (self.out_fw_bird * self.solubility) / (self.bodyweight_assessed_bird / conv)
return self.out_dose_bird
# Upper bound estimate of exposure for mammals
def dose_mamm(self):
"""
The model calculates the upper bound estimate of exposure in drinking water
(dose-based; units in mg/kg-bw) by multiplying the daily water intake rate (L)
by the chemical solubility (mg/L) and then dividing by the body weight (in kg)
of the assessed animal (See equation below). In cases where water characteristics
(e.g., pH) influence the solubility of a chemical in water, the user should select
the highest available water solubility for use in SIP.
Dose = (Flux(water) * solubility) / BW
where: BW = body weight (kg) of the assessed animal (e.g. laboratory rat, other)
"""
conv = 1000.0
self.out_dose_mamm = (self.out_fw_mamm * self.solubility) / (self.bodyweight_assessed_mammal / conv)
return self.out_dose_mamm
# Acute adjusted toxicity value for birds
def at_bird(self):
"""
LD50 values for mammals and birds are adjusted using the same approach employed
by T-REX (USEPA 2008). These equations are provided below. In these equations,
AT = adjusted toxicity value (mg/kg-bw); LD50 = endpoint reported by toxicity study
(mg/kg-bw); TW = body weight of tested animal (350g rat, 1580g mallard duck, 178 g
Northern bobwhite quail or weight defined by the model user for an alternative species);
AT = LD50* (AW / TW)^(x-1)
where:
AW = body weight of assessed animal (g)
x = Mineau scaling factor. Chemical specific values for x may be located in the
worksheet titled "Mineau scaling factors." If no chemical specific data are available,
the default value of 1.15 should be used for this parameter.
"""
self.out_at_bird = self.ld50_avian_water * (
(self.bodyweight_assessed_bird / self.ld50_bodyweight_tested_bird) ** (self.mineau_scaling_factor - 1.))
return self.out_at_bird
# Acute adjusted toxicity value for mammals
def at_mamm(self):
"""
LD50 values for mammals and birds are adjusted using the same approach employed
by T-REX (USEPA 2008). These equations are provided below. In these equations,
AT = adjusted toxicity value (mg/kg-bw); LD50 = endpoint reported by toxicity study
(mg/kg-bw); TW = body weight of tested animal (350g rat, 1580g mallard duck, 178 g
Northern bobwhite quail or weight defined by the model user for an alternative species);
AT = LD50* (TW / AW)^0.25
where:
AW = body weight of assessed animal (g)
x = Mineau scaling factor. Chemical specific values for x may be located in the
worksheet titled "Mineau scaling factors." If no chemical specific data are available,
the default value of 1.15 should be used for this parameter.
"""
self.out_at_mamm = self.ld50_mammal_water * (
(self.ld50_bodyweight_tested_mammal / self.bodyweight_assessed_mammal) ** 0.25)
return self.out_at_mamm
# Adjusted chronic toxicity values for birds
# FI = Food Intake Rate
def fi_bird(self, bw_grams):
"""
Daily Food Intake Rate:
Chronic avian toxicity studies produce endpoints based on concentration in food, not dose.
The endpoint is a No Observed Adverse Effects Concentration (NOAEC) that is assumed to be
relevant to all birds, regardless of body weight. In order to convert a reported avian
NOAEC (mg/kg-diet) value to a dose equivalent toxicity value for the assessed animal,
the daily food (dry) intake of the test bird is considered. The daily food intake rate
(FI; units in kg-food) of the test bird is calculated using the equation below.
FI = 0.0582 * BW^0.651
where:
BW = body weight in kg (USEPA 1993). This equation corresponds to a daily food intake
rate for all birds, which generates a lower food intake rate compared to passerines.
The equation is more conservative because it results in a lower dose-equivalent toxicity value.
"""
#bw_grams is the bodyweight of test bird (it's a series with a value per model simulation run)
fi_bird = 0.0582 * ((bw_grams / 1000.) ** 0.651)
return fi_bird
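    # Editor's worked example for fi_bird: for the 178 g bobwhite quail,
    # FI = 0.0582 * (0.178 ** 0.651) ~= 0.019 kg dry food per day.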
# Dose-equivalent chronic toxicity value for birds
def det(self):
"""
Dose Equiv. Toxicity:
The FI value (kg-diet) is multiplied by the reported NOAEC (mg/kg-diet) and then divided by
the test animal's body weight to derive the dose-equivalent chronic toxicity value (mg/kg-bw):
Dose Equiv. Toxicity = (NOAEC * FI) / BW
NOTE: The user enters the lowest available NOAEC for the mallard duck, for the bobwhite quail,
and for any other test species. The model calculates the dose equivalent toxicity values for
all of the modeled values (Cells F20-24 and results worksheet) and then selects the lowest dose
equivalent toxicity value to represent the chronic toxicity of the chemical to birds.
"""
try:
# Body weight of bobtail quail is 178 g (assigned across all model simulation runs)
bw_quail_series = pd.Series([self.bodyweight_bobwhite_quail for x in range(self.no_of_runs)])
self.out_det_quail = (self.noaec_quail * self.fi_bird(bw_quail_series)) / (bw_quail_series / 1000.)
except Exception:
pass
try:
# Body weight of mallard duck is 1580 g (assigned across all model simulation runs)
bw_duck_series = pd.Series([self.bodyweight_mallard_duck for x in range(self.no_of_runs)])
self.out_det_duck = (self.noaec_duck * self.fi_bird(bw_duck_series)) / (bw_duck_series / 1000.)
except Exception:
pass
try:
self.out_det_other_1 = (self.noaec_bird_other_1 * self.fi_bird(self.noaec_bodyweight_bird_other_1)) / (
self.noaec_bodyweight_bird_other_1 / 1000.)
except Exception:
pass
# self.out_det_other_1 = pd.Series(None, list(range(self.chemical_name.size)))
try:
self.out_det_other_2 = (self.noaec_bird_other_2 * self.fi_bird(self.noaec_bodyweight_bird_other_2)) / (
self.noaec_bodyweight_bird_other_2 / 1000.)
except Exception:
pass
# self.out_det_other_2 = pd.Series(None, list(range(self.chemical_name.size)))
# Create DataFrame containing method Series created above
df_noaec = pd.DataFrame({
'out_det_quail': self.out_det_quail,
'out_det_duck': self.out_det_duck,
'out_det_other_1': self.out_det_other_1,
'out_det_other_2': self.out_det_other_2
})
# Create a Series of the minimum values for each row/model run of the above DataFrame
self.out_det = df_noaec.min(axis=1, numeric_only=True)
return self.out_det
# Adjusted chronic toxicty value for mammals
def act(self):
"""
SIP relies upon the No Observed Adverse Effects Level (NOAEL; mg/kg-bw) from a chronic mammalian study.
If only a NOAEC value (in mg/kg-diet) is available, the model user should divide the NOAEC by 20 to
determine the equivalent chronic daily dose. This approach is consistent with that of T-REX, which
relies upon the standard FDA lab rat conversion. (USEPA 2008). Mammalian NOAEL values are adjusted
using the same approach employed by T-REX (USEPA 2008). The equation for mammals is provided below
(variables are defined above).
AT = NOAEL * (TW / AW)^0.25
"""
self.out_act = self.noael_mammal_water * (
(self.noael_bodyweight_tested_mammal / self.bodyweight_assessed_mammal) ** 0.25)
# MAMMILIAN: If only a NOAEC value (in mg/kg-diet) is available, the model user should divide the NOAEC
# by 20 to determine the equivalent chronic daily dose (NOAEL)
return self.out_act
# Acute exposures for birds
def acute_bird(self):
"""
For acute exposures, if the ratio of the upper bound dose to the adjusted LD50 value is <0.1,
the risk assessor can conclude that pesticide exposure to mammals or birds through drinking
water by itself is not an exposure route of concern. If the ratio of the upper bound dose to
the adjusted LD50 value is >=0.1, the risk assessor can conclude that pesticide exposure to
mammals or birds through drinking water by itself is an exposure route of concern.
"""
self.out_acute_bird = self.out_dose_bird / self.out_at_bird
return self.out_acute_bird
def acuconb(self):
"""
Message stating whether or not a risk is present
"""
msg_pass = 'Drinking water exposure alone is NOT a potential concern for birds'
msg_fail = 'Exposure through drinking water alone is a potential concern for birds'
boo_ratios = [ratio < 0.1 for ratio in self.out_acute_bird]
self.out_acuconb = | pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
from pathlib import Path
import pandas as pd, numpy as np
from itertools import combinations
from scipy.spatial.distance import pdist, squareform
from skbio import DistanceMatrix
from skbio.stats.distance import permanova
script_folder = Path.cwd()
outputs_folder = script_folder.parent / 'Outputs'
fname = outputs_folder / 'Seurat_integration_PCA_cell_embeddings.txt'
pca = pd.read_csv(fname, sep='\t', header=0, index_col=0, encoding='utf-8')
pca = pca.iloc[:, :18]
fname = outputs_folder / 'Seurat_integration_SNN_clusters.txt'
clusters = pd.read_csv(fname, sep='\t', header=0, index_col=0, encoding='utf-8')
fname = outputs_folder / 'WT_and_KO_cells_celltypes.txt'
celltypes = pd.read_csv(fname, sep='\t', header=0, index_col=0, encoding='utf-8')
cluster2celltype = {}
for C in [8, 10]:
cells = clusters.index[clusters['Cluster'] == C].tolist()
temp = {}
for barcode in cells:
celltype = celltypes.loc[barcode, 'Maintype']
if celltype not in temp:
temp.update({celltype:[]})
temp[celltype].append(barcode)
cluster2celltype.update({C:temp})
fname = outputs_folder / 'Permanova_results.xlsx'
with pd.ExcelWriter(fname) as writer:
for C in [8, 10]:
frames = []
for (celltype_A, celltype_B) in list(combinations(sorted(cluster2celltype[C].keys()), 2)):
cells = cluster2celltype[C][celltype_A] + cluster2celltype[C][celltype_B]
grouping = [celltype_A]*len(cluster2celltype[C][celltype_A]) + [celltype_B]*len(cluster2celltype[C][celltype_B])
X = pca.loc[cells, :].copy()
dm = squareform(pdist(X, metric='euclidean'))
dist_mat = DistanceMatrix(dm, cells)
np.random.seed(0)
result = permanova(dist_mat, grouping, permutations=1000)
result.name = ('%s vs %s'%(celltype_A, celltype_B))
frames.append(result)
result = | pd.concat(frames, axis='columns') | pandas.concat |
import os
import json
import requests
import thingspeak
import datetime
import pandas as pd
from functools import reduce
from itertools import tee
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def get_block(start, end):
params = { 'start': str(start),
'end': str(end),
'average': '60'
}
try:
r = channel.get(params)
    except Exception:
        print('error')
        raise
data = [list(feed.values()) for feed in json.loads(r)['feeds']]
block_df = pd.DataFrame(data, columns = cols)
return block_df
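# Editor's illustration (assumes `channel` is a thingspeak.Channel and `cols`
# lists the feed field names, both defined elsewhere in this script):
# block_df = get_block(datetime.datetime(2021, 1, 1), datetime.datetime(2021, 1, 2))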
sensor_list = | pd.read_csv('pa_sensor_list.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas import DataFrame, Index, Series, Timestamp
from pandas.util.testing import assert_almost_equal
def _assert_almost_equal_both(a, b, **kwargs):
"""
Check that two objects are approximately equal.
This check is performed commutatively.
Parameters
----------
a : object
The first object to compare.
b : object
The second object to compare.
kwargs : dict
The arguments passed to `assert_almost_equal`.
"""
assert_almost_equal(a, b, **kwargs)
assert_almost_equal(b, a, **kwargs)
def _assert_not_almost_equal(a, b, **kwargs):
"""
Check that two objects are not approximately equal.
Parameters
----------
a : object
The first object to compare.
b : object
The second object to compare.
kwargs : dict
The arguments passed to `assert_almost_equal`.
"""
try:
assert_almost_equal(a, b, **kwargs)
msg = ("{a} and {b} were approximately equal "
"when they shouldn't have been").format(a=a, b=b)
pytest.fail(msg=msg)
except AssertionError:
pass
def _assert_not_almost_equal_both(a, b, **kwargs):
"""
Check that two objects are not approximately equal.
This check is performed commutatively.
Parameters
----------
a : object
The first object to compare.
b : object
The second object to compare.
kwargs : dict
The arguments passed to `tm.assert_almost_equal`.
"""
_assert_not_almost_equal(a, b, **kwargs)
_assert_not_almost_equal(b, a, **kwargs)
@pytest.mark.parametrize("a,b", [
(1.1, 1.1), (1.1, 1.100001), (np.int16(1), 1.000001),
(np.float64(1.1), 1.1), (np.uint32(5), 5),
])
def test_assert_almost_equal_numbers(a, b):
_assert_almost_equal_both(a, b)
@pytest.mark.parametrize("a,b", [
(1.1, 1), (1.1, True), (1, 2), (1.0001, np.int16(1)),
])
def test_assert_not_almost_equal_numbers(a, b):
_assert_not_almost_equal_both(a, b)
@pytest.mark.parametrize("a,b", [
(0, 0), (0, 0.0), (0, np.float64(0)), (0.000001, 0),
])
def test_assert_almost_equal_numbers_with_zeros(a, b):
_assert_almost_equal_both(a, b)
@pytest.mark.parametrize("a,b", [
(0.001, 0), (1, 0),
])
def test_assert_not_almost_equal_numbers_with_zeros(a, b):
_assert_not_almost_equal_both(a, b)
@pytest.mark.parametrize("a,b", [
(1, "abc"), (1, [1, ]), (1, object()),
])
def test_assert_not_almost_equal_numbers_with_mixed(a, b):
_assert_not_almost_equal_both(a, b)
@pytest.mark.parametrize(
"left_dtype", ["M8[ns]", "m8[ns]", "float64", "int64", "object"])
@pytest.mark.parametrize(
"right_dtype", ["M8[ns]", "m8[ns]", "float64", "int64", "object"])
def test_assert_almost_equal_edge_case_ndarrays(left_dtype, right_dtype):
# Empty compare.
_assert_almost_equal_both(np.array([], dtype=left_dtype),
np.array([], dtype=right_dtype),
check_dtype=False)
def test_assert_almost_equal_dicts():
_assert_almost_equal_both({"a": 1, "b": 2}, {"a": 1, "b": 2})
@pytest.mark.parametrize("a,b", [
({"a": 1, "b": 2}, {"a": 1, "b": 3}),
({"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}),
({"a": 1}, 1), ({"a": 1}, "abc"), ({"a": 1}, [1, ]),
])
def test_assert_not_almost_equal_dicts(a, b):
_assert_not_almost_equal_both(a, b)
@pytest.mark.parametrize("val", [1, 2])
def test_assert_almost_equal_dict_like_object(val):
dict_val = 1
real_dict = dict(a=val)
class DictLikeObj:
def keys(self):
return "a",
def __getitem__(self, item):
if item == "a":
return dict_val
func = (_assert_almost_equal_both if val == dict_val
else _assert_not_almost_equal_both)
func(real_dict, DictLikeObj(), check_dtype=False)
def test_assert_almost_equal_strings():
_assert_almost_equal_both("abc", "abc")
@pytest.mark.parametrize("a,b", [
("abc", "abcd"), ("abc", "abd"), ("abc", 1), ("abc", [1, ]),
])
def test_assert_not_almost_equal_strings(a, b):
_assert_not_almost_equal_both(a, b)
@pytest.mark.parametrize("a,b", [
([1, 2, 3], [1, 2, 3]), (np.array([1, 2, 3]), np.array([1, 2, 3])),
])
def test_assert_almost_equal_iterables(a, b):
_assert_almost_equal_both(a, b)
@pytest.mark.parametrize("a,b", [
# Class is different.
(np.array([1, 2, 3]), [1, 2, 3]),
# Dtype is different.
(np.array([1, 2, 3]), np.array([1., 2., 3.])),
# Can't compare generators.
(iter([1, 2, 3]), [1, 2, 3]), ([1, 2, 3], [1, 2, 4]),
([1, 2, 3], [1, 2, 3, 4]), ([1, 2, 3], 1),
])
def test_assert_not_almost_equal_iterables(a, b):
_assert_not_almost_equal(a, b)
def test_assert_almost_equal_null():
_assert_almost_equal_both(None, None)
@pytest.mark.parametrize("a,b", [
(None, np.NaN), (None, 0), (np.NaN, 0),
])
def test_assert_not_almost_equal_null(a, b):
_assert_not_almost_equal(a, b)
@pytest.mark.parametrize("a,b", [
(np.inf, np.inf), (np.inf, float("inf")),
(np.array([np.inf, np.nan, -np.inf]),
np.array([np.inf, np.nan, -np.inf])),
(np.array([np.inf, None, -np.inf], dtype=np.object_),
np.array([np.inf, np.nan, -np.inf], dtype=np.object_)),
])
def test_assert_almost_equal_inf(a, b):
_assert_almost_equal_both(a, b)
def test_assert_not_almost_equal_inf():
_assert_not_almost_equal_both(np.inf, 0)
@pytest.mark.parametrize("a,b", [
(Index([1., 1.1]), Index([1., 1.100001])),
(Series([1., 1.1]), Series([1., 1.100001])),
(np.array([1.1, 2.000001]), np.array([1.1, 2.0])),
(DataFrame({"a": [1., 1.1]}), DataFrame({"a": [1., 1.100001]}))
])
def test_assert_almost_equal_pandas(a, b):
_assert_almost_equal_both(a, b)
def test_assert_almost_equal_object():
a = [Timestamp("2011-01-01"), Timestamp("2011-01-01")]
b = [Timestamp("2011-01-01"), Timestamp("2011-01-01")]
_assert_almost_equal_both(a, b)
def test_assert_almost_equal_value_mismatch():
msg = "expected 2\\.00000 but got 1\\.00000, with decimal 5"
with pytest.raises(AssertionError, match=msg):
| assert_almost_equal(1, 2) | pandas.util.testing.assert_almost_equal |
from . import __VERSION__
from .cc_metrics import CC_METRICS
from .season import SORT_BY_COLUMNS
from .season import SPECIAL_REPORTS
import argparse
import os
import tbapy
import pandas as pd
import numpy as np
from numpy.linalg import linalg
from numpy.linalg import LinAlgError
def get_opr_df(oprs_raw):
teams = [int(team[3:]) for team, _ in sorted(list(oprs_raw["oprs"].items()))]
opr_df = pd.DataFrame(index=teams)
opr_df["OPR"] = [opr for _, opr in sorted(list(oprs_raw["oprs"].items()))]
return opr_df.sort_index()
def get_rankings_df(rankings_raw):
teams = [int(i["team_key"][3:]) for i in rankings_raw["rankings"]]
ranking_df = pd.DataFrame(index=teams)
ranking_df["W"] = [i["record"]["wins"] for i in rankings_raw["rankings"]]
ranking_df["L"] = [i["record"]["losses"] for i in rankings_raw["rankings"]]
ranking_df["T"] = [i["record"]["ties"] for i in rankings_raw["rankings"]]
# ranking_df["RP"] = [i["extra_stats"][0] for i in rankings_raw["rankings"]]
ranking_df["RNK"] = [i["rank"] for i in rankings_raw["rankings"]]
ranking_df["DQ"] = [i["dq"] for i in rankings_raw["rankings"]]
return ranking_df.sort_index()
def is_completed(match):
return match["post_result_time"] is not None and match["post_result_time"] > 0
def get_match_cc_metrics(season, match_data, color):
results = {}
for cc_name, cc_func in CC_METRICS[season]:
results[cc_name] = cc_func(match_data=match_data, alliance=color)
return results
def get_cc_metric(*, season, teams, matches, metric_name):
"""Create a calculated contribution (i.e. OPR) table"""
# Use linear equation of the form: m * x = s, where x is the calc contribution vector
m = []
s = []
for match in matches:
# Only consider qualification matches
if is_completed(match) and match["comp_level"] == "qm":
# For each alliance in each match
for color in ["red", "blue"]:
# Get the values of the desired metrics
scores = get_match_cc_metrics(
season=season, match_data=match, color=color
)
s.append(scores[metric_name])
# Populate the matrix that shows which teams participated in the match
row = [0] * len(teams)
for team_key in match["alliances"][color]["team_keys"]:
team = int(team_key[3:])
row[list(teams).index(team)] = 1
m.append(row)
# Normalize the overdetermined system of equations using least squares
m_norm = np.array(m).transpose() @ np.array(m)
s_norm = np.array(m).transpose() @ np.array(s)
# Solve for x
    cc_scores, cc_rms_error = None, None  # avoid NameError when the solve is skipped or fails
    if m_norm.ndim == 2:
try:
cc_scores = linalg.solve(m_norm, s_norm)
cc_rms_error = (sum(((np.array(m) @ cc_scores) - np.array(s)) ** 2) / len(s)) ** 0.5
except LinAlgError:
print(f"Could not calculate {metric_name}")
return cc_scores, cc_rms_error
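# Editor's illustration (metric name and variable names are assumptions): with
# `matches` pulled from the TBA API for one event and `teams` its team list,
# scores, rms = get_cc_metric(season="2020", teams=teams, matches=matches,
#                             metric_name="OPR")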
def get_cc_metrics_df(matches, event_id, teams):
"""Create a dataframe for the calculated contribution metrics"""
season = event_id[:4]
cc_df = | pd.DataFrame(index=teams) | pandas.DataFrame |
"""arbin res-type data files"""
import os
import sys
import tempfile
import shutil
import logging
import platform
import warnings
import time
import numpy as np
import pandas as pd
from cellpy.readers.core import (
FileID,
Cell,
check64bit,
humanize_bytes,
xldate_as_datetime,
)
from cellpy.parameters.internal_settings import HeaderDict, get_headers_normal
from cellpy.readers.instruments.mixin import Loader, MINIMUM_SELECTION
from cellpy import prms
DEBUG_MODE = prms.Reader.diagnostics
ALLOW_MULTI_TEST_FILE = False
# Select odbc module
ODBC = prms._odbc
SEARCH_FOR_ODBC_DRIVERS = prms._search_for_odbc_driver
use_subprocess = prms.Instruments.Arbin.use_subprocess
detect_subprocess_need = prms.Instruments.Arbin.detect_subprocess_need
# Finding out some stuff about the platform (TODO: refactor to mixin)
is_posix = False
is_macos = False
if os.name == "posix":
is_posix = True
current_platform = platform.system()
if current_platform == "Darwin":
is_macos = True
if DEBUG_MODE:
logging.debug("DEBUG_MODE")
logging.debug(f"ODBC: {ODBC}")
logging.debug(f"SEARCH_FOR_ODBC_DRIVERS: {SEARCH_FOR_ODBC_DRIVERS}")
logging.debug(f"use_subprocess: {use_subprocess}")
logging.debug(f"detect_subprocess_need: {detect_subprocess_need}")
logging.debug(f"current_platform: {current_platform}")
# TODO: refactor to mixin
if detect_subprocess_need:
logging.debug("detect_subprocess_need is True: checking versions")
python_version, os_version = platform.architecture()
if python_version == "64bit" and prms.Instruments.Arbin.office_version == "32bit":
logging.debug(
"python 64bit and office 32bit -> " "setting use_subprocess to True"
)
use_subprocess = True
if use_subprocess and not is_posix:
# The windows users most likely have a strange custom path to mdbtools etc.
logging.debug(
"using subprocess (most lilkely mdbtools) " "on non-posix (most likely windows)"
)
if not prms.Instruments.Arbin.sub_process_path:
sub_process_path = str(prms._sub_process_path)
else:
sub_process_path = str(prms.Instruments.Arbin.sub_process_path)
if is_posix:
sub_process_path = "mdb-export"
try:
driver_dll = prms.Instruments.Arbin.odbc_driver
except AttributeError:
driver_dll = None
# TODO: deprecate ado
use_ado = False
if ODBC == "ado":
use_ado = True
logging.debug("Trying to use adodbapi as ado loader")
try:
import adodbapi as dbloader # http://adodbapi.sourceforge.net/
except ImportError:
use_ado = False
if not use_ado:
if ODBC == "pyodbc":
try:
import pyodbc as dbloader
except ImportError:
warnings.warn("COULD NOT LOAD DBLOADER!", ImportWarning)
dbloader = None
elif ODBC == "pypyodbc":
try:
import pypyodbc as dbloader
except ImportError:
warnings.warn("COULD NOT LOAD DBLOADER!", ImportWarning)
dbloader = None
if DEBUG_MODE:
logging.debug(f"dbloader: {dbloader}")
# Names of the tables in the .res db that is used by cellpy
TABLE_NAMES = {
"normal": "Channel_Normal_Table",
"global": "Global_Table",
"statistic": "Channel_Statistic_Table",
"aux_global": "Aux_Global_Data_Table",
"aux": "Auxiliary_Table",
}
summary_headers_renaming_dict = {
"test_id_txt": "Test_ID",
"data_point_txt": "Data_Point",
"vmax_on_cycle_txt": "Vmax_On_Cycle",
"charge_time_txt": "Charge_Time",
"discharge_time_txt": "Discharge_Time",
}
normal_headers_renaming_dict = {
"aci_phase_angle_txt": "ACI_Phase_Angle",
"ref_aci_phase_angle_txt": "Reference_ACI_Phase_Angle",
"ac_impedance_txt": "AC_Impedance",
"ref_ac_impedance_txt": "Reference_AC_Impedance",
"charge_capacity_txt": "Charge_Capacity",
"charge_energy_txt": "Charge_Energy",
"current_txt": "Current",
"cycle_index_txt": "Cycle_Index",
"data_point_txt": "Data_Point",
"datetime_txt": "DateTime",
"discharge_capacity_txt": "Discharge_Capacity",
"discharge_energy_txt": "Discharge_Energy",
"internal_resistance_txt": "Internal_Resistance",
"is_fc_data_txt": "Is_FC_Data",
"step_index_txt": "Step_Index",
"sub_step_index_txt": "Sub_Step_Index", # new
"step_time_txt": "Step_Time",
"sub_step_time_txt": "Sub_Step_Time", # new
"test_id_txt": "Test_ID",
"test_time_txt": "Test_Time",
"voltage_txt": "Voltage",
"ref_voltage_txt": "Reference_Voltage", # new
"dv_dt_txt": "dV/dt",
"frequency_txt": "Frequency", # new
"amplitude_txt": "Amplitude", # new
}
class ArbinLoader(Loader):
""" Class for loading arbin-data from res-files.
Implemented Cellpy params (prms.Instruments.Arbin):
max_res_filesize
chunk_size
max_chunks
use_subprocess
detect_subprocess_need
sub_process_path
office_version
SQL_server
"""
def __init__(self):
"""initiates the ArbinLoader class"""
# could use __init__(self, cellpydata_object) and
# set self.logger = cellpydata_object.logger etc.
# then remember to include that as prm in "out of class" functions
# self.prms = prms
self.logger = logging.getLogger(__name__)
# use the following prm to limit to loading only
# one cycle or from cycle>x to cycle<x+n
# prms.Reader["limit_loaded_cycles"] = [cycle from, cycle to]
self.arbin_headers_normal = (
self.get_headers_normal()
) # the column headers defined by Arbin
self.cellpy_headers_normal = (
get_headers_normal()
) # the column headers defined by cellpy
self.arbin_headers_global = self.get_headers_global()
self.arbin_headers_aux_global = self.get_headers_aux_global()
self.arbin_headers_aux = self.get_headers_aux()
self.current_chunk = 0 # use this to set chunks to load
@staticmethod
def get_raw_units():
raw_units = dict()
raw_units["current"] = 1.0 # A
raw_units["charge"] = 1.0 # Ah
raw_units["mass"] = 0.001 # g
return raw_units
@staticmethod
def get_headers_normal():
"""Defines the so-called normal column headings for Arbin .res-files"""
headers = HeaderDict()
# - normal (raw-data) column headings (specific for Arbin)
headers["aci_phase_angle_txt"] = "ACI_Phase_Angle"
headers["ref_aci_phase_angle_txt"] = "Reference_ACI_Phase_Angle"
headers["ac_impedance_txt"] = "AC_Impedance"
headers["ref_ac_impedance_txt"] = "Reference_AC_Impedance" # new
headers["charge_capacity_txt"] = "Charge_Capacity"
headers["charge_energy_txt"] = "Charge_Energy"
headers["current_txt"] = "Current"
headers["cycle_index_txt"] = "Cycle_Index"
headers["data_point_txt"] = "Data_Point"
headers["datetime_txt"] = "DateTime"
headers["discharge_capacity_txt"] = "Discharge_Capacity"
headers["discharge_energy_txt"] = "Discharge_Energy"
headers["internal_resistance_txt"] = "Internal_Resistance"
headers["is_fc_data_txt"] = "Is_FC_Data"
headers["step_index_txt"] = "Step_Index"
headers["sub_step_index_txt"] = "Sub_Step_Index" # new
headers["step_time_txt"] = "Step_Time"
headers["sub_step_time_txt"] = "Sub_Step_Time" # new
headers["test_id_txt"] = "Test_ID"
headers["test_time_txt"] = "Test_Time"
headers["voltage_txt"] = "Voltage"
headers["ref_voltage_txt"] = "Reference_Voltage" # new
headers["dv_dt_txt"] = "dV/dt"
headers["frequency_txt"] = "Frequency" # new
headers["amplitude_txt"] = "Amplitude" # new
return headers
@staticmethod
def get_headers_aux():
"""Defines the so-called auxiliary table column headings for Arbin .res-files"""
headers = HeaderDict()
# - aux column headings (specific for Arbin)
headers["test_id_txt"] = "Test_ID"
headers["data_point_txt"] = "Data_Point"
headers["aux_index_txt"] = "Auxiliary_Index"
headers["data_type_txt"] = "Data_Type"
headers["x_value_txt"] = "X"
headers["x_dt_value"] = "dX_dt"
return headers
@staticmethod
def get_headers_aux_global():
"""Defines the so-called auxiliary global column headings for Arbin .res-files"""
headers = HeaderDict()
# - aux global column headings (specific for Arbin)
headers["channel_index_txt"] = "Channel_Index"
headers["aux_index_txt"] = "Auxiliary_Index"
headers["data_type_txt"] = "Data_Type"
headers["aux_name_txt"] = "Nickname"
headers["aux_unit_txt"] = "Unit"
return headers
@staticmethod
def get_headers_global():
"""Defines the so-called global column headings for Arbin .res-files"""
headers = HeaderDict()
# - global column headings (specific for Arbin)
headers["applications_path_txt"] = "Applications_Path"
headers["channel_index_txt"] = "Channel_Index"
headers["channel_number_txt"] = "Channel_Number"
headers["channel_type_txt"] = "Channel_Type"
headers["comments_txt"] = "Comments"
headers["creator_txt"] = "Creator"
headers["daq_index_txt"] = "DAQ_Index"
headers["item_id_txt"] = "Item_ID"
headers["log_aux_data_flag_txt"] = "Log_Aux_Data_Flag"
headers["log_chanstat_data_flag_txt"] = "Log_ChanStat_Data_Flag"
headers["log_event_data_flag_txt"] = "Log_Event_Data_Flag"
headers["log_smart_battery_data_flag_txt"] = "Log_Smart_Battery_Data_Flag"
headers["mapped_aux_conc_cnumber_txt"] = "Mapped_Aux_Conc_CNumber"
headers["mapped_aux_di_cnumber_txt"] = "Mapped_Aux_DI_CNumber"
headers["mapped_aux_do_cnumber_txt"] = "Mapped_Aux_DO_CNumber"
headers["mapped_aux_flow_rate_cnumber_txt"] = "Mapped_Aux_Flow_Rate_CNumber"
headers["mapped_aux_ph_number_txt"] = "Mapped_Aux_PH_Number"
headers["mapped_aux_pressure_number_txt"] = "Mapped_Aux_Pressure_Number"
headers["mapped_aux_temperature_number_txt"] = "Mapped_Aux_Temperature_Number"
headers["mapped_aux_voltage_number_txt"] = "Mapped_Aux_Voltage_Number"
headers[
"schedule_file_name_txt"
] = "Schedule_File_Name" # KEEP FOR CELLPY FILE FORMAT
headers["start_datetime_txt"] = "Start_DateTime"
headers["test_id_txt"] = "Test_ID" # KEEP FOR CELLPY FILE FORMAT
headers["test_name_txt"] = "Test_Name" # KEEP FOR CELLPY FILE FORMAT
return headers
@staticmethod
def get_raw_limits():
raw_limits = dict()
raw_limits["current_hard"] = 0.000_000_000_000_1
raw_limits["current_soft"] = 0.000_01
raw_limits["stable_current_hard"] = 2.0
raw_limits["stable_current_soft"] = 4.0
raw_limits["stable_voltage_hard"] = 2.0
raw_limits["stable_voltage_soft"] = 4.0
raw_limits["stable_charge_hard"] = 0.001
raw_limits["stable_charge_soft"] = 5.0
raw_limits["ir_change"] = 0.00001
return raw_limits
def _get_res_connector(self, temp_filename):
if use_ado:
is64bit_python = check64bit(current_system="python")
if is64bit_python:
constr = (
"Provider=Microsoft.ACE.OLEDB.12.0; Data Source=%s" % temp_filename
)
else:
constr = (
"Provider=Microsoft.Jet.OLEDB.4.0; Data Source=%s" % temp_filename
)
return constr
if SEARCH_FOR_ODBC_DRIVERS:
logging.debug("Searching for odbc drivers")
try:
drivers = [
driver
for driver in dbloader.drivers()
if "Microsoft Access Driver" in driver
]
logging.debug(f"Found these: {drivers}")
driver = drivers[0]
except IndexError as e:
logging.debug(
"Unfortunately, it seems the " "list of drivers is emtpy."
)
logging.debug("Use driver-name from config (if existing).")
driver = driver_dll
if is_macos:
driver = "/usr/local/lib/libmdbodbc.dylib"
else:
if not driver:
print(
"\nCould not find any odbc-drivers suitable "
"for .res-type files. "
"Check out the homepage of pydobc for info on "
"installing drivers"
)
print(
"One solution that might work is downloading "
"the Microsoft Access database engine (in correct"
" bytes (32 or 64)) "
"from:\n"
"https://www.microsoft.com/en-us/download/"
"details.aspx?id=13255"
)
print(
"Or install mdbtools and set it up "
"(check the cellpy docs for help)"
)
print("\n")
else:
logging.debug("Using driver dll from config file")
logging.debug(f"driver dll: {driver}")
self.logger.debug(f"odbc constr: {driver}")
else:
is64bit_python = check64bit(current_system="python")
if is64bit_python:
driver = "{Microsoft Access Driver (*.mdb, *.accdb)}"
else:
driver = "Microsoft Access Driver (*.mdb)"
self.logger.debug("odbc constr: {}".format(driver))
constr = "Driver=%s;Dbq=%s" % (driver, temp_filename)
logging.debug(f"constr: {constr}")
return constr
def _clean_up_loadres(self, cur, conn, filename):
if cur is not None:
cur.close() # adodbapi
if conn is not None:
conn.close() # adodbapi
if os.path.isfile(filename):
try:
os.remove(filename)
except WindowsError as e:
self.logger.warning("could not remove tmp-file\n%s %s" % (filename, e))
def _post_process(self, data):
fix_datetime = True
set_index = True
rename_headers = True
# TODO: insert post-processing and div tests here
# - check dtypes
# Remark that we also set index during saving the file to hdf5 if
# it is not set.
if rename_headers:
columns = {}
for key in self.arbin_headers_normal:
old_header = normal_headers_renaming_dict[key]
new_header = self.cellpy_headers_normal[key]
columns[old_header] = new_header
data.raw.rename(index=str, columns=columns, inplace=True)
try:
# TODO: check if summary df is existing (to only check if it is
# empty will give an error later!)
columns = {}
for key, old_header in summary_headers_renaming_dict.items():
try:
columns[old_header] = self.cellpy_headers_normal[key]
except KeyError:
columns[old_header] = old_header.lower()
data.summary.rename(index=str, columns=columns, inplace=True)
except Exception as e:
logging.debug(f"Could not rename summary df ::\n{e}")
if fix_datetime:
h_datetime = self.cellpy_headers_normal.datetime_txt
logging.debug("converting to datetime format")
# print(data.raw.columns)
data.raw[h_datetime] = data.raw[h_datetime].apply(
xldate_as_datetime, option="to_datetime"
)
h_datetime = h_datetime
if h_datetime in data.summary:
data.summary[h_datetime] = data.summary[h_datetime].apply(
xldate_as_datetime, option="to_datetime"
)
if set_index:
hdr_data_point = self.cellpy_headers_normal.data_point_txt
if data.raw.index.name != hdr_data_point:
data.raw = data.raw.set_index(hdr_data_point, drop=False)
return data
def _inspect(self, run_data):
"""Inspect the file -> reports to log (debug)"""
if not any([DEBUG_MODE]):
return run_data
if DEBUG_MODE:
checked_rundata = []
for data in run_data:
new_cols = data.raw.columns
for col in self.arbin_headers_normal:
if col not in new_cols:
logging.debug(f"Missing col: {col}")
# data.raw[col] = np.nan
checked_rundata.append(data)
return checked_rundata
def _iterdump(self, file_name, headers=None):
"""
Function for dumping values from a file.
Should only be used by developers.
Args:
file_name: name of the file
headers: list of headers to pick
default:
["Discharge_Capacity", "Charge_Capacity"]
Returns: pandas.DataFrame
"""
if headers is None:
headers = ["Discharge_Capacity", "Charge_Capacity"]
step_txt = self.arbin_headers_normal.step_index_txt
point_txt = self.arbin_headers_normal.data_point_txt
cycle_txt = self.arbin_headers_normal.cycle_index_txt
self.logger.debug("iterating through file: %s" % file_name)
if not os.path.isfile(file_name):
print("Missing file_\n %s" % file_name)
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.info(txt)
table_name_global = TABLE_NAMES["global"]
table_name_stats = TABLE_NAMES["statistic"]
table_name_normal = TABLE_NAMES["normal"]
# creating temporary file and connection
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
shutil.copy2(file_name, temp_dir)
constr = self._get_res_connector(temp_filename)
if use_ado:
conn = dbloader.connect(constr)
else:
conn = dbloader.connect(constr, autocommit=True)
self.logger.debug("tmp file: %s" % temp_filename)
self.logger.debug("constr str: %s" % constr)
# --------- read global-data ------------------------------------
self.logger.debug("reading global data table")
sql = "select * from %s" % table_name_global
global_data_df = pd.read_sql_query(sql, conn)
# col_names = list(global_data_df.columns.values)
self.logger.debug("sql statement: %s" % sql)
tests = global_data_df[self.arbin_headers_normal.test_id_txt]
number_of_sets = len(tests)
self.logger.debug("number of datasets: %i" % number_of_sets)
self.logger.debug("only selecting first test")
test_no = 0
self.logger.debug("setting data for test number %i" % test_no)
loaded_from = file_name
# fid = FileID(file_name)
start_datetime = global_data_df[
self.arbin_headers_global["start_datetime_txt"]
][test_no]
test_ID = int(
global_data_df[self.arbin_headers_normal.test_id_txt][test_no]
) # OBS
test_name = global_data_df[self.arbin_headers_global["test_name_txt"]][test_no]
# --------- read raw-data (normal-data) -------------------------
self.logger.debug("reading raw-data")
columns = ["Data_Point", "Step_Index", "Cycle_Index"]
columns.extend(headers)
columns_txt = ", ".join(["%s"] * len(columns)) % tuple(columns)
sql_1 = "select %s " % columns_txt
sql_2 = "from %s " % table_name_normal
sql_3 = "where %s=%s " % (self.arbin_headers_normal.test_id_txt, test_ID)
sql_5 = "order by %s" % self.arbin_headers_normal.data_point_txt
import time
info_list = []
info_header = ["cycle", "row_count", "start_point", "end_point"]
info_header.extend(headers)
self.logger.info(" ".join(info_header))
self.logger.info("-------------------------------------------------")
for cycle_number in range(1, 2000):
t1 = time.time()
self.logger.debug("picking cycle %i" % cycle_number)
sql_4 = "AND %s=%i " % (cycle_txt, cycle_number)
sql = sql_1 + sql_2 + sql_3 + sql_4 + sql_5
self.logger.debug("sql statement: %s" % sql)
normal_df = pd.read_sql_query(sql, conn)
t2 = time.time()
dt = t2 - t1
self.logger.debug("time: %f" % dt)
if normal_df.empty:
self.logger.debug("reached the end")
break
row_count, _ = normal_df.shape
start_point = normal_df[point_txt].min()
end_point = normal_df[point_txt].max()
last = normal_df.iloc[-1, :]
step_list = [cycle_number, row_count, start_point, end_point]
step_list.extend([last[x] for x in headers])
info_list.append(step_list)
self._clean_up_loadres(None, conn, temp_filename)
info_dict = pd.DataFrame(info_list, columns=info_header)
return info_dict
def investigate(self, file_name):
"""Investigate a .res file.
Args:
file_name: name of the file
Returns: dictionary with div. stats and info.
"""
step_txt = self.arbin_headers_normal.step_index_txt
point_txt = self.arbin_headers_normal.data_point_txt
cycle_txt = self.arbin_headers_normal.cycle_index_txt
self.logger.debug("investigating file: %s" % file_name)
if not os.path.isfile(file_name):
print("Missing file_\n %s" % file_name)
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
self.logger.info(txt)
table_name_global = TABLE_NAMES["global"]
table_name_stats = TABLE_NAMES["statistic"]
table_name_normal = TABLE_NAMES["normal"]
# creating temporary file and connection
temp_dir = tempfile.gettempdir()
temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
shutil.copy2(file_name, temp_dir)
constr = self._get_res_connector(temp_filename)
if use_ado:
conn = dbloader.connect(constr)
else:
conn = dbloader.connect(constr, autocommit=True)
self.logger.debug("tmp file: %s" % temp_filename)
self.logger.debug("constr str: %s" % constr)
# --------- read global-data ------------------------------------
self.logger.debug("reading global data table")
sql = "select * from %s" % table_name_global
global_data_df = pd.read_sql_query(sql, conn)
# col_names = list(global_data_df.columns.values)
self.logger.debug("sql statement: %s" % sql)
tests = global_data_df[self.arbin_headers_normal.test_id_txt]
number_of_sets = len(tests)
self.logger.debug("number of datasets: %i" % number_of_sets)
self.logger.debug("only selecting first test")
test_no = 0
self.logger.debug("setting data for test number %i" % test_no)
loaded_from = file_name
# fid = FileID(file_name)
start_datetime = global_data_df[
self.arbin_headers_global["start_datetime_txt"]
][test_no]
test_ID = int(
global_data_df[self.arbin_headers_normal.test_id_txt][test_no]
) # OBS
test_name = global_data_df[self.arbin_headers_global["test_name_txt"]][test_no]
# --------- read raw-data (normal-data) -------------------------
self.logger.debug("reading raw-data")
columns = ["Data_Point", "Step_Index", "Cycle_Index"]
columns_txt = ", ".join(["%s"] * len(columns)) % tuple(columns)
sql_1 = "select %s " % columns_txt
sql_2 = "from %s " % table_name_normal
sql_3 = "where %s=%s " % (self.arbin_headers_normal.test_id_txt, test_ID)
sql_5 = "order by %s" % self.arbin_headers_normal.data_point_txt
import time
info_list = []
info_header = ["cycle", "step", "row_count", "start_point", "end_point"]
self.logger.info(" ".join(info_header))
self.logger.info("-------------------------------------------------")
for cycle_number in range(1, 2000):
t1 = time.time()
self.logger.debug("picking cycle %i" % cycle_number)
sql_4 = "AND %s=%i " % (cycle_txt, cycle_number)
sql = sql_1 + sql_2 + sql_3 + sql_4 + sql_5
self.logger.debug("sql statement: %s" % sql)
normal_df = pd.read_sql_query(sql, conn)
t2 = time.time()
dt = t2 - t1
self.logger.debug("time: %f" % dt)
if normal_df.empty:
self.logger.debug("reached the end")
break
row_count, _ = normal_df.shape
steps = normal_df[self.arbin_headers_normal.step_index_txt].unique()
txt = "cycle %i: %i [" % (cycle_number, row_count)
for step in steps:
self.logger.debug(" step: %i" % step)
step_df = normal_df.loc[normal_df[step_txt] == step]
step_row_count, _ = step_df.shape
start_point = step_df[point_txt].min()
end_point = step_df[point_txt].max()
txt += " %i-(%i)" % (step, step_row_count)
step_list = [cycle_number, step, step_row_count, start_point, end_point]
info_list.append(step_list)
txt += "]"
self.logger.info(txt)
self._clean_up_loadres(None, conn, temp_filename)
info_dict = pd.DataFrame(info_list, columns=info_header)
return info_dict
def repair(self, file_name):
"""try to repair a broken/corrupted file"""
        raise NotImplementedError
def dump(self, file_name, path):
"""Dumps the raw file to an intermediate hdf5 file.
This method can be used if the raw file is too difficult to load and it
is likely that it is more efficient to convert it to an hdf5 format
and then load it using the `from_intermediate_file` function.
Args:
file_name: name of the raw file
path: path to where to store the intermediate hdf5 file (optional)
Returns:
full path to stored intermediate hdf5 file
information about the raw file (needed by the
`from_intermediate_file` function)
"""
# information = None # contains information needed by the from_
# intermediate_file reader
# full_path = None
# return full_path, information
        raise NotImplementedError
def _query_table(self, table_name, conn, sql=None):
self.logger.debug(f"reading {table_name}")
if sql is None:
sql = f"select * from {table_name}"
self.logger.debug(f"sql statement: {sql}")
df = | pd.read_sql_query(sql, conn) | pandas.read_sql_query |
import pickle
from copy import deepcopy
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from scipy.stats import pearsonr
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from tqdm import tqdm
from transformers import BertModel
from transformers import BertTokenizer
from util import get_args
class HybridModel(nn.Module):
def __init__(self, num_features, freeze_bert=False, is_regressor=True):
super(HybridModel, self).__init__()
# Instantiating BERT model object
self.bert_layer = BertModel.from_pretrained('bert-base-uncased')
self.num_features = num_features
self.is_regressor = is_regressor
# Freeze bert layers
if freeze_bert:
for p in self.bert_layer.parameters():
p.requires_grad = False
# classification layers
self.classification_layer = nn.Linear(768, self.num_features)
self.sigmoid = nn.Sigmoid()
# self.attention_layer = nn.Sequential(nn.Linear(768, self.num_features), nn.Softmax(dim=1))
self.regression_layer = nn.Linear(768, 1)
def set_as_classifier(self):
self.is_regressor = False
def set_as_regressor(self):
self.is_regressor = True
def forward(self, seq, attn_masks):
'''
Inputs:
-seq : Tensor of shape [B, T] containing token ids of sequences
-attn_masks : Tensor of shape [B, T] containing attention masks to be used to avoid contibution of PAD tokens
'''
# Feeding the input to BERT model to obtain contextualized representations
cont_reps, _ = self.bert_layer(seq, attention_mask=attn_masks)
# Obtaining the representation of [CLS] head
cls_rep = cont_reps[:, 0]
# Feeding cls_rep to the regression (when evaluating)/classification (when fine-tuning) layer
if self.is_regressor:
regression_score = self.regression_layer(cls_rep)
return regression_score
else:
affinity = self.classification_layer(cls_rep)
probs = self.sigmoid(affinity)
return probs
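# Editor's illustration (shapes are assumptions): for token ids and attention
# masks of shape [batch, seq_len], the model returns a [batch, 1] score in
# regressor mode or [batch, num_features] sigmoid probabilities in classifier mode:
# model = HybridModel(num_features=17, freeze_bert=False)
# model.set_as_classifier()
# probs = model(token_ids, attn_masks)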
class SymmetryHybridDataset(Dataset):
def __init__(self, df, ling_fts, maxlen):
# Store the contents of the file in a pandas dataframe
self.df = df.reset_index(drop=True)
self.ling_fts = ling_fts
# Initialize the BERT tokenizer
self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
self.maxlen = maxlen
def __len__(self):
return len(self.df)
def __getitem__(self, index):
# Selecting the sentence and label at the specified index in the data frame
sentence = self.df['sentence'][index]
sent_score = self.df['sentence average score'][index]
features = self.ling_fts[index]
# Preprocessing the text to be suitable for BERT
tokens = self.tokenizer.tokenize(sentence) # Tokenize the sentence
tokens = ['[CLS]'] + tokens + [
            '[SEP]'] # Inserting the CLS and SEP tokens at the beginning and end of the sentence
if len(tokens) < self.maxlen:
tokens = tokens + ['[PAD]' for _ in range(self.maxlen - len(tokens))] # Padding sentences
else:
            tokens = tokens[:self.maxlen - 1] + ['[SEP]'] # Pruning the list to be of specified max length
tokens_ids = self.tokenizer.convert_tokens_to_ids(
tokens) # Obtaining the indices of the tokens in the BERT Vocabulary
tokens_ids_tensor = torch.tensor(tokens_ids) # Converting the list to a pytorch tensor
# Obtaining the attention mask i.e a tensor containing 1s for no padded tokens and 0s for padded ones
attn_mask = (tokens_ids_tensor != 0).long()
return tokens_ids_tensor, attn_mask, sent_score, features
def train(model, criterion, opti, train_loader, val_loader, max_eps=5, print_every=10, is_regression=False):
best_eval_loss = -1
best_model = model
# print("len of training data: ", len(train_loader.dataset.df))
for ep in range(max_eps):
model.train()
for it, (seq, attn_masks, sent_scores, features) in enumerate(train_loader):
# Clear gradients
opti.zero_grad()
# Converting these to cuda tensors
seq, attn_masks, sent_scores, features = seq.cuda(), attn_masks.cuda(), sent_scores.cuda(), features.cuda() # features: shape (batch_size, num_features)
# Computing loss
if is_regression:
scores = model(seq, attn_masks)
loss = criterion(scores.squeeze(-1), sent_scores.float())
else:
pred_features = model(seq, attn_masks)
loss = criterion(pred_features.squeeze(-1), features.float())
# Backpropagating the gradients
loss.backward()
# Optimization step
opti.step()
# print("Iteration {} of epoch {} complete.".format(it+1, ep+1))
if (it + 1) % print_every == 0:
print("Iteration {} of epoch {} complete. Train Loss : {}".format(it + 1, ep + 1, loss.item()))
# evaluation step after each epoch is done
eval_loss = evaluate(model, val_loader, criterion, is_regression)
print("Epoch {}, validation loss: {}".format(ep + 1, eval_loss))
if best_eval_loss == -1:
best_eval_loss = eval_loss
best_model = deepcopy(model)
elif eval_loss < best_eval_loss:
best_eval_loss = eval_loss
best_model = deepcopy(model)
print("best eval loss: ", best_eval_loss)
return best_model
def evaluate(model, val_loader, criterion, is_regression=True):
model.eval()
total_loss = []
for it, (seq, attn_masks, sent_scores, features) in enumerate(val_loader):
seq, attn_masks, sent_scores, features = seq.cuda(), attn_masks.cuda(), sent_scores.cuda(), features.cuda() # features: shape (batch_size, num_features)
if is_regression:
scores = model(seq, attn_masks)
loss = criterion(scores.squeeze(-1), sent_scores.float())
else:
pred_features = model(seq, attn_masks)
loss = criterion(pred_features.squeeze(-1), features.float())
total_loss.append(loss.item())
return np.array(total_loss).mean()
def predict_sent_scores(model, val_loader):
model.eval()
model.set_as_regressor()
total_loss = 0
scores = []
for it, (seq, attn_masks, sent_scores, features) in enumerate(val_loader):
seq, attn_masks, sent_scores, features = seq.cuda(), attn_masks.cuda(), sent_scores.cuda(), features.cuda() # features: shape (batch_size, num_features)
scores.append(model(seq, attn_masks).squeeze().cpu().detach().numpy())
return np.concatenate(scores)
def main(args):
cuda_available = torch.cuda.is_available()
if cuda_available:
device = torch.device("cuda")
torch.cuda.set_device(0)
print("using gpu acceleration")
ft_criterion = nn.BCELoss()
reg_criterion = nn.MSELoss()
num_features = 17
new_df = pd.read_csv('./data/sis.csv')
ling_fts = pickle.load(open('./data/sis_ling_fts.p', 'rb'))
pred_ids = list(set(list(new_df['pred_id'])))
# first training task: predicting feature labels
m = len(list(new_df['sentence']))
num_train = int(0.7 * m)
# print(num_train)
shuffled_idx = np.arange(m)
np.random.shuffle(shuffled_idx)
train_idx = shuffled_idx[:num_train]
eval_idx = shuffled_idx[num_train:]
train_df = new_df.loc[train_idx]
eval_df = new_df.loc[eval_idx]
train_ling_fts = ling_fts[train_idx]
eval_ling_fts = ling_fts[eval_idx]
train_set = SymmetryHybridDataset(train_df, train_ling_fts, maxlen=30)
eval_set = SymmetryHybridDataset(eval_df, eval_ling_fts, maxlen=30)
train_loader = DataLoader(train_set, batch_size=32, num_workers=4)
val_loader = DataLoader(eval_set, batch_size=32, num_workers=4)
classifier = HybridModel(num_features=num_features, freeze_bert=False).cuda()
classifier.set_as_classifier()
opti = optim.Adam(classifier.parameters(), lr=1e-5)
classifier = train(model=classifier, criterion=ft_criterion, opti=opti, train_loader=train_loader,
val_loader=val_loader, max_eps=20, is_regression=False)
torch.save(classifier, 'fine_tuned_bert_classifier.pth')
# second training task: sentence score regression
cuda_available = torch.cuda.is_available()
if cuda_available:
device = torch.device("cuda")
torch.cuda.set_device(0)
print("using gpu acceleration")
prediction_scores = []
classifier.set_as_regressor()
for pred_id in tqdm(pred_ids):
regressor = deepcopy(classifier)
regressor.set_as_regressor()
train_idx = list(new_df.index[new_df['pred_id'] != pred_id])
eval_idx = list(new_df.index[new_df['pred_id'] == pred_id])
train_df = new_df.loc[train_idx]
eval_df = new_df.loc[eval_idx]
train_ling_fts = ling_fts[train_idx]
eval_ling_fts = ling_fts[eval_idx]
train_set = SymmetryHybridDataset(train_df, train_ling_fts, maxlen=30)
eval_set = SymmetryHybridDataset(eval_df, eval_ling_fts, maxlen=30)
train_loader = DataLoader(train_set, batch_size=32, num_workers=4)
val_loader = DataLoader(eval_set, batch_size=32, num_workers=4)
regressor.set_as_regressor()
opti = optim.Adam(regressor.parameters(), lr=1e-4)
regressor = train(model=regressor, criterion=reg_criterion, opti=opti, train_loader=train_loader,
val_loader=val_loader, max_eps=30, is_regression=True)
# predict sentence scores for the eval verb
prediction_scores.append(predict_sent_scores(regressor, val_loader))
del regressor, opti
torch.cuda.empty_cache()
prediction_scores = np.concatenate(prediction_scores)
new_df['hybrid prediction score'] = | pd.Series(prediction_scores) | pandas.Series |
# coding: utf-8
# # Create figures for manuscript
#
# Generate figures for manuscript
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('load_ext', 'rpy2.ipython')
import rpy2
from rpy2.robjects.packages import importr
import os
import sys
import glob
import pickle
import pandas as pd
import numpy as np
import rpy2.robjects.lib.ggplot2 as ggplot2
from plotnine import (ggplot,
labs,
geom_line,
geom_point,
geom_errorbar,
aes,
ggsave,
theme_bw,
theme,
facet_wrap,
scale_color_manual,
guides,
guide_legend,
element_blank,
element_text,
element_rect,
element_line,
coords)
sys.path.append("../../")
from functions import utils
from sklearn.decomposition import PCA
import warnings
warnings.filterwarnings(action='ignore')
from numpy.random import seed
randomState = 123
seed(randomState)
# In[ ]:
# Read in config variables
config_file = os.path.abspath(os.path.join(os.getcwd(),"../../configs", "config_Human_sample.tsv"))
params = utils.read_config(config_file)
# In[ ]:
# Load parameters
local_dir = params["local_dir"]
dataset_name = params["dataset_name"]
analysis_name = params["analysis_name"]
correction_method = params["correction_method"]
lst_num_experiments = params["lst_num_experiments"]
pca_ind = [0,1,2,-3,-2,-1]
# ## Load data
# In[2]:
# File directories
base_dir = os.path.abspath(
os.path.join(
os.getcwd(), "../.."))
similarity_uncorrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_sample_lvl_sim_similarity_uncorrected_"+correction_method+".pickle")
ci_uncorrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_sample_lvl_sim_ci_uncorrected_"+correction_method+".pickle")
similarity_corrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_sample_lvl_sim_similarity_corrected_"+correction_method+".pickle")
ci_corrected_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_sample_lvl_sim_ci_corrected_"+correction_method+".pickle")
permuted_score_file = os.path.join(
base_dir,
"results",
"saved_variables",
dataset_name +"_sample_lvl_sim_permuted.npy")
compendia_dir = os.path.join(
local_dir,
"experiment_simulated",
analysis_name)
# In[3]:
# Output files
svcca_file = os.path.join(
base_dir,
"results",
dataset_name +"_sample_lvl_sim_svcca_"+correction_method+".svg")
svcca_png_file = os.path.join(
base_dir,
"results",
dataset_name +"_sample_lvl_sim_svcca_"+correction_method+".png")
pca_uncorrected_file = os.path.join(
base_dir,
"results",
dataset_name +"_sample_lvl_sim_pca_uncorrected_"+correction_method+".png")
pca_corrected_file = os.path.join(
base_dir,
"results",
dataset_name +"_sample_lvl_sim_pca_corrected_"+correction_method+".png")
# In[4]:
# Load pickled files
uncorrected_svcca = pd.read_pickle(similarity_uncorrected_file)
err_uncorrected_svcca = pd.read_pickle(ci_uncorrected_file)
corrected_svcca = pd.read_pickle(similarity_corrected_file)
err_corrected_svcca = pd.read_pickle(ci_corrected_file)
permuted_score = np.load(permuted_score_file)
# In[5]:
# Concatenate error bars
uncorrected_svcca_err = pd.concat([uncorrected_svcca, err_uncorrected_svcca], axis=1)
corrected_svcca_err = pd.concat([corrected_svcca, err_corrected_svcca], axis=1)
# In[6]:
# Add group label
uncorrected_svcca_err['Group'] = 'uncorrected'
corrected_svcca_err['Group'] = 'corrected'
# In[7]:
# Concatenate dataframes
all_svcca = pd.concat([uncorrected_svcca_err, corrected_svcca_err])
all_svcca
# ## SVCCA panel
# In[8]:
# Plot
lst_num_experiments = list(all_svcca.index)
threshold = pd.DataFrame(
    np.tile(
permuted_score,
(len(lst_num_experiments), 1)),
index=lst_num_experiments,
columns=['score'])
panel_A = ggplot(all_svcca) + geom_line(all_svcca,
aes(x=lst_num_experiments, y='score', color='Group'),
size=1.5) \
+ geom_point(aes(x=lst_num_experiments, y='score'),
color ='darkgrey',
size=0.5) \
+ geom_errorbar(all_svcca,
aes(x=lst_num_experiments, ymin='ymin', ymax='ymax'),
color='darkgrey') \
+ geom_line(threshold,
aes(x=lst_num_experiments, y='score'),
linetype='dashed',
size=1.5,
color="darkgrey",
show_legend=False) \
+ labs(x = "Number of Partitions",
y = "Similarity score (SVCCA)",
title = "Similarity across varying numbers of partitions") \
+ theme(plot_title=element_text(weight='bold'),
plot_background=element_rect(fill="white"),
panel_background=element_rect(fill="white"),
panel_grid_major_x=element_line(color="lightgrey"),
panel_grid_major_y=element_line(color="lightgrey"),
axis_line=element_line(color="grey"),
legend_key=element_rect(fill='white', colour='white')
) \
+ scale_color_manual(['#1976d2', '#b3e5fc'])
print(panel_A)
ggsave(plot=panel_A, filename=svcca_file, device="svg", dpi=300)
ggsave(plot=panel_A, filename=svcca_png_file, device="png", dpi=300)
# ## Uncorrected PCA panel
# In[9]:
lst_num_experiments = [lst_num_experiments[i] for i in pca_ind]
all_data_df = pd.DataFrame()
# Get batch 1 data
experiment_1_file = os.path.join(
compendia_dir,
"Experiment_1_0.txt.xz")
experiment_1 = pd.read_table(
experiment_1_file,
header=0,
index_col=0,
sep='\t')
for i in lst_num_experiments:
print('Plotting PCA of 1 experiment vs {} experiments...'.format(i))
# Simulated data with all samples in a single batch
original_data_df = experiment_1.copy()
# Add grouping column for plotting
original_data_df['num_experiments'] = '1'
# Get data with additional batch effects added
experiment_other_file = os.path.join(
compendia_dir,
"Experiment_"+str(i)+"_0.txt.xz")
experiment_other = pd.read_table(
experiment_other_file,
header=0,
index_col=0,
sep='\t')
# Simulated data with i batch effects
experiment_data_df = experiment_other
# Add grouping column for plotting
experiment_data_df['num_experiments'] = 'multiple'
# Concatenate datasets together
combined_data_df = pd.concat([original_data_df, experiment_data_df])
# PCA projection
pca = PCA(n_components=2)
# Encode expression data into 2D PCA space
combined_data_numeric_df = combined_data_df.drop(['num_experiments'], axis=1)
combined_data_PCAencoded = pca.fit_transform(combined_data_numeric_df)
combined_data_PCAencoded_df = pd.DataFrame(combined_data_PCAencoded,
index=combined_data_df.index,
columns=['PC1', 'PC2']
)
# Variance explained
print(pca.explained_variance_ratio_)
# Add back in batch labels (i.e. labels = "batch_"<how many batch effects were added>)
combined_data_PCAencoded_df['num_experiments'] = combined_data_df['num_experiments']
    # Add column that designates which batch effect comparison (i.e. comparison of 1 batch vs 5 batches
# is represented by label = 5)
combined_data_PCAencoded_df['comparison'] = str(i)
# Concatenate ALL comparisons
all_data_df = pd.concat([all_data_df, combined_data_PCAencoded_df])
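# Illustrative aside (not part of the original analysis; the _toy names and shapes are
# made up): the loop above fits a fresh 2-component PCA on each "1 experiment vs i
# experiments" concatenation and keeps the grouping label so both groups share one
# projection. A minimal sketch of that pattern on random data:
_toy = pd.DataFrame(np.random.RandomState(0).normal(size=(10, 5)))
_toy_pcs = pd.DataFrame(PCA(n_components=2).fit_transform(_toy),
                        index=_toy.index, columns=['PC1', 'PC2'])
_toy_pcs['group'] = ['a'] * 5 + ['b'] * 5  # label rows after projecting them together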
# In[10]:
# Convert 'num_experiments' into categories to preserve the ordering
lst_num_experiments_str = [str(i) for i in lst_num_experiments]
num_experiments_cat = pd.Categorical(all_data_df['num_experiments'], categories=['1', 'multiple'])
# Convert 'comparison' into categories to preserve the ordering
comparison_cat = pd.Categorical(all_data_df['comparison'], categories=lst_num_experiments_str)
# Assign to a new column in the df
all_data_df = all_data_df.assign(num_experiments_cat = num_experiments_cat)
all_data_df = all_data_df.assign(comparison_cat = comparison_cat)
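# Illustrative aside (hypothetical values): an explicit `categories` list keeps the
# facet/legend order numeric instead of lexicographic ('10' would otherwise sort before '2').
_demo = pd.Categorical(['10', '2', '5'], categories=['2', '5', '10'])
print(_demo.categories)  # Index(['2', '5', '10'], dtype='object')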
# In[11]:
all_data_df.columns = ['PC1', 'PC2', 'num_experiments', 'comparison', 'No. of experiments', 'Comparison']
# In[12]:
# Plot all comparisons in one figure
panel_B = ggplot(all_data_df[all_data_df['Comparison'] != '1'],
aes(x='PC1', y='PC2')) \
+ geom_point(aes(color='No. of experiments'),
alpha=0.1) \
+ facet_wrap('~Comparison') \
+ labs(x = "PC 1",
y = "PC 2",
title = "PCA of experiment 1 vs multiple experiments") \
+ theme_bw() \
+ theme(
legend_title_align = "center",
plot_background=element_rect(fill='white'),
legend_key=element_rect(fill='white', colour='white'),
plot_title=element_text(weight='bold')
) \
+ guides(colour=guide_legend(override_aes={'alpha': 1})) \
+ scale_color_manual(['#bdbdbd', '#b3e5fc']) \
+ geom_point(data=all_data_df[all_data_df['Comparison'] == '1'],
alpha=0.1,
color='#bdbdbd')
print(panel_B)
ggsave(plot=panel_B, filename=pca_uncorrected_file, dpi=500)
# ## Corrected PCA panel
# In[13]:
lst_num_experiments = [lst_num_experiments[i] for i in pca_ind]
all_corrected_data_df = pd.DataFrame()
# Get batch 1 data
experiment_1_file = os.path.join(
compendia_dir,
"Experiment_corrected_1_0.txt.xz")
experiment_1 = pd.read_table(
experiment_1_file,
header=0,
index_col=0,
sep='\t')
# Transpose data to df: sample x gene
experiment_1 = experiment_1.T
for i in lst_num_experiments:
print('Plotting PCA of 1 experiment vs {} experiments...'.format(i))
# Simulated data with all samples in a single batch
original_data_df = experiment_1.copy()
# Match format of column names in before and after df
original_data_df.columns = original_data_df.columns.astype(str)
# Add grouping column for plotting
original_data_df['num_experiments'] = '1'
# Get data with additional batch effects added and corrected
experiment_other_file = os.path.join(
compendia_dir,
"Experiment_corrected_"+str(i)+"_0.txt.xz")
experiment_other = pd.read_table(
experiment_other_file,
header=0,
index_col=0,
sep='\t')
# Transpose data to df: sample x gene
experiment_other = experiment_other.T
# Simulated data with i batch effects that are corrected
experiment_data_df = experiment_other
# Match format of column names in before and after df
experiment_data_df.columns = experiment_data_df.columns.astype(str)
# Add grouping column for plotting
experiment_data_df['num_experiments'] = 'multiple'
# Concatenate datasets together
combined_data_df = pd.concat([original_data_df, experiment_data_df])
# PCA projection
pca = PCA(n_components=2)
# Encode expression data into 2D PCA space
combined_data_numeric_df = combined_data_df.drop(['num_experiments'], axis=1)
combined_data_PCAencoded = pca.fit_transform(combined_data_numeric_df)
combined_data_PCAencoded_df = pd.DataFrame(combined_data_PCAencoded,
index=combined_data_df.index,
columns=['PC1', 'PC2']
)
# Add back in batch labels (i.e. labels = "batch_"<how many batch effects were added>)
combined_data_PCAencoded_df['num_experiments'] = combined_data_df['num_experiments']
    # Add column that designates which batch effect comparison (i.e. comparison of 1 batch vs 5 batches
# is represented by label = 5)
combined_data_PCAencoded_df['comparison'] = str(i)
# Concatenate ALL comparisons
all_corrected_data_df = pd.concat([all_corrected_data_df, combined_data_PCAencoded_df])
# In[14]:
# Convert 'num_experiments' into categories to preserve the ordering
lst_num_experiments_str = [str(i) for i in lst_num_experiments]
num_experiments_cat = | pd.Categorical(all_corrected_data_df['num_experiments'], categories=['1', 'multiple']) | pandas.Categorical |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
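    # Illustrative sketch (hypothetical section and column names): for a frame with a
    # column "Temperature" of ["38", "39"], frameToINI writes an INI section like
    #
    #   [Uptime]
    #   temperature = ['38', '39']
    #
    # i.e. each column becomes one key whose value is a stringified Python list.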
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
        Calculates the mean as a weighted average (each weight 1/n) rather than a direct division, which can produce infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
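    # Worked example (values are made up): for x = [2.0, 4.0, 6.0], numpy.average with
    # weights of 1/3 each computes 2*(1/3) + 4*(1/3) + 6*(1/3) = 4.0, the same result as
    # sum(x)/len(x) but without an explicit division by the element count.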
def _calculateStd(self, data):
"""
        Calculates the standard deviation using the multiplication-based mean, since a direct division can produce infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomaly due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
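    # Worked example (numbers are illustrative): with mean = 100, sigma = 5 and the
    # default multiplierSigma = 3.0, sigmaRangeValue = 15 and topValue = 115, so any
    # sample above 115 is treated as an outlier by the cleanup helpers below.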
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomaly due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
            replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": | pandas.StringDtype() | pandas.StringDtype |
# %%
from functools import reduce
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
pd.set_option("display.max_rows", None)
pd.set_option("display.max_columns", None)
# %%
def build_gvkeys(prc, fund):
gvkeys_fund = fund.gvkey.unique()
gvkeys_prc = prc[prc.close > 5].gvkey.unique()
gvkeys = np.intersect1d(gvkeys_fund, gvkeys_prc)
return gvkeys
def fill_year(df):
first_date = df["date"].iloc[0]
last_date = df["date"].iloc[-1]
date_index = pd.date_range(
pd.to_datetime(first_date),
pd.to_datetime(last_date) + DateOffset(years=1),
freq="M",
name="date",
)
return (
df.drop("gvkey", axis=1)
.set_index("date")
.sort_index()
.reindex(date_index, method="ffill")
)
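# Illustrative sketch (toy frame; the column values are made up, only "gvkey" and
# "date" are required by fill_year): the helper reindexes a sparse annual series onto
# month-end dates and forward-fills, so each reported value is carried through the
# following twelve months.
_toy_fund = pd.DataFrame({"gvkey": ["001"] * 2,
                          "date": pd.to_datetime(["2019-03-31", "2020-03-31"]),
                          "at": [10.0, 12.0]})
_toy_filled = fill_year(_toy_fund)  # 25 month-end rows, values ffilled between reports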
def fill_month(df):
first_date = df["date"].iloc[0]
last_date = df["date"].iloc[-1]
date_index = pd.date_range(
pd.to_datetime(first_date),
| pd.to_datetime(last_date) | pandas.to_datetime |
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
    filename_different = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
        filename_different,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
            'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, | Timestamp('2008-10-23 05:53:11') | pandas.Timestamp |
import os
import ssl
from datetime import date
import json
import pandas as pd
from azure.storage.blob import BlobServiceClient
from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
ssl._create_default_https_context = ssl._create_unverified_context
def set_cwd_to_script():
dname = os.path.dirname(os.path.abspath(__file__))
os.chdir(dname)
def get_runs_container_client(account, container_name):
key = json.load(open("AZURE_STORAGE_CONNECTION_STRING.json"))
connect_str = key[account]
blob_service_client = BlobServiceClient.from_connection_string(connect_str)
try:
container_client = blob_service_client.create_container(container_name)
except ResourceExistsError:
container_client = blob_service_client.get_container_client(container_name)
return container_client
def delete_runs_blob(account, container_name="crude-run-data"):
container_client = get_runs_container_client(account, container_name)
try:
blob_client = container_client.get_blob_client("runs.json")
blob_client.delete_blob()
print("deleted "+account+" blob")
except:
print("cant delete "+account+" blob!")
raise
def upload_crude_run_blob(account, file_name, upload_blob, container_name="crude-run-data"):
container_client = get_runs_container_client(account, container_name)
blob_client = container_client.get_blob_client("runs.json")
try:
properties = blob_client.get_blob_properties()
blob_exists = True
except ResourceNotFoundError:
blob_exists = False
if upload_blob or not blob_exists:
print("starting blob upload to "+account+"...")
with open("./"+file_name, "rb") as data:
blob_client = container_client.upload_blob(name=file_name, data=data, overwrite=True)
print("completed blob upload to "+account)
def get_data(account, file_name):
# get the existing local data for last date comparison
if os.path.isfile("./"+file_name):
current = json.load(open(file_name))
current = | pd.read_json(current["data"], convert_dates=["d"]) | pandas.read_json |
from os import name
from pathlib import Path
import pandas as pd
import numpy as np
import gffpandas.gffpandas as gffpd
from Bio import SeqIO, pairwise2
from Bio.SeqRecord import SeqRecord
from Bio.SeqUtils import seq3
from BCBio import GFF
from Bio.Seq import MutableSeq, Seq
from dna_features_viewer import BiopythonTranslator
from Bio.SeqFeature import SeqFeature, FeatureLocation
import matplotlib.pyplot as plt
from textwrap import wrap
def convert_list_column_to_columns(df, col, output_names, deliminator):
col_list = df[col].to_numpy()
col_arr = np.empty((len(col_list), len(output_names)), dtype=object)
r = range(len(output_names))
for i in range(len(col_list)):
s = col_list[i]
if s is None:
elements = [None] * len(output_names)
else:
elements = s.split(deliminator)
for j in r:
col_arr[i][j] = elements[j]
return col_arr
def convert_dict_column_to_columns(df, col, output_names, element_deliminator, key_value_deliminator, DEBUG=False):
col_list = df[col].to_numpy()
col_arr = np.empty((len(col_list), len(output_names)), dtype=object)
for i in range(len(col_list)):
row_dict = dict(ele.split("=") for ele in col_list[i].split(element_deliminator) if len(ele.split(key_value_deliminator))==2)
for j, col in enumerate(output_names):
col_arr[i][j] = row_dict.get(col)
if DEBUG and i % 10000 == 0: print(i, "\t", row_dict, "\t", col_arr[i])
return col_arr
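# Illustrative sketch (the INFO strings and _demo names are made-up examples): the
# VCF-style "key=value;key=value" fields expand into one column per requested key,
# with None where a key is absent from a row.
_demo_calls = pd.DataFrame({'INFO': ['DP=10;MQ=60', 'DP=7']})
_demo_cols = convert_dict_column_to_columns(_demo_calls, 'INFO', ['DP', 'MQ'], ';', '=')
# _demo_cols -> array([['10', '60'], ['7', None]], dtype=object)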
def get_unique_INFO_elements(position_call_df, DEBUG=False):
info_list = position_call_df['INFO'].to_numpy()
info_columns = []
for i in range(info_list.size):
s = info_list[i]
if DEBUG and i % 10000 == 0: print(i, "\t", info_columns)
for ele in s.split(';'):
col = ele[:ele.find('=')]
if col not in info_columns:
info_columns.append(col)
return info_columns
def get_parsed_position_call_df(position_call_df, info_columns=None, DEBUG=False):
if info_columns is None:
info_columns = get_unique_INFO_elements(position_call_df)
if DEBUG: print('current columns:', position_call_df.columns)
if DEBUG: print('info columns:', info_columns)
info_arr = convert_dict_column_to_columns(position_call_df, 'INFO', info_columns, ';', '=', DEBUG=DEBUG)
position_call_df[info_columns] = info_arr
# must convert DP4 and PV4 to columns
dp4_columns = ['ref_forward_reads', 'ref_reverse_reads', 'alt_forward_reads', 'alt_reverse_reads']
dp4_arr = convert_list_column_to_columns(position_call_df, 'DP4', dp4_columns, ',')
position_call_df[dp4_columns] = dp4_arr
pv4_columns = ['strand_bias_pVal', 'baseQ_bias_pVal', 'mapQ_bias_pVal', 'tail_dist_bias_pVal']
pv4_arr = convert_list_column_to_columns(position_call_df, 'PV4', pv4_columns, ',')
position_call_df[pv4_columns] = pv4_arr
position_call_parsed_df = change_datatypes(position_call_df)
position_call_parsed_df['fwd_strand_coverage'] = position_call_parsed_df['ref_forward_reads'] + position_call_parsed_df['alt_forward_reads']
position_call_parsed_df['rev_strand_coverage'] = position_call_parsed_df['ref_reverse_reads'] + position_call_parsed_df['alt_reverse_reads']
if DEBUG: print(position_call_df.head())
if DEBUG: print(position_call_df.columns)
print(position_call_parsed_df.dtypes)
for col in position_call_parsed_df.columns:
print(col, '\t', position_call_parsed_df[col].unique()[:5])
return position_call_parsed_df
def change_datatypes(df):
all_columns_datatype_dict = {'POS':'uint64', 'ID':'str_', 'REF':'str_', 'ALT':'str_', 'QUAL':"float64",
'FILTER':"str_", 'INFO':"str_", 'FORMAT':"str_", 'DP':"uint64", 'FS':"float64",
'MQ0F':"float64", 'AF1':"float64", 'AC1':"float64", 'DP4':"str_", 'MQ':"float64",
'FQ':"float64", 'SGB':"float64", 'RPBZ':"float64", 'BQBZ':"float64", 'PV4':"str_",
'SCBZ':"float64", 'VDB':"float64", 'INDE':"str_", 'IDV':"float64", 'IMF':"float64",
'ref_forward_reads':'uint64', 'ref_reverse_reads':'uint64', 'alt_forward_reads':'uint64', 'alt_reverse_reads':'uint64',
'strand_bias_pVal':"float64", 'baseQ_bias_pVal':"float64", 'mapQ_bias_pVal':"float64", 'tail_dist_bias_pVal':"float64",
'rev_strand_coverage':'uint64', 'fwd_strand_coverage':'uint64'}
column_datatype_dict = {key:value for key, value in zip(all_columns_datatype_dict.keys(), all_columns_datatype_dict.values()) if key in list(df.columns)}
return df.astype(column_datatype_dict)
def get_position_call_df(all_calls_path):
raw_df = pd.read_csv(all_calls_path, sep='\t')
narrow_df = raw_df[['POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT']]
return narrow_df
def get_gff_df(gff_path):
annotation = gffpd.read_gff3(gff_path)
attributes_df = annotation.attributes_to_columns()
return attributes_df
def get_position_call_df_all_samples(all_calls_dir):
dfs2concat = []
for sample_path in all_calls_dir.iterdir():
sample_name = sample_path.stem.split('_')[0]
sample_df = get_position_call_df(sample_path)
col_order = list(sample_df.columns)
sample_df['genome_name'] = sample_name
col_order.insert(0, 'genome_name')
sample_df = sample_df[col_order]
print(sample_df.columns)
dfs2concat.append(sample_df)
return | pd.concat(dfs2concat, axis=0, ignore_index=True) | pandas.concat |
import csv
import logging
from pathlib import Path
import tarfile
from typing import Dict
import pandas as pd
from guesslangtools.common import (
Config, File, cached, download_file, CSV_FIELD_LIMIT
)
LOGGER = logging.getLogger(__name__)
# Open source projects dataset: https://zenodo.org/record/3626071/
DATASET_FILENAME = (
'libraries-1.6.0-2020-01-12/repositories-1.6.0-2020-01-12.csv'
)
DATASET_URL = (
'https://zenodo.org/record/3626071/files/'
'libraries-1.6.0-2020-01-12.tar.gz?download=1'
)
PKG_ROOT = Path(__file__).parent.parent
OTHER_REPO_DATASET_PATH = PKG_ROOT.joinpath('data', 'other_repositories.csv')
@cached(File.COMPRESSED_DATASET)
def download(config: Config) -> None:
LOGGER.info('Retrieving repositories dataset (8GB)')
LOGGER.info('This operation might take a lot of time...')
destination = config.absolute(File.COMPRESSED_DATASET)
download_file(DATASET_URL, destination)
@cached(File.DATASET)
def extract(config: Config) -> None:
LOGGER.info('Extracting repositories list file')
LOGGER.info('This operation might take several minutes...')
compressed_filename = config.absolute(File.COMPRESSED_DATASET)
with tarfile.open(compressed_filename) as tar:
tar.extract(DATASET_FILENAME, path=config.absolute('.'))
extracted_file = config.absolute(DATASET_FILENAME)
extracted_file.rename(config.absolute(File.DATASET))
@cached(File.SHRUNK_DATASET)
def shrink(config: Config) -> None:
LOGGER.info('Shrink repositories list file')
LOGGER.info('This operation might take several minutes...')
input_path = config.absolute(File.DATASET)
output_path = config.absolute(File.SHRUNK_DATASET)
# The input dataset is too huge to be fully loaded into memory
csv.field_size_limit(CSV_FIELD_LIMIT)
with input_path.open() as input_file, output_path.open('w') as output_file:
reader = csv.DictReader(input_file)
fieldnames = ['repository_name', 'repository_language']
writer = csv.DictWriter(output_file, fieldnames=fieldnames)
writer.writeheader()
for item in reader:
if _ignore(item):
continue
smaller_item = {
'repository_name': item['Name with Owner'],
'repository_language': item['Language'],
}
writer.writerow(smaller_item)
def _ignore(item: Dict[str, str]) -> bool:
return (
item['Fork'] == 'true'
or item['Host Type'] != 'GitHub'
or not item['Name with Owner']
)
@cached(File.ALTERED_DATASET)
def alter(config: Config) -> None:
LOGGER.info('Alter repositories list file')
LOGGER.info('This operation might take several minutes...')
output_path = config.absolute(File.ALTERED_DATASET)
df = config.load_csv(File.SHRUNK_DATASET)
# Set repositories with no language as Markdown repositories.
# Because most of Github repositories have a Readme.md file.
mask = df['repository_language'].isnull()
df.loc[mask, 'repository_language'] = 'Markdown'
# Handle language aliases
for alias, languages in config.alias_mapping.items():
lang = languages[0]
mask = df['repository_language'] == alias
df.loc[mask, 'repository_language'] = lang
# There are too few repositories for some languages.
# To mitigate this problem, a list of known repositories
# is added to the dataset.
other_df = pd.read_csv(OTHER_REPO_DATASET_PATH)
df = | pd.concat([other_df, df]) | pandas.concat |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
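# Illustrative sketch (hypothetical sid and dates, shown as a commented call so it is
# not executed at import time): two knowledge-date updates for sid 0 would be pivoted,
# reindexed over the calendar, and forward-filled up to `end_date`:
#
#   create_expected_df_for_factor_compute(
#       start_date=pd.Timestamp("2015-01-05"),
#       sids=[0, 1],
#       tuples=[(0, 1.0, pd.Timestamp("2015-01-05")),
#               (0, 2.0, pd.Timestamp("2015-01-07"))],
#       end_date=pd.Timestamp("2015-01-09"),
#   )
#
# yields a frame indexed by (at_date, knowledge_date) whose sid-0 column reads 1.0 on
# the 5th-6th and 2.0 from the 7th onward, with the sid-1 column all NaN.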
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
    Tests
    ------
    test_load_one_day()
        Tests that a single day of estimates data, with multiple estimate
        columns, is loaded with the correct values.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests loading a single day of estimates data with the previous-quarter
    loader and multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests loading a single day of estimates data with the next-quarter
    loader and multiple estimate columns.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
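    # A concrete reading of the branching above, using the fixture's knowledge
    # and release dates (not an additional test): on 2015-01-12 the latest Q1
    # knowledge still carries an event date of 01-13 or 01-14, i.e. >= the
    # simulation date, so the latest Q1 row is the expected 'next' estimate.
    # By 2015-01-15 Q1's release has passed, so - assuming at least one Q2
    # estimate has arrived - the latest Q2 row (event 01-25/01-26) is expected
    # instead. With no qualifying knowledge at all, an all-NaN row indexed by
    # the simulation date is expected.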
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters out and
        checks that the returned columns contain data for the correct number
        of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
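# Worked example of the fiscal-quarter/year arithmetic encoded in the two
# fill_expected_out implementations above (values read off the fixtures, not
# new behavior): the single sid has its FQ1-2015 event on 2015-01-10 and its
# FQ2-2015 event on 2015-01-20.
#   * next loader: before 01-10 the next quarter is FQ1 2015, so "2 quarters
#     out" is FQ2 2015; once FQ1 has been released, next becomes FQ2 2015 and
#     "2 quarters out" becomes FQ3 2015 (the fiscal year stays 2015).
#   * previous loader: once FQ1 2015 has been released, "2 quarters back" is
#     FQ4 2014; once FQ2 2015 is released on 01-20, it becomes FQ1 2015.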
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to snapshots of how
        the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
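        # Shuffle the rows (with a fixed seed for reproducibility) so that the
        # loader under test cannot rely on events arriving in sorted order.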
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
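        # For example (assuming no market holidays in the span), a start_date
        # of 2015-01-09 gives get_loc(01-09) - get_loc(01-05) + 1 = 5 sessions,
        # so the factor window always reaches back to the first possible data
        # date.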
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
            # We never get estimates for sid 10 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
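    # Reading off the one-quarter-out timeline above: sid 10 illustrates the
    # "skipped quarter" fixture - its next-quarter value jumps from the Q1
    # estimates (110/111) straight to the Q3 estimates (310/311) on 2015-01-23,
    # because no Q2 data ever arrives, and goes all-NaN after Q3's 2015-02-05
    # event since no later quarter is known.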
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
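    # Worked example of the un-adjustment arithmetic the expected timelines
    # below rely on (numbers read off sid 50's fixtures above, not new
    # behavior): sid 50 has splits with ratios 15 (effective 2015-01-13) and
    # 16 (2015-01-14), both on or before the split-adjusted-asof date, so its
    # raw estimate of 150 is treated as already reflecting both splits. For
    # window dates before either split the expected value is therefore
    # 150 * 1/15 * 1/16, on 2015-01-13 it is 150 * 1/16, and from 2015-01-14
    # onward it is 150.0.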
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
                            (30, 131 * 11, pd.Timestamp("2015-01-09")),
import pandas as pd
import numpy as np
import scipy.sparse as spl
from concurrent.futures import ProcessPoolExecutor
import sys
threads = 4
all_tasks = [
[5, 8000, ['5t', '5nt'], 0.352],
[10, 12000, ['10t', '10nt'], 0.38],
[25, 40000, ['25f'], 0.43386578246281293],
[25, 9000, ['25r'], 0.4],
[100, 4000, ['100r'], 0.39],
]
split, knn_k, test_task, powb = all_tasks[int(sys.argv[1])]
def recode(column, min_val=0):
uniques = column.unique()
codes = range(min_val, len(uniques) + min_val)
code_map = dict(zip(uniques, codes))
return (column.map(code_map), code_map)
def reverse_code(column, code_map):
inv_map = {v: k for k, v in code_map.items()}
return column.map(inv_map)
playlist_meta = pd.read_csv('data/million_playlist_dataset/playlist_meta.csv')
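# A minimal sanity-check sketch for the two helpers above (illustrative values
# only, not part of the original pipeline): recode() maps arbitrary ids onto a
# dense integer range starting at min_val, and reverse_code() inverts it.
_demo_col = pd.Series(["a", "b", "a", "c"])
_demo_codes, _demo_map = recode(_demo_col, min_val=1)   # a -> 1, b -> 2, c -> 3
_demo_roundtrip = reverse_code(_demo_codes, _demo_map)  # back to a, b, a, c
assert (_demo_roundtrip == _demo_col).all()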
#!/usr/bin/python
# encoding: utf-8
"""
@author: xuk1
@license: (C) Copyright 2013-2017
@contact: <EMAIL>
@file: cluster.py
@time: 8/15/2017 10:38
@desc:
"""
import os
from datetime import datetime
from multiprocessing import Pool, Process
import pandas as pd
from component.factory import AttribFactory
from node import Node
from utils.commonOps import get_paths
class Cluster(Node):
"""
    Each PAT data directory and/or TPCx-BB log corresponds to a Cluster; a Cluster consists of multiple Nodes
"""
def __init__(self, pat_path):
"""
        Each cluster should have a corresponding PAT data file; given the file, this
        initially collects all the nodes in the cluster and the attributes of each node.
:param pat_path: PAT file path
"""
self.pat_path = pat_path + os.sep + 'instruments'
self.nodes = self.get_nodes()
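        # Assume every node in the cluster exposes the same set of attributes
        # and probe the first node to discover them.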
self.attrib = Node(self.nodes[0]).node_exist_attrib()
def get_nodes(self):
"""
        Get all the nodes' PAT file paths in this cluster
        :return: all nodes' PAT file paths
"""
if os.path.exists(self.pat_path):
return get_paths(self.pat_path)
else:
print('Path: {0} does not exist, will exit...'.format(self.pat_path))
exit(-1)
def get_cluster_data_by_time(self, start, end, save_raw=False):
"""
        Get the average value of each attribute across all the nodes in the cluster
        :param start: list of start timestamps
        :param end: list of end timestamps, must be the same length as start
        :param save_raw: Boolean, whether to save all raw data
        :return: per-attribute averages over all nodes within each given time pair
"""
cluster_avg = {}
for attrib in self.attrib:
tmp_avg = pd.DataFrame()
tmp_all = pd.DataFrame()
raw_path = self.pat_path + os.sep + attrib + '.csv'
for node in self.nodes:
tmp = Node(node).get_attrib_data_by_time(attrib, start, end)
tmp_avg = tmp_avg.append(tmp[0])
tmp_all = tmp_all.append(tmp[1])
if save_raw:
tmp_all.index = pd.to_datetime(tmp_all.index, unit='s')
tmp_all.to_csv(raw_path, sep=',')
avg = pd.DataFrame()
for i in range(len(start)):
avg = avg.append(tmp_avg.loc[i].mean(axis=0), ignore_index=True)
cluster_avg[attrib] = avg
return cluster_avg
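    # Usage sketch (hypothetical PAT path, epoch timestamps and attribute name;
    # none of these come from the original code base):
    #
    #   cluster = Cluster('/data/pat_run_01')
    #   avg = cluster.get_cluster_data_by_time([1502780000], [1502783600])
    #   avg['cpu']   # one row of cluster-wide averages per (start, end) pair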
def get_node_attrib_data_by_time(self, file_path, attrib, start, end):
"""
        Get data of a given attribute within the given time periods
        :param file_path: path to the node's PAT data directory
        :param attrib: input attribute
        :param start: list of start timestamps
        :param end: list of end timestamps, must be the same length as start
        :return: dict containing the average value and all raw data for each timestamp pair
"""
if attrib.lower() in AttribFactory.node_attrib.keys():
attrib_file = file_path + os.sep + AttribFactory.node_attrib[attrib.lower()]
if os.path.isfile(attrib_file):
return AttribFactory.create_attrib(attrib, attrib_file).get_data_by_time(start, end)
else:
print('node does not have attribute {0}'.format(attrib))
exit(-1)
else:
            print('Node does not have attrib: {0} defined, defined attributes are: {1}, will exit...' \
.format(attrib, AttribFactory.node_attrib.keys()))
exit(-1)
def get_cluster_attrib_data(self, attrib, start, end):
pool = Pool(4)
        # Fetch each node's data for the given attribute in parallel.
        results = pool.starmap(self.get_node_attrib_data_by_time,
                               [(node, attrib, start, end) for node in self.nodes])
        tmp_avg = pd.DataFrame()
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Float64Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
assert not isna(df.loc["b", "timestamp"])
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
{
"a": [0, 0, 0, 0, 13, 14],
"b": [
datetime(2012, 1, 1),
1,
"x",
"y",
datetime(2013, 1, 1),
datetime(2014, 1, 1),
],
}
)
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
df.loc[1, "b"] = 1
df.loc[[2, 3], "b"] = "x", "y"
A = np.array(
[
[13, np.datetime64("2013-01-01T00:00:00")],
[14, np.datetime64("2014-01-01T00:00:00")],
]
)
df.loc[[4, 5], ["a", "b"]] = A
tm.assert_frame_equal(df, expected)
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert is_integer(result)
# GH 11617
df = DataFrame({"a": [1.23]})
df["b"] = 666
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name="b")
result = df.loc[[0], "b"]
tm.assert_series_equal(result, expected)
def test_iloc_row(self):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
tm.assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_row_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
original = df.copy()
# verify slice is view
# setting it makes it raise/warn
subset = df.iloc[slice(4, 8)]
assert np.shares_memory(df[2], subset[2])
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 2] = 0.0
exp_col = original[2].copy()
# TODO(ArrayManager) verify it is expected that the original didn't change
if not using_array_manager:
exp_col[4:8] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
tm.assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_col_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
if not using_array_manager:
# verify slice is view
assert np.shares_memory(df[8]._values, subset[8]._values)
# and that we are setting a copy
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 8] = 0.0
assert (df[8] == 0).all()
else:
# TODO(ArrayManager) verify this is the desired behaviour
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
# but df itself did not change (setitem replaces full column)
tm.assert_frame_equal(df, original)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = date_range(
start=Timestamp(year=2017, month=1, day=1),
end=Timestamp(year=2017, month=1, day=5),
)
trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5))
df = DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = DataFrame(
{"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange
)
tm.assert_frame_equal(df, expected)
# in-place
df = DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates")
df = DataFrame({"dates": column})
df["dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
df = DataFrame({"dates": column})
df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
def test_loc_setitem_datetimelike_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range("20130101", periods=4))
df["A"] = np.array([1 * one_hour] * 4, dtype="m8[ns]")
df.loc[:, "B"] = np.array([2 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "C"] = np.array([3 * one_hour] * 3, dtype="m8[ns]")
df.loc[:, "D"] = np.array([4 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "E"] = np.array([5 * one_hour] * 3, dtype="m8[ns]")
df["F"] = np.timedelta64("NaT")
df.loc[df.index[:-1], "F"] = np.array([6 * one_hour] * 3, dtype="m8[ns]")
df.loc[df.index[-3] :, "G"] = date_range("20130101", periods=3)
df["H"] = np.datetime64("NaT")
result = df.dtypes
expected = Series(
[np.dtype("timedelta64[ns]")] * 6 + [np.dtype("datetime64[ns]")] * 2,
index=list("ABCDEFGH"),
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_indexing_mixed(self):
df = DataFrame(
{
0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
1: {
35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139,
},
2: {
35: np.nan,
40: np.nan,
43: 0.29012581014105987,
49: np.nan,
50: np.nan,
},
3: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
4: {
35: 0.34215328467153283,
40: np.nan,
43: np.nan,
49: np.nan,
50: np.nan,
},
"y": {35: 0, 40: 0, 43: 0, 49: 0, 50: 1},
}
)
# mixed int/float ok
df2 = df.copy()
df2[df2 > 0.3] = 1
expected = df.copy()
expected.loc[40, 1] = 1
expected.loc[49, 1] = 1
expected.loc[50, 1] = 1
expected.loc[35, 4] = 1
tm.assert_frame_equal(df2, expected)
df["foo"] = "test"
msg = "not supported between instances|unorderable types"
with pytest.raises(TypeError, match=msg):
df[df > 0.3] = 1
def test_type_error_multiindex(self):
# See gh-12218
mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"])
dg = DataFrame(
[[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index([0, 1], name="i")
)
with pytest.raises(InvalidIndexError, match="slice"):
dg[:, 0]
index = Index(range(2), name="i")
columns = MultiIndex(
levels=[["x", "y"], [0, 1]], codes=[[0, 1], [0, 0]], names=[None, "c"]
)
expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
result = dg.loc[:, (slice(None), 0)]
tm.assert_frame_equal(result, expected)
name = ("x", 0)
index = Index(range(2), name="i")
expected = Series([1, 3], index=index, name=name)
result = dg["x", 0]
tm.assert_series_equal(result, expected)
def test_getitem_interval_index_partial_indexing(self):
# GH#36490
df = DataFrame(
np.ones((3, 4)), columns=pd.IntervalIndex.from_breaks(np.arange(5))
)
expected = df.iloc[:, 0]
res = df[0.5]
tm.assert_series_equal(res, expected)
res = df.loc[:, 0.5]
tm.assert_series_equal(res, expected)
def test_setitem_array_as_cell_value(self):
# GH#43422
df = DataFrame(columns=["a", "b"], dtype=object)
df.loc[0] = {"a": np.zeros((2,)), "b": np.zeros((2, 2))}
expected = DataFrame({"a": [np.zeros((2,))], "b": [np.zeros((2, 2))]})
tm.assert_frame_equal(df, expected)
# with AM goes through split-path, loses dtype
@td.skip_array_manager_not_yet_implemented
def test_iloc_setitem_nullable_2d_values(self):
df = DataFrame({"A": [1, 2, 3]}, dtype="Int64")
orig = df.copy()
df.loc[:] = df.values[:, ::-1]
tm.assert_frame_equal(df, orig)
df.loc[:] = pd.core.arrays.PandasArray(df.values[:, ::-1])
tm.assert_frame_equal(df, orig)
df.iloc[:] = df.iloc[:, :]
tm.assert_frame_equal(df, orig)
@pytest.mark.parametrize(
"null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")]
)
def test_setting_mismatched_na_into_nullable_fails(
self, null, any_numeric_ea_dtype
):
# GH#44514 don't cast mismatched nulls to pd.NA
df = DataFrame({"A": [1, 2, 3]}, dtype=any_numeric_ea_dtype)
ser = df["A"]
arr = ser._values
msg = "|".join(
[
r"int\(\) argument must be a string, a bytes-like object or a "
"(real )?number, not 'NaTType'",
r"timedelta64\[ns\] cannot be converted to an? (Floating|Integer)Dtype",
r"datetime64\[ns\] cannot be converted to an? (Floating|Integer)Dtype",
"object cannot be converted to a FloatingDtype",
"'values' contains non-numeric NA",
]
)
with pytest.raises(TypeError, match=msg):
arr[0] = null
with pytest.raises(TypeError, match=msg):
arr[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
ser[0] = null
with pytest.raises(TypeError, match=msg):
ser[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
ser.iloc[0] = null
with pytest.raises(TypeError, match=msg):
ser.iloc[:2] = [null, null]
with pytest.raises(TypeError, match=msg):
df.iloc[0, 0] = null
with pytest.raises(TypeError, match=msg):
df.iloc[:2, 0] = [null, null]
# Multi-Block
df2 = df.copy()
df2["B"] = ser.copy()
with pytest.raises(TypeError, match=msg):
df2.iloc[0, 0] = null
with pytest.raises(TypeError, match=msg):
df2.iloc[:2, 0] = [null, null]
def test_loc_expand_empty_frame_keep_index_name(self):
# GH#45621
df = DataFrame(columns=["b"], index=Index([], name="a"))
df.loc[0] = 1
expected = DataFrame({"b": [1]}, index=Index([0], name="a"))
tm.assert_frame_equal(df, expected)
def test_loc_expand_empty_frame_keep_midx_names(self):
# GH#46317
df = DataFrame(
columns=["d"], index=MultiIndex.from_tuples([], names=["a", "b", "c"])
)
df.loc[(1, 2, 3)] = "foo"
expected = DataFrame(
{"d": ["foo"]},
index=MultiIndex.from_tuples([(1, 2, 3)], names=["a", "b", "c"]),
)
tm.assert_frame_equal(df, expected)
class TestDataFrameIndexingUInt64:
def test_setitem(self, uint64_frame):
df = uint64_frame
idx = df["A"].rename("foo")
# setitem
assert "C" not in df.columns
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
assert "D" not in df.columns
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# With NaN: because uint64 has no NaN element,
# the column should be cast to object.
df2 = df.copy()
df2.iloc[1, 1] = pd.NaT
df2.iloc[1, 2] = pd.NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(
df2.dtypes,
Series(
[np.dtype("uint64"), np.dtype("O"), np.dtype("O")],
index=["A", "B", "C"],
),
)
def test_object_casting_indexing_wraps_datetimelike(using_array_manager):
# GH#31649, check the indexing methods all the way down the stack
df = DataFrame(
{
"A": [1, 2],
"B": date_range("2000", periods=2),
"C": pd.timedelta_range("1 Day", periods=2),
}
)
ser = df.loc[0]
assert isinstance(ser.values[1], Timestamp)
assert isinstance(ser.values[2], pd.Timedelta)
ser = df.iloc[0]
assert isinstance(ser.values[1], Timestamp)
assert isinstance(ser.values[2], pd.Timedelta)
ser = df.xs(0, axis=0)
assert isinstance(ser.values[1], Timestamp)
assert isinstance(ser.values[2], pd.Timedelta)
if using_array_manager:
# remainder of the test checking BlockManager internals
return
mgr = df._mgr
mgr._rebuild_blknos_and_blklocs()
arr = mgr.fast_xs(0)
assert isinstance(arr[1], Timestamp)
assert isinstance(arr[2], pd.Timedelta)
blk = mgr.blocks[mgr.blknos[1]]
assert blk.dtype == "M8[ns]" # we got the right block
val = blk.iget((0, 0))
assert isinstance(val, Timestamp)
blk = mgr.blocks[mgr.blknos[2]]
assert blk.dtype == "m8[ns]" # we got the right block
val = blk.iget((0, 0))
assert isinstance(val, pd.Timedelta)
msg1 = r"Cannot setitem on a Categorical with a new category( \(.*\))?, set the"
msg2 = "Cannot set a Categorical with another, without identical categories"
class TestLocILocDataFrameCategorical:
@pytest.fixture
def orig(self):
cats = Categorical(["a", "a", "a", "a", "a", "a", "a"], categories=["a", "b"])
idx = Index(["h", "i", "j", "k", "l", "m", "n"])
values = [1, 1, 1, 1, 1, 1, 1]
orig = DataFrame({"cats": cats, "values": values}, index=idx)
return orig
@pytest.fixture
def exp_single_row(self):
# The expected values if we change a single row
cats1 = Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"])
idx1 = Index(["h", "i", "j", "k", "l", "m", "n"])
values1 = [1, 1, 2, 1, 1, 1, 1]
exp_single_row = DataFrame({"cats": cats1, "values": values1}, index=idx1)
return exp_single_row
@pytest.fixture
def exp_multi_row(self):
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# changed multiple rows
cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2)
return exp_multi_row
@pytest.fixture
def exp_parts_cats_col(self):
# changed part of the cats column
cats3 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx3 = Index(["h", "i", "j", "k", "l", "m", "n"])
values3 = [1, 1, 1, 1, 1, 1, 1]
exp_parts_cats_col = DataFrame({"cats": cats3, "values": values3}, index=idx3)
return exp_parts_cats_col
@pytest.fixture
def exp_single_cats_value(self):
# changed single value in cats col
cats4 = | Categorical(["a", "a", "b", "a", "a", "a", "a"], categories=["a", "b"]) | pandas.Categorical |
# Example data analysis in Pandas
# Data from Kaggle https://www.kaggle.com/mchirico/montcoalert
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('ggplot')
df = | pd.read_csv('911.csv') | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# ## Brain Tumor Classification
# In[3]:
pwd
# In[4]:
path='E:\\DataScience\\MachineLearning\\Brain_Tumor_Data'
# In[5]:
import os
os.listdir(path)
# In[6]:
#importing lib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
import plotly.express as px
from wordcloud import WordCloud
from scipy import signal
# to suppress warnings
import warnings
warnings.filterwarnings('ignore')
# setting up the chart size and background
plt.rcParams['figure.figsize'] = (16, 8)
plt.style.use('fivethirtyeight')
# In[7]:
df = pd.read_csv(path+"\\data.csv")
# In[8]:
df.head(10)
# In[9]:
df.shape
# In[10]:
df.info()
# In[11]:
df.columns
# In[12]:
df.describe()
# In[13]:
df['y'].value_counts()
# In[14]:
df.tail(5)
# In[15]:
df.drop('Unnamed: 0', axis=1,inplace=True)
# In[16]:
df.head()
# ## Binarize Target Column
# In[17]:
target =pd.get_dummies(df['y'],dummy_na=True)
# In[18]:
target
# In[19]:
target = target.iloc[:,1]
# In[20]:
target
# In[21]:
df = pd.concat([df,target],axis=1)
# In[22]:
df.head()
# In[23]:
df.drop('y',axis=1,inplace=True)
# In[24]:
df.head(3)
# ## Feature_Engineering
# In[25]:
# importing feature_selection utilities from sklearn
# statistical scoring functions --- chi-squared (chi2) and ANOVA F-test (f_classif)
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2 ,f_classif
# In[26]:
# X - independent variables, y - dependent variable
X = df.iloc[:,:-1]
y = df.iloc[:,-1]
# In[27]:
X.shape
# In[28]:
X.head()
# In[29]:
# k=50 : select the top 50 features that are most strongly related to the target
# using f_classif function
fs_f =SelectKBest(f_classif ,k=50)
X_f_classif = fs_f.fit_transform(X,y)
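# f_classif scores each feature with a one-way ANOVA F-test against the target; fit_transform keeps only the 50 highest-scoring columns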
# In[30]:
#X_f_classif Selected
dfscores = pd.DataFrame(fs_f.scores_)
dfcolumns = | pd.DataFrame(X.columns) | pandas.DataFrame |
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from dash.dependencies import Input, Output
import dash_table
app=dash.Dash(__name__)
titulo=html.H1("Modelo de Jerarquía Analítica AHP",style={'text-align':'center','font-family':'Arial Black','color':'blue'})
subtitulo=html.H2("Cuatro Criterios / Tres Alternativas",style={'text-align':'center','font-family':'Arial Black'})
nombre=html.H3('<NAME>',style={'font-family':'Arial'})
universidad=html.H3('Universidad Santiago de Cali',style={'font-family':'Arial'})
# Using callbacks
A=dcc.Input(id='criterio1',value=1,type='number')
B=dcc.Input(id='criterio2',value=1,type='number')
C=dcc.Input(id='criterio3',value=1,type='number')
D=dcc.Input(id='criterio4',value=1,type='number')
E=dcc.Input(id='criterio5',value=1,type='number')
F=dcc.Input(id='criterio6',value=1,type='number')
#----------------------------------------------------------------------------
G=dcc.Input(id='c1alternativa1',value=1,type='number')
H=dcc.Input(id='c1alternativa2',value=1,type='number')
I=dcc.Input(id='c1alternativa3',value=1,type='number')
#----------------------------------------------------------------------------
J=dcc.Input(id='c2alternativa1',value=1,type='number')
K=dcc.Input(id='c2alternativa2',value=1,type='number')
L=dcc.Input(id='c2alternativa3',value=1,type='number')
#----------------------------------------------------------------------------
M=dcc.Input(id='c3alternativa1',value=1,type='number')
N=dcc.Input(id='c3alternativa2',value=1,type='number')
O=dcc.Input(id='c3alternativa3',value=1,type='number')
#----------------------------------------------------------------------------
P=dcc.Input(id='c4alternativa1',value=1,type='number')
Q=dcc.Input(id='c4alternativa2',value=1,type='number')
R=dcc.Input(id='c4alternativa3',value=1,type='number')
#----------------------------------------------------------------------------
resultado1=html.H3(id='micriterio1',children='')
texto1=html.H3(id='micriterio2',children='')
resultadotabla1=html.Div(id='tabla1criterio',children=dash_table.DataTable())
resultadotabla2=html.Div(id='tabla2criterio',children=dash_table.DataTable())
#-----------------------------------------------------------------------------
resultado2=html.H3(id='c1mialternativa1',children='')
texto2=html.H3(id='c1mialternativa2',children='')
resulc1alter1=html.Div(id='alternc1resul1',children=dash_table.DataTable())
resulc1alter2=html.Div(id='alternc1resul2',children=dash_table.DataTable())
#-----------------------------------------------------------------------------
resultado3=html.H3(id='c2mialternativa1',children='')
texto3=html.H3(id='c2mialternativa2',children='')
resulc2alter1=html.Div(id='alternc2resul1',children=dash_table.DataTable())
resulc2alter2=html.Div(id='alternc2resul2',children=dash_table.DataTable())
#-----------------------------------------------------------------------------
resultado4=html.H3(id='c3mialternativa1',children='')
texto4=html.H3(id='c3mialternativa2',children='')
resulc3alter1=html.Div(id='alternc3resul1',children=dash_table.DataTable())
resulc3alter2=html.Div(id='alternc3resul2',children=dash_table.DataTable())
#-----------------------------------------------------------------------------
resultado5=html.H3(id='c4mialternativa1',children='')
texto5=html.H3(id='c4mialternativa2',children='')
resulc4alter1=html.Div(id='alternc4resul1',children=dash_table.DataTable())
resulc4alter2=html.Div(id='alternc4resul2',children=dash_table.DataTable())
#-----------------------------------------------------------------------------
resulc8alter80=html.H3(id='tablitafinal0',children='')
resulc8alter81=html.H3(id='tablitafinal1',children='',style={'color':'red'})
resulc8alter82=html.H3(id='tablitafinal2',children='')
@app.callback([Output(component_id='micriterio1',component_property='children'),
Output(component_id='micriterio2',component_property='children'),
Output(component_id='tabla1criterio',component_property='children'),
Output(component_id='tabla2criterio',component_property='children'),
Output(component_id='c1mialternativa1',component_property='children'),
Output(component_id='c1mialternativa2',component_property='children'),
Output(component_id='alternc1resul1',component_property='children'),
Output(component_id='alternc1resul2',component_property='children'),
Output(component_id='c2mialternativa1',component_property='children'),
Output(component_id='c2mialternativa2',component_property='children'),
Output(component_id='alternc2resul1',component_property='children'),
Output(component_id='alternc2resul2',component_property='children'),
Output(component_id='c3mialternativa1',component_property='children'),
Output(component_id='c3mialternativa2',component_property='children'),
Output(component_id='alternc3resul1',component_property='children'),
Output(component_id='alternc3resul2',component_property='children'),
Output(component_id='c4mialternativa1',component_property='children'),
Output(component_id='c4mialternativa2',component_property='children'),
Output(component_id='alternc4resul1',component_property='children'),
Output(component_id='alternc4resul2',component_property='children'),
Output(component_id='tablitafinal0',component_property='children'),
Output(component_id='tablitafinal1',component_property='children'),
Output(component_id='tablitafinal2',component_property='children'),],
[Input(component_id='criterio1',component_property='value'),
Input(component_id='criterio2',component_property='value'),
Input(component_id='criterio3',component_property='value'),
Input(component_id='criterio4',component_property='value'),
Input(component_id='criterio5',component_property='value'),
Input(component_id='criterio6',component_property='value'),
Input(component_id='c1alternativa1',component_property='value'),
Input(component_id='c1alternativa2',component_property='value'),
Input(component_id='c1alternativa3',component_property='value'),
Input(component_id='c2alternativa1',component_property='value'),
Input(component_id='c2alternativa2',component_property='value'),
Input(component_id='c2alternativa3',component_property='value'),
Input(component_id='c3alternativa1',component_property='value'),
Input(component_id='c3alternativa2',component_property='value'),
Input(component_id='c3alternativa3',component_property='value'),
Input(component_id='c4alternativa1',component_property='value'),
Input(component_id='c4alternativa2',component_property='value'),
Input(component_id='c4alternativa3',component_property='value'),])
def miFuncion(dato1,dato2,dato3,dato4,dato5,dato6,c1dato1,c1dato2,c1dato3,c2dato1,c2dato2,c2dato3,c3dato1,c3dato2,c3dato3,c4dato1,c4dato2,c4dato3):
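# This callback recomputes every AHP table whenever one of the 18 pairwise-comparison inputs (6 criterion pairs plus 3 alternative pairs per criterion) changes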
# Pairwise comparison matrix for the criteria
diag=1
C1_C2=dato1
C1_C3=dato2
C1_C4=dato3
C2_C3=dato4
C2_C4=dato5
C3_C4=dato6
C1_C2_Inv=1/C1_C2
C1_C3_Inv=1/C1_C3
C1_C4_Inv=1/C1_C4
C2_C3_Inv=1/C2_C3
C2_C4_Inv=1/C2_C4
C3_C4_Inv=1/C3_C4
matriz_inicial={
"C1":{"C1":diag,"C2":C1_C2,"C3":C1_C3,"C4":C1_C4},
"C2":{"C1":C1_C2_Inv,"C2":diag,"C3":C2_C3,"C4":C2_C4},
"C3":{"C1":C1_C3_Inv,"C2":C2_C3_Inv,"C3":diag,"C4":C3_C4},
"C4":{"C1":C1_C4_Inv,"C2":C2_C4_Inv,"C3":C3_C4_Inv,"C4":diag},
}
# First step of the AHP
AHPStv_paso1=pd.DataFrame(matriz_inicial)
AHPStv_paso1=AHPStv_paso1.T
AHPStv_paso1_a=AHPStv_paso1.sum()
# Step 1a
AHPStv_paso1_a=pd.DataFrame(AHPStv_paso1_a)
AHPStv_paso1_a=AHPStv_paso1_a.T
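# Normalize each column of the pairwise matrix by its column sum, then average across each row to obtain the criteria priority (weight) vector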
AHPStv_paso1_b=AHPStv_paso1.div(AHPStv_paso1_a.iloc[0])
AHPStv_paso1_c=AHPStv_paso1_b.mean(axis=1)
AHPStv_paso1_c=pd.DataFrame(AHPStv_paso1_c)
AHPStv_paso1_c=AHPStv_paso1_c.T
# Compute lambda_max
lambda_max=(AHPStv_paso1_a*AHPStv_paso1_c)
lambda_max=lambda_max.sum(axis=1)
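# Consistency check (Saaty): CI = (lambda_max - n) / (n - 1), CR = CI / RI, with random index RI = 0.90 for an n = 4 matrix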
CC=((lambda_max-4)/3)/0.900
respuesta1=html.H3(CC)
tablacriterio1df=dash_table.DataTable(
columns=[{'name':i,'id':i} for i in (AHPStv_paso1.columns)],
data=AHPStv_paso1.to_dict('records'),
)
tablacriterio2df=dash_table.DataTable(
columns=[{'name':i,'id':i} for i in (AHPStv_paso1_c.columns)],
data=AHPStv_paso1_c.to_dict('records'),
)
# Pairwise comparison matrix for the alternatives
diag=1
A1_A2=c1dato1
A1_A3=c1dato2
A2_A3=c1dato3
A1_A2_Inv=1/A1_A2
A1_A3_Inv=1/A1_A3
A2_A3_Inv=1/A2_A3
matriz_secundaria1={
'A1':{'A1':diag,'A2':A1_A2,'A3':A1_A3},
'A2':{'A1':A1_A2_Inv,'A2':diag,'A3':A2_A3},
'A3':{'A1':A1_A3_Inv,'A2':A2_A3_Inv,'A3':diag},
}
# First step of the AHP
AHPStv_c1paso2=pd.DataFrame(matriz_secundaria1)
AHPStv_c1paso2=AHPStv_c1paso2.T
AHPStv_c1paso2_a=AHPStv_c1paso2.sum()
# Step 2a
AHPStv_c1paso2_a=pd.DataFrame(AHPStv_c1paso2_a)
AHPStv_c1paso2_a=AHPStv_c1paso2_a.T
AHPStv_c1paso2_b=AHPStv_c1paso2.div(AHPStv_c1paso2_a.iloc[0])
AHPStv_c1paso2_c=AHPStv_c1paso2_b.mean(axis=1)
AHPStv_c1paso2_c=pd.DataFrame(AHPStv_c1paso2_c)
AHPStv_c1paso2_c=AHPStv_c1paso2_c.T
# Compute lambda_max
lambda_max=(AHPStv_c1paso2_a*AHPStv_c1paso2_c)
lambda_max=lambda_max.sum(axis=1)
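# Same consistency check for the 3x3 alternatives matrix, with random index RI = 0.58 for n = 3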
CC2=((lambda_max-3)/2)/0.58
respuesta2=html.H3(CC2)
tablac1alternativa1df=dash_table.DataTable(
columns=[{'name':i,'id':i} for i in (AHPStv_c1paso2.columns)],
data=AHPStv_c1paso2.to_dict('records'),
)
tablac1alternativa2df=dash_table.DataTable(
columns=[{'name':i,'id':i} for i in (AHPStv_c1paso2_c.columns)],
data=AHPStv_c1paso2_c.to_dict('records'),
)
# Pairwise comparison matrix for the alternatives
diag=1
A1_A2=c2dato1
A1_A3=c2dato2
A2_A3=c2dato3
A1_A2_Inv=1/A1_A2
A1_A3_Inv=1/A1_A3
A2_A3_Inv=1/A2_A3
matriz_secundaria2={
'A1':{'A1':diag,'A2':A1_A2,'A3':A1_A3},
'A2':{'A1':A1_A2_Inv,'A2':diag,'A3':A2_A3},
'A3':{'A1':A1_A3_Inv,'A2':A2_A3_Inv,'A3':diag},
}
# First step of the AHP
AHPStv_c2paso2=pd.DataFrame(matriz_secundaria2)
AHPStv_c2paso2=AHPStv_c2paso2.T
AHPStv_c2paso2_a=AHPStv_c2paso2.sum()
# Step 2a
AHPStv_c2paso2_a=pd.DataFrame(AHPStv_c2paso2_a)
AHPStv_c2paso2_a=AHPStv_c2paso2_a.T
AHPStv_c2paso2_b=AHPStv_c2paso2.div(AHPStv_c2paso2_a.iloc[0])
AHPStv_c2paso2_c=AHPStv_c2paso2_b.mean(axis=1)
AHPStv_c2paso2_c= | pd.DataFrame(AHPStv_c2paso2_c) | pandas.DataFrame |
import os
import re
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scikit_posthocs as sp
from pandas import DataFrame
from decimal import Decimal
import scipy.stats as ss
from sklearn.preprocessing import StandardScaler
from metalfi.src.data.memory import Memory
class Visualization:
@staticmethod
def fetch_runtime_data(substring, threshold=1000000):
directory = "output/runtime"
path = (Memory.getPath() / directory)
file_names = [name for name in os.listdir(path) if (name.endswith('.csv') and substring in name)]
data = list()
for name in file_names:
file = Memory.load(name, directory)
data.append((file, name))
summary = {}
columns = list(data[0][0].columns)
columns.pop(0)
for column in columns:
summary[column] = DataFrame(columns=["size", column])
for file, name in data:
for x in summary:
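# Infer the data-set size from the file name: the two integers encoded between the 'X' separators are multiplied (assumed to be number of rows x number of columns)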
split = name.split("X")
temp = int(split[3][:-4]) * int(split[2])
if temp < threshold:
summary[x] = summary[x].append(pd.Series([temp, file[x].values[0]],
index=summary[x].columns), ignore_index=True)
for x in summary:
summary[x] = summary[x].sort_values(by=["size"])
return summary
@staticmethod
def runtime_graph(name):
target_data = Visualization.fetch_runtime_data("XtargetX")
meta_data = Visualization.fetch_runtime_data("XmetaX")
for x in target_data:
if x == "LOFO" or x == "SHAP" or x == "LIME" or x == "total":
continue
target_data[x][x] /= 5
plt.plot(target_data[x].columns[0], x, data=target_data[x], linewidth=2)
for x in meta_data:
if x == "total" or x == "multivariate":
continue
plt.plot(meta_data[x].columns[0], x, data=meta_data[x], linewidth=2)
plt.legend()
Memory.storeVisual(plt, name)
@staticmethod
def runtime_boxplot(threshold, targets, meta, name):
target_data = Visualization.fetch_runtime_data("XtargetX", threshold)
meta_data = Visualization.fetch_runtime_data("XmetaX", threshold)
data = list()
names = list()
for x in target_data:
if x not in targets:
continue
names.append(x)
target_data[x][x] /= 5
data.append(target_data[x][x].values)
for x in meta_data:
if x not in meta:
continue
names.append(x)
data.append(meta_data[x][x].values)
fig, ax = plt.subplots()
ax.boxplot(data, showfliers=False)
plt.xticks(list(range(1, len(data) + 1)), names)
Memory.storeVisual(plt, name)
@staticmethod
def fetch_predictions():
directory = "output/predictions"
path = (Memory.getPath() / directory)
file_names = [name for name in os.listdir(path) if not name.endswith(".gitignore")]
for name in file_names:
frame = Memory.load(name, directory).set_index("Unnamed: 0")
for column in frame.columns:
frame = frame.round({column: 3})
path = Memory.getPath() / (directory + "/" + name)
frame.to_csv(path, header=True)
data = [(Memory.load(name, directory).set_index("Unnamed: 0"), name) for name in file_names]
return data
@staticmethod
def performance():
directory = "output/selection"
path = (Memory.getPath() / directory)
file_names = [name for name in os.listdir(path) if not name.endswith(".gitignore")]
data = [(Memory.load(name, directory).set_index("Unnamed: 0"), name) for name in file_names]
for frame, name in data:
width = 0.2
fig, ax = plt.subplots()
anova = frame.loc["ANOVA"].values
mi = frame.loc["MI"].values
fi = frame.loc["FI"].values
meta = frame.loc["MetaLFI"].values
x = np.arange(len(anova))
pos_anova = ax.bar(x - 1.5 * width, anova, width, label="ANOVA")
pos_mi = ax.bar(x - width / 2, mi, width, label="MI")
pos_fi = ax.bar(x + width / 2, fi, width, label="FI")
pos_meta = ax.bar(x + 1.5 * width, meta, width, label="MetaLFI")
"""plt.bar(pos_anova, anova, label="ANOVA")
plt.bar(pos_mi, mi, label="MI")"""
"""plt.bar(pos_fi, fi, label="FI")
plt.bar(pos_meta, meta, label="MetaLFI")"""
ax.set_ylabel("Acc. Scores")
ax.set_yticks([0.775, 0.8, 0.825, 0.85])
ax.set_xticks(x)
ax.set_xticklabels(list(frame.columns))
ax.legend()
plt.ylim([0.75, 0.85])
Memory.storeVisual(plt, name[:-4])
@staticmethod
def metaFeatureImportance():
directory = "output/importance"
path = (Memory.getPath() / directory)
file_names = [name for name in os.listdir(path) if not name.endswith(".gitignore")]
data = [(Memory.load(name, directory).set_index("Unnamed: 0"), name) for name in file_names]
for frame, name in data:
frame = frame.sort_values(by="mean absolute SHAP")
plt.barh(list(frame["meta-features"])[:15], list(frame["mean absolute SHAP"])[:15])
plt.yticks(list(frame["meta-features"])[:15], list(frame["meta-features"])[:15])
Memory.storeVisual(plt, name[:-4])
@staticmethod
def compareMeans(folder):
directory = "output/questions/" + folder
path = (Memory.getPath() / directory)
file_names = [name for name in os.listdir(path) if not name.endswith(".gitignore")]
data = [(Memory.load(name, directory).set_index("Unnamed: 0"), name) for name in file_names]
for data_frame, metric in data:
d = list()
names = list()
ranks = [0] * len(data_frame.columns)
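# Average rank of each column (model) across all rows (data sets); scores are negated for non-RMSE metrics so that rank 1 is always the best model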
for i in range(len(data_frame.index)):
copy = data_frame.iloc[i].values
values = np.array(copy) if ("RMSE" in metric) else np.array(list(map(lambda x: -x, copy)))
temp = values.argsort()
current_ranks = np.array([0] * len(values))
current_ranks[temp] = np.arange(len(values))
current_ranks = list(map(lambda x: x + 1, current_ranks))
ranks = list(map(np.add, ranks, current_ranks))
ranks = list(map(lambda x: x / len(data_frame.index), ranks))
for column in data_frame.columns:
names.append(column)
d.append(data_frame[column].values)
if len(names) < 10:
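# Friedman test as omnibus test over the paired samples; the Nemenyi post-hoc test is run only if the null hypothesis of equal performance is rejected (p < 0.05)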
val, p_value = ss.friedmanchisquare(*d)
if p_value < 0.05:
Visualization.createTimeline(names, ranks, metric,
sp.sign_array(sp.posthoc_nemenyi_friedman(np.array(d).T)), d)
@staticmethod
def createTimeline(names, ranks, metric, sign_matrix, data):
fig, ax = plt.subplots(2)
levels = np.tile([-6, 6, -4, 4, -2, 2], len(ranks))[:len(ranks)]
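# Alternate the stem heights so neighbouring model labels along the timeline do not overlap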
marker, _, _ = ax[0].stem(ranks, levels, linefmt="C3--", basefmt="k-", use_line_collection=True)
marker.set_ydata(np.zeros(len(ranks)))
plt.setp(marker, mec="k", mfc="k")
vert = np.array(list(map(lambda x: "top" if x > 0 else "bottom", levels)))
for i in range(len(ranks)):
ax[0].annotate(names[i], (ranks[i], levels[i]), va=vert[i], xytext=(3, 3), textcoords="offset points")
ax[0].get_yaxis().set_visible(False)
ax[0].spines["left"].set_visible(False)
ax[0].spines["top"].set_visible(False)
ax[0].spines["right"].set_visible(False)
d = {name: [] for name in names}
remove = list()
for i in range(len(sign_matrix) - 1):
for j in range(i + 1, len(sign_matrix[0])):
if (sign_matrix[i][j] == 0) and (j not in remove):
d[names[i]].append(j)
remove.append(j)
colors = ["forestgreen", "royalblue", "gold"]
c = 0
for i in range(len(d.keys())):
indices = d[names[i]]
indices.append(i)
if len(indices) > 1:
values = [ranks[index] for index in indices]
ax[0].axvspan(max(values), min(values), facecolor=colors[c % len(colors)], alpha=0.2)
c += 1
ax[1].boxplot(data, notch=True, showfliers=False)
ax[1].set_xticks(list(range(1, len(data) + 1)))
ax[1].set_xticklabels(names)
Memory.storeVisual(plt, metric[:-4])
@staticmethod
def correlateMetrics():
new = {"r2": list(), "r": list(), "rmse": list()}
directory = "output/predictions"
path = (Memory.getPath() / directory)
file_names = [name for name in os.listdir(path) if not name.endswith(".gitignore")]
data = [(Memory.load(name, directory), name) for name in file_names]
columns = data[0][0].columns
for d, n in data:
for column in columns[1:]:
new[n[:-9]] += list(d[column].values)
frame = DataFrame.from_dict(new)
corr = frame.corr("spearman")
path = Memory.getPath() / ("visual/metrics_corr.csv")
corr.to_csv(path, header=True)
return data
@staticmethod
def correlateTargets():
directory = "input"
path = (Memory.getPath() / directory)
sc = StandardScaler()
data = list()
file_names = [name for name in os.listdir(path) if not name.endswith(".gitignore")]
for name in file_names:
d = Memory.load(name, directory)
df = DataFrame(data=sc.fit_transform(d), columns=d.columns)
data.append(df)
frame = pd.concat(data)
lofo = [x for x in frame.columns if "LOFO" in x]
shap = [x for x in frame.columns if "SHAP" in x]
pimp = [x for x in frame.columns if "PIMP" in x]
lime = [x for x in frame.columns if "LIME" in x]
lm = [x for x in frame.columns if not x.startswith("target_")]
matrix = frame.corr("spearman")
matrix = matrix.drop([x for x in lofo + shap + pimp + lime + lm], axis=0)
matrix = matrix.drop([x for x in list(frame.columns) if x not in lofo + shap + pimp + lime], axis=1)
def f(targets): return np.round(np.mean([np.mean(list([val for val in list(map(abs, matrix[x].values)) if val < 1])) for x in targets]), 2)
def f_2(targets): return np.round(np.max([np.mean(list([val for val in list(map(abs, matrix[x].values)) if val < 1])) for x in targets]), 2)
d = {'lofo': [f(lofo), f_2(lofo)], 'shap': [f(shap), f_2(shap)], 'lime': [f(lime), f_2(lime)], 'pimp': [f(pimp), f_2(pimp)]}
data_frame = pd.DataFrame(data=d, index=["mean", "max"], columns=["lofo", "shap", "lime", "pimp"])
path = Memory.getPath() / ("visual/target_corr.csv")
data_frame.to_csv(path, header=True)
@staticmethod
def createHistograms():
directory = "input"
path = (Memory.getPath() / directory)
data = list()
file_names = [name for name in os.listdir(path) if not name.endswith(".gitignore")]
for name in file_names:
d = Memory.load(name, directory)
df = DataFrame(data=d, columns=d.columns)
data.append(df)
frame = | pd.concat(data) | pandas.concat |
import json, os, sys
from pprint import pprint as print
from datetime import datetime
from datetime import date
from collections import Counter
from collections import OrderedDict
import pandas as pd
import lh3.api as lh3
client = lh3.Client()
chats = client.chats()
FRENCH_QUEUES = [
"algoma-fr",
"clavardez",
"laurentian-fr",
"ottawa-fr",
"saintpaul-fr",
"western-fr",
"york-glendon-fr",
]
SMS_QUEUES = [
"carleton-txt",
"clavardez-txt",
"guelph-humber-txt",
"mcmaster-txt",
"ottawa-fr-txt",
"ottawa-txt",
"scholars-portal-txt",
"western-txt",
"york-txt",
]
PRACTICE_QUEUES = ["practice-webinars", "practice-webinars-fr", "practice-webinars-txt"]
LIST_OF_HOURS = dict()
UNANSWERED_CHATS = list()
UNANSWERED_CHATS_HTML = ['<h1 align="center">UNANSWERED CHATS</h1><hr/><br/>']
def french_queues(chats):
french = list()
for chat in chats:
if chat.get("queue") in FRENCH_QUEUES:
french.append(chat)
return french
def sms_queues(chats):
sms = list()
for chat in chats:
if chat.get("queue") in SMS_QUEUES:
sms.append(chat)
return sms
def remove_practice_queues(chats_this_day):
res = [chat for chat in chats_this_day if not "practice" in chat.get("queue")]
return res
def get_chat_for_this_day(this_day):
day = this_day.day
year = this_day.year
month = this_day.month
all_chats = chats.list_day(year, month, day)
return all_chats
def get_daily_stats(chats_this_day, chat_not_none, today):
unanswered_chats = [chat for chat in chats_this_day if chat.get("accepted") is None]
answered_chats_nbr = len(chats_this_day) - len(unanswered_chats)
french_chats = french_queues(chat_not_none)
sms_chats = sms_queues(chat_not_none)
data = []
data.append(
{
# 'Date': today,
"Day": today.strftime("%A, %b %d %Y"),
"Total chats": len(chats_this_day),
"Total Answered Chats": answered_chats_nbr,
"Total UnAnswered Chats": len(unanswered_chats),
"Total French Answered": len(french_chats),
"Total SMS Answered": len(sms_chats),
}
)
return data
def get_chat_per_hour(chat_not_none):
chat_per_hour_not_none = list()
for chat in chat_not_none:
d = datetime.strptime(chat.get("started"), "%Y-%m-%d %H:%M:%S")
chat["hour"] = d.hour
chat_per_hour_not_none.append(d.hour)
nb_chat_per_hours = dict(Counter(chat_per_hour_not_none))
sort_dic_hourly = {}
for i in sorted(nb_chat_per_hours):
sort_dic_hourly.update({i: nb_chat_per_hours[i]})
return sort_dic_hourly
def list_of_un_answered_chats(all_chats, this_day, queues):
chats_this_day = remove_practice_queues(all_chats)
chat_is_none = [chat for chat in chats_this_day if chat.get("accepted") is None]
for chat in chat_is_none:
# breakpoint()
try:
queue = [q for q in queues if q["name"] == chat.get("queue")]
url = (
"https://ca.libraryh3lp.com/dashboard/queues/"
+ str(queue[0].get("id"))
+ "/calls/"
+ str(chat.get("guest"))
+ "/"
+ str(chat.get("id"))
)
chat.update({"transcript_url": url})
UNANSWERED_CHATS.append(chat)
UNANSWERED_CHATS_HTML.append(
"<p>"
+ "<a target='_blank' href='"
+ url
+ "'>"
+ chat.get("started")
+ "--> "
+ chat.get("profile")
+ " --> "
+ chat.get("protocol")
+ "</a>"
+ "'</p>"
)
transcript = (
client.one("chats", chat.get("id")).get()["transcript"]
or "<h3>No transcript found</h3>"
)
UNANSWERED_CHATS_HTML.append(transcript + "<hr/>")
except:
pass
return chat_is_none
def main(all_chats, this_day):
chats_this_day = remove_practice_queues(all_chats)
chat_not_none = [chat for chat in chats_this_day if chat.get("accepted") is not None]
data = get_daily_stats(chats_this_day, chat_not_none, this_day)
data = data[-1]
sort_dic_hourly = get_chat_per_hour(chat_not_none)
print(data)
data.update(sort_dic_hourly)
LIST_OF_HOURS.update(sort_dic_hourly)
return data
# update_excel_file(data, sort_dic_hourly)
def unanswered_chats():
# print(UNANSWERED_CHATS)
df = pd.DataFrame(UNANSWERED_CHATS)
try:
del df["duration"]
del df["reftracker_id"]
del df["reftracker_url"]
del df["desktracker_id"]
del df["desktracker_url"]
del df["wait"]
del df["referrer"]
del df["ip"]
del df["accepted"]
except:
print("error on deleting columns")
df["started"] = pd.to_datetime(df["started"])
df["ended"] = pd.to_datetime(df["ended"])
df["started_time"] = df["started"].apply(lambda x: x.time())
df["ended_time"] = None # df['ended'].apply(lambda x:x.time())
del df["ended"]
df["guest"] = df["guest"].apply(lambda x: x[0:7])
df["shift"] = df["started"].dt.hour
cols = [
"id",
"guest",
"protocol",
"started",
"started_time",
"shift",
"queue",
"operator",
"ended_time",
"profile",
"transcript_url",
]
df = df[cols]
df.sort_values(by=["id"])
return df
def save_un_into_file(df):
df.to_excel("UNANSWERED_CHATS.xlsx", index=False)
try:
os.remove("unanswered_chats.html")
except:
pass
for val in UNANSWERED_CHATS_HTML:
with open("unanswered_chats.html", "a", encoding="utf-8") as f:
f.write(val)
def find_data_for_report(today=datetime.now()):
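# Note: the default for `today` is evaluated once at function-definition (import) time, not at every call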
queues = client.all("queues").get_list()
month = today.month
year = today.year
day = today.day
report = list()
for loop_day in range(1, day + 1):
all_chats = get_chat_for_this_day(date(year, month, loop_day))
report.append(main(all_chats, date(year, month, loop_day)))
list_of_un_answered_chats(all_chats, date(year, month, loop_day), queues)
return report
def real_report():
report = find_data_for_report()
print(str(report))
df = | pd.DataFrame(report) | pandas.DataFrame |
#Sample Lightcurve class with Kapernka model
from scipy.optimize import curve_fit, minimize
import numpy as np
import matplotlib.pyplot as plt
import os
from JSON_to_DF import JSON_to_DataFrame
import ntpath
import json
import pandas as pd
import celerite
import pickle
#Create Kernels for Gaussian Process
#Real term parameter initialization
a = 1e-4
c = 1
#Matern term parameter initialization
sig = 1e-2
rho = 100
#Bounds on parameters
bounds = dict(log_a = (-15,15), log_c = (-15,15))
bounds = dict(log_sigma = (-15, 15), log_rho = (-15, 15))
#Create Kernels
Real_Kernel = celerite.terms.RealTerm(log_a = np.log(a), log_c = np.log(c), bounds=bounds)
Matern_Kernel = celerite.terms.Matern32Term(log_sigma = np.log(sig), log_rho = np.log(rho))
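# Gaussian-process kernels from celerite: RealTerm is an exponentially decaying correlation term and Matern32Term approximates a Matern-3/2 covariance; presumably used to fit/smooth the light curves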
def deserialize(path):
pickle_in = open(path, "rb")
return pickle.load(pickle_in)
class Supernovae:
#Create Kernels for Gaussian Process
#Real term parameter initialization
a = 1e-4
c = 1
#Matern term parameter initialization
sig = 1e-2
rho = 100
#Bounds on parameters
bounds = dict(log_a = (-15,15), log_c = (-15,15))
bounds2 = dict(log_sigma = (-15, 15), log_rho = (-15, 15))
#Create Kernels
Real_Kernel = celerite.terms.RealTerm(log_a = np.log(a), log_c = np.log(c), bounds=bounds)
Matern_Kernel = celerite.terms.Matern32Term(log_sigma = np.log(sig), log_rho = np.log(rho))
def __init__(self, path):
self.name = os.path.splitext(os.path.basename(path))[0]
self.path = path
def meta_data(self):
file_ = open(self.path)
data = json.load(file_)
self.references = pd.DataFrame((data[self.name]['sources']))
if('redshift' in data[self.name]):
self.z = pd.DataFrame(data[self.name]['redshift']).value[0]
else:
self.z = False
def load_LightCurves(self):
df = JSON_to_DataFrame(self.path)
Lightcurves = {}
if(not isinstance(df, pd.DataFrame)):
self.Lightcurves = Lightcurves
return
band_ref = df[['band', 'source']]
pairs = []
for row in band_ref.iterrows():
if( | pd.isnull(row[1]['band']) | pandas.isnull |
#!/usr/bin/env python
# coding: utf-8
# # ReEDS Scenarios on PV ICE Tool
# To explore different scenarios for future installation projections of PV (or any technology), ReEDS output data can be useful in providing standard scenarios. ReEDS installation projections are used in this journal as input data to the PV ICE tool.
#
# Current sections include:
#
# <ol>
# <li> Reading a standard ReEDS output file </li>
# <li> Saving PCA data as PV ICE input format </li>
# <li> Saving State data as PV ICE input format </li>
# </ol>
#
# In[1]:
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
# In[2]:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP')
print ("Your simulation will be stored in %s" % testfolder)
# ## Reading a standard ReEDS output file
# In[3]:
reedsFile = str(Path().resolve().parent.parent.parent.parent / 'December Core Scenarios ReEDS Outputs Solar Futures v3a.xlsx')
print ("Input file is stored in %s" % reedsFile)
# In[4]:
REEDSInput = pd.read_excel(reedsFile,
# sheet_name="new installs PV (2)")
sheet_name="new installs PV")
#index_col=[0,2,3]) #this casts scenario, PCA and State as levels
# ## Save Input Files by PCA
# #### Create a copy of the REEDS Input and modify structure for PCA focus
# In[5]:
rawdf = REEDSInput.copy()
rawdf.drop(columns=['State'], inplace=True)
rawdf.drop(columns=['Tech'], inplace=True) #tech=pvtotal from "new installs PV sheet", so can drop
rawdf.set_index(['Scenario','Year','PCA'], inplace=True)
rawdf.head(21)
# #### Loading Module Baseline. Will be used later to populate all the columns other than 'new_Installed_Capacity_[MW]', which will be supplied by the REEDS model
# In[6]:
import PV_ICE
r1 = PV_ICE.Simulation(name='Simulation1', path=testfolder)
r1.createScenario(name='US', file=r'..\baselines\SolarFutures_2021\baseline_modules_US_Reeds.csv')
baseline = r1.scenario['US'].data
baseline = baseline.drop(columns=['new_Installed_Capacity_[MW]'])
baseline.set_index('year', inplace=True)
baseline.index = pd.PeriodIndex(baseline.index, freq='A') # A -- Annual
baseline.head()
# #### For each Scenario and for each PCA, combine with baseline and save as input file
# In[7]:
for ii in range (len(rawdf.unstack(level=1))):
PCA = rawdf.unstack(level=1).iloc[ii].name[1]
SCEN = rawdf.unstack(level=1).iloc[ii].name[0]
SCEN=SCEN.replace('+', '_')
filetitle = SCEN+'_'+PCA +'.csv'
subtestfolder = os.path.join(testfolder, 'PCAs')
if not os.path.exists(subtestfolder):
os.makedirs(subtestfolder)
filetitle = os.path.join(subtestfolder, filetitle)
A = rawdf.unstack(level=1).iloc[ii]
A = A.droplevel(level=0)
A.name = 'new_Installed_Capacity_[MW]'
A = pd.DataFrame(A)
A.index=pd.PeriodIndex(A.index, freq='A')
A = pd.DataFrame(A)
A['new_Installed_Capacity_[MW]'] = A['new_Installed_Capacity_[MW]'] * 0.85
A['new_Installed_Capacity_[MW]'] = A['new_Installed_Capacity_[MW]'] * 1000 # ReEDS file is in GW.
# Add other columns
A = pd.concat([A, baseline.reindex(A.index)], axis=1)
header = "year,new_Installed_Capacity_[MW],mod_eff,mod_reliability_t50,mod_reliability_t90," "mod_degradation,mod_lifetime,mod_MFG_eff,mod_EOL_collection_eff,mod_EOL_collected_recycled," "mod_Repair,mod_MerchantTail,mod_Reuse\n" "year,MW,%,years,years,%,years,%,%,%,%,%,%\n"
with open(filetitle, 'w', newline='') as ict:
# Write the header lines, including the index variable for
# the last one if you're letting Pandas produce that for you.
# (see above).
for line in header:
ict.write(line)
# savedata.to_csv(ict, index=False)
A.to_csv(ict, header=False)
# In[ ]:
# ## Save Input Files By States
# #### Reassign data from REEDS Input, as we need one of the columns we dropped.
# In[8]:
rawdf = REEDSInput.copy()
#rawdf.drop(columns=['State'], inplace=True)
rawdf.drop(columns=['Tech'], inplace=True)
rawdf.set_index(['Scenario','Year','PCA', 'State'], inplace=True)
rawdf.head(21)
# #### Group data so we can work with the States instead
# In[9]:
#df = rawdf.groupby(['Scenario','State', 'Year'])['Capacity (GW)'].sum(axis=0)
df = rawdf.groupby(['Scenario','State', 'Year'])['Capacity (GW)'].sum()
df = | pd.DataFrame(df) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
        # Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
        # strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
        wp4 = Panel.from_dict(d4)  # noqa
from linkml_runtime import SchemaView
import pandas as pd
# meta_view = SchemaView("https://raw.githubusercontent.com/linkml/linkml-model/main/linkml_model/model/schema/meta.yaml")
# sis = meta_view.class_induced_slots('slot_definition')
# for i in sis:
# print(i.name)
schema_file = "../artifacts/nmdc_dh.yaml"
selected_class = "sediment"
schema_view = SchemaView(schema_file)
cis = schema_view.class_induced_slots(selected_class)
lod = []
for i in cis:
range_obj = schema_view.get_element(i.range)
range_type_obj = type(range_obj)
rtc_class_name = range_type_obj.class_name
lod.append(
{
"slot_name": i.name,
"range": i.range,
"string_serialization": i.string_serialization,
"regex_pattern": i.pattern,
"min": i.minimum_value,
"max": i.maximum_value,
"is_identifier": i.identifier,
"range_type": rtc_class_name,
}
)
# print(f"{i.name} {i.range} {i.string_serialization}")
df = pd.DataFrame(lod)
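# Illustrative follow-up (assumption, not part of the original snippet): the
# collected slot metadata could now be inspected or exported, e.g.
#   df.to_csv("slot_summary.tsv", sep="\t", index=False)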
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.metapartition import MetaPartition
from kartothek.serialization import DataFrameSerializer
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
# TODO: json -> msgpack
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/cluster_1.parquet",
"dataset_uuid/helper/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/cluster_1.parquet",
"dataset_uuid/core/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_helper = pd.DataFrame(
{
"P": [1, 2, 3, 1, 2, 3],
"L": [1, 1, 1, 2, 2, 2],
"info": string.ascii_lowercase[:2],
}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/P=1/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/P=1/L=1/cluster_1.parquet",
"dataset_uuid/core/P=1/L=1/cluster_2.parquet",
"dataset_uuid/core/P=1/L=2/cluster_1.parquet",
"dataset_uuid/core/P=1/L=2/cluster_2.parquet",
"dataset_uuid/core/P=2/L=1/cluster_1.parquet",
"dataset_uuid/core/P=2/L=1/cluster_2.parquet",
"dataset_uuid/core/P=2/L=2/cluster_1.parquet",
"dataset_uuid/core/P=2/L=2/cluster_2.parquet",
"dataset_uuid/core/P=3/L=1/cluster_1.parquet",
"dataset_uuid/core/P=3/L=1/cluster_2.parquet",
"dataset_uuid/core/P=3/L=2/cluster_1.parquet",
"dataset_uuid/core/P=3/L=2/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col(
store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col_simple_group(
store_factory, bound_store_dataframes
):
"""
Pandas seems to stop evaluating the groupby expression if the dataframes after the first column split
    are of length 1. This seems to be an optimization which should, however, still raise a KeyError
"""
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
assert any([sorted(p) == ["cluster_1", "cluster_2"] for p in index_dct.values()])
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
def test_store_dataframes_as_dataset_empty_dataframe(
store_factory, metadata_version, df_all_types, bound_store_dataframes
):
"""
Test that writing an empty column succeeds.
In particular, this may fail due to too strict schema validation.
"""
df_empty = df_all_types.drop(0)
# Store a second table with shared columns. All shared columns must be of the same type
# This may fail in the presence of empty partitions if the schema validation doesn't account for it
df_shared_cols = df_all_types.loc[:, df_all_types.columns[:3]]
df_shared_cols["different_col"] = "a"
assert df_empty.empty
df_list = [
{
"label": "cluster_1",
"data": [("tableA", df_empty), ("tableB", df_shared_cols.copy(deep=True))],
},
{
"label": "cluster_2",
"data": [
("tableA", df_all_types),
("tableB", df_shared_cols.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableA"], store=store
)
pdt.assert_frame_equal(df_empty, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableA"], store=store
)
# Roundtrips for type date are not type preserving
df_stored["date"] = df_stored["date"].dt.date
pdt.assert_frame_equal(df_all_types, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
def test_store_dataframes_as_dataset_batch_mode(
store_factory, metadata_version, bound_store_dataframes
):
values_p1 = [1, 2, 3]
values_p2 = [4, 5, 6]
df = pd.DataFrame({"P": values_p1})
df2 = pd.DataFrame({"P": values_p2})
df_list = [
[
{
"label": "cluster_1",
"data": [("core", df)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_1"] for v in values_p1}
)
},
},
{
"label": "cluster_2",
"data": [("core", df2)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_2"] for v in values_p2}
)
},
},
]
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store(
"dataset_uuid", store
).load_all_indices(store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df2, df_stored)
assert stored_dataset.indices["P"].to_dict() == {
1: np.array(["cluster_1"], dtype=object),
2: np.array(["cluster_1"], dtype=object),
3: np.array(["cluster_1"], dtype=object),
4: np.array(["cluster_2"], dtype=object),
5: np.array(["cluster_2"], dtype=object),
6: np.array(["cluster_2"], dtype=object),
}
def test_store_dataframes_as_dataset_auto_uuid(
store_factory, metadata_version, mock_uuid, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
{
"label": "cluster_2",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, metadata_version=metadata_version
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store(
"auto_dataset_uuid", store_factory()
)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
def test_store_dataframes_as_dataset_list_input(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame(
{
"P": np.arange(100, 110),
"L": np.arange(100, 110),
"TARGET": np.arange(10, 20),
}
)
df_list = [df, df2]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store_factory())
assert dataset == stored_dataset
def test_store_dataframes_as_dataset_mp_partition_on_none(
metadata_version, store, store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame({"P": np.arange(0, 10), "info": np.arange(100, 110)})
mp = MetaPartition(
label=gen_uuid(),
data={"core": df, "helper": df2},
metadata_version=metadata_version,
)
df_list = [None, mp]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
partition_on=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P"]
assert len(dataset.partitions) == 10
assert dataset.metadata_version == metadata_version
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_partition_on(store_factory, bound_store_dataframes):
df = pd.DataFrame(
OrderedDict([("location", ["0", "1", "2"]), ("other", ["a", "a", "a"])])
)
    # First partition is empty, test this edge case
input_ = [
{
"label": "label",
"data": [("order_proposals", df.head(0))],
"indices": {"location": {}},
},
{
"label": "label",
"data": [("order_proposals", df)],
"indices": {"location": {k: ["label"] for k in df["location"].unique()}},
},
]
dataset = bound_store_dataframes(
input_,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=4,
partition_on=["other"],
)
assert len(dataset.partitions) == 1
assert len(dataset.indices) == 1
assert dataset.partition_keys == ["other"]
def _exception_str(exception):
"""
Extract the exception message, even if this is a re-throw of an exception
in distributed.
"""
if isinstance(exception, ValueError) and exception.args[0] == "Long error message":
return exception.args[1]
return str(exception)
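# Illustrative sketch (not part of the original test module): _exception_str
# unwraps distributed's two-argument ValueError re-throws and falls back to
# str() for any other exception. The message strings are made up for the demo.
def _demo_exception_str():
    wrapped = ValueError("Long error message", "schema mismatch in column X")
    plain = TypeError("unexpected dtype")
    assert _exception_str(wrapped) == "schema mismatch in column X"
    assert _exception_str(plain) == "unexpected dtype"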
@pytest.mark.parametrize(
"dfs,ok",
[
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int32),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int16),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int16),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int32),
"X": | pd.Series([2], dtype=np.int64) | pandas.Series |
import numpy as np
import pandas as pd
from tqdm import tqdm
import holoviews as hv
hv.extension('bokeh')
import datetime
import argparse
def negativeFields(fields, df):
"""
Function to filter anomalous records based on negative (therefore meaningless) values of the `field`
args:
fields: list containing the string name(s) of the field(s) of interest for anomaly detection
fields can be 'pms_pm10_0', 'pms_pm02_5', 'pms_pm01_0', 'lnd_7318u', 'lnd_7318c'
df: pandas dataframe containing 'device', fields, and 'when_captured'
return:
anomaly_dfs: pandas dataframe containing anomalous records: 5 fields are in the list,
['anomaly_type', 'anomaly_details', 'device','severity_score','when_captured']
"""
anomaly_dfs = []
for field in fields:
anomaly_df = df[df[field] < 0][['when_captured', 'device']]
anomaly_df['anomaly_details'] = np.repeat(field + ' < 0', anomaly_df.shape[0])
anomaly_df['severity_score'] = 1
anomaly_dfs.append(anomaly_df)
anomaly_dfs = pd.concat(anomaly_dfs, axis = 0)
anomaly_dfs['anomaly_type'] = 'Negative fields'
return anomaly_dfs
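# Hedged usage sketch (illustrative only, not part of the original module); the
# column names and values below are assumptions based on the docstring above.
def _demo_negative_fields():
    demo = pd.DataFrame({
        'device': [101, 101, 102],
        'when_captured': pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-03']),
        'pms_pm02_5': [5.0, -1.0, 7.0],
        'lnd_7318u': [10.0, 12.0, -3.0],
    })
    # flags the -1.0 PM2.5 reading and the -3.0 radiation reading
    return negativeFields(['pms_pm02_5', 'lnd_7318u'], demo)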
def PMorderingCheck(df):
"""
Function to check PM1.0<PM2.5<PM10.0 (by definition)
If any record violates above definition, report as anomaly
args:
df: pandas dataframe containing 'device', 'pms_pm01_0', 'pms_pm02_5', 'pms_pm10_0', and 'when_captured'
return:
        anomaly_dfs: pandas dataframe containing anomalous records: 5 fields are in the list,
                     ['anomaly_type', 'anomaly_details', 'device','severity_score','when_captured']
"""
anomaly_dfs = []
anomaly_df1 = df[df['pms_pm02_5'] < df['pms_pm01_0']][['when_captured', 'device']]
anomaly_df1['anomaly_details'] = np.repeat('pms_pm02_5 < pms_pm01_0', anomaly_df1.shape[0])
anomaly_df1['severity_score'] = 1
anomaly_df2 = df[df['pms_pm10_0'] < df['pms_pm01_0']][['when_captured', 'device']]
anomaly_df2['anomaly_details'] = np.repeat('pms_pm10_0 < pms_pm01_0', anomaly_df2.shape[0])
anomaly_df2['severity_score'] = 1
anomaly_df3 = df[df['pms_pm10_0'] < df['pms_pm02_5']][['when_captured', 'device']]
anomaly_df3['anomaly_details'] = np.repeat('pms_pm10_0 < pms_pm02_5', anomaly_df3.shape[0])
anomaly_df3['severity_score'] = 1
anomaly_dfs.append(anomaly_df1)
anomaly_dfs.append(anomaly_df2)
anomaly_dfs.append(anomaly_df3)
anomaly_dfs = pd.concat(anomaly_dfs, axis = 0)
anomaly_dfs['anomaly_type'] = 'PM ordering'
return anomaly_dfs.reset_index(drop=True)
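# Hedged usage sketch (illustrative only): a record violating PM1.0 <= PM2.5 <= PM10.0
# is reported once for every ordering rule it breaks. Values are made up for the demo.
def _demo_pm_ordering_check():
    demo = pd.DataFrame({
        'device': [201, 201],
        'when_captured': pd.to_datetime(['2020-01-01', '2020-01-02']),
        'pms_pm01_0': [1.0, 9.0],
        'pms_pm02_5': [2.0, 5.0],   # second record: PM2.5 < PM1.0
        'pms_pm10_0': [3.0, 4.0],   # second record: PM10.0 < PM2.5 and PM10.0 < PM1.0
    })
    return PMorderingCheck(demo)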
def dataContinuityCheck(df, day_gap=2):
"""
Function to check if two consecutive records for a device differ by more than `day_gap`
If there are consecutive records that differ by > `day_gap` number of days, report them as anomalies
args:
df: pandas dataframe containing 'device', fields, and 'when_captured'
day_gap(double): maximum allowed gap (in units of days) between two consecutive records for a device
return:
        anomaly_dfs: pandas dataframe containing anomalous records: 5 fields are in the list,
['anomaly_type', 'anomaly_details', 'device','severity_score','when_captured']
"""
anomaly_dfs = []
gf = df.groupby('device')
for device in df['device'].unique():
curr_dev = gf.get_group(device)
curr_dev_noNan = curr_dev.dropna()
mask = pd.to_datetime(curr_dev_noNan['when_captured']).diff().dt.days >=day_gap
anomaly_df = curr_dev_noNan[mask][['when_captured', 'device']]
anomaly_df['anomaly_details'] = np.repeat('more than ' + str(day_gap) + ' days of gap between consecutive valid records (post cleaning)', anomaly_df.shape[0])
anomaly_df['severity_score'] = 1
anomaly_dfs.append(anomaly_df)
anomaly_dfs = pd.concat(anomaly_dfs, axis = 0).reset_index(drop=True)
anomaly_dfs['anomaly_type'] = 'Data continuity'
return anomaly_dfs
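# Hedged usage sketch (illustrative only): the 8-day jump between the second and
# third readings below exceeds day_gap=2 and is reported as a continuity anomaly.
def _demo_data_continuity_check():
    demo = pd.DataFrame({
        'device': [301, 301, 301],
        'when_captured': pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-10']),
        'pms_pm02_5': [1.0, 2.0, 3.0],
    })
    return dataContinuityCheck(demo, day_gap=2)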
def rollingMedianDev(fields, df, window, min_period, numStd):
"""
Function to filter anomalous records based on `numStd` number of deviations away from rolling Median
args:
fields: list containing the string name(s) of the field(s) of interest for anomaly detection
df: pandas dataframe containing 'device', fields, and 'when_captured'
window: moving window size for the rolling Median and stddev
min_period: Minimum number of observations in window required to have a value (otherwise result is NAN - Not A Number)
numStd: tolerance in number of standard deviations away from Median for record to be anomalous
return:
pandas dataframe containing anomalous records: 5 fields are in the list
['anomaly_type', 'anomaly_details', 'device','severity_score','when_captured']
overlays: holoviews plot objects with running Median, +/- numStd lines, and data
"""
anomaly_dfs = []
overlays = []
for device in tqdm(df['device'].unique(), mininterval = 30):
overlay_batch = []
for field in fields:
deviceFilter = df['device'] == device
rollingMedian = df[deviceFilter][field].rolling(window, min_periods=min_period).median()
rollingStdev = df[deviceFilter][field].rolling(window, min_periods=min_period).std()
upper = rollingMedian + (rollingStdev * numStd)
lower = rollingMedian - (rollingStdev * numStd)
# visualize
x = pd.to_datetime(df[deviceFilter]['when_captured']).values
line1 = hv.Scatter((x,df[deviceFilter][field]), label='data').opts(width=700, ylabel = field)
line1Median = hv.Curve((x, rollingMedian), label='Median').opts(width=700, color='red', xlabel = 'date')
line1Upper = hv.Curve((x, upper), label='Median+ {}.stdev'.format(numStd)).opts(width=700, color='blue')
line1Lower = hv.Curve((x, lower), label='Median- {}.stdev'.format(numStd)).opts(width=700, color='blue')
overlay = line1Upper * line1Median * line1 * line1Lower
overlay.opts(title="device: "+str(device))
# return the list of anomalies : records where deviation is >= num_std away from Median
# there can be anomalies with the same timestamp because filtering is done on a per field basis
temp = df[deviceFilter].copy()
temp['rollingMedian'] = rollingMedian
temp['rollingStdev'] = rollingStdev
temp['severity_score'] = (temp[field]-temp['rollingMedian'])/(numStd*temp['rollingStdev'])
temp = temp[abs(temp['severity_score']) >= 1]
# instead of string for anomaly type, we can also make a key:value dictionary
# and use that as meta-data for reporting
temp['anomaly_details'] = np.repeat(field + ' ' + str(numStd) + ' or more away from Median', temp.shape[0])
anomaly_dfs.append(temp[['anomaly_details', 'device', 'severity_score', 'when_captured']])
overlay_batch.append(overlay)
overlays.append(overlay_batch)
anomaly_dfs = pd.concat(anomaly_dfs, axis = 0)
anomaly_dfs['anomaly_type'] = 'Rolling median outlier'
return anomaly_dfs, overlays
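# Hedged usage sketch (illustrative only): flag points more than 3 rolling standard
# deviations away from a 7-sample rolling median; `sensor_df` is assumed to carry
# the 'device', 'when_captured' and AQ/radiation columns named in the docstring.
def _demo_rolling_median_dev(sensor_df):
    anomalies, plots = rollingMedianDev(['pms_pm02_5'], sensor_df,
                                        window=7, min_period=3, numStd=3)
    return anomalies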
def rollingStdevDev(fields, df, window, min_period, numStd):
"""
Function to filter anomalous records based on `numStd` number of deviations away from rolling stdev
args:
fields: list containing the string name(s) of the field(s) of interest for anomaly detection
df: pandas dataframe containing 'device', fields, and 'when_captured'
window: moving window size for the rolling stdev and its stddev
min_period: Minimum number of observations in window required to have a value (otherwise result is NA)
numStd: tolerance in number of standard deviations away from rolling stdev for record to be anomalous
return:
pandas dataframe containing anomalous records: 5 fields are in the list
['anomaly_type', 'anomaly_details', 'device','severity_score','when_captured']
overlays: holoviews plot objects with running stdev +/- numStd lines, and data
"""
anomaly_dfs = []
overlays = []
for device in tqdm(df['device'].unique(), mininterval = 30):
overlay_batch = []
for field in fields:
deviceFilter = df['device'] == device
rollingStdev = df[deviceFilter][field].rolling(window, min_periods=min_period).std()
rollingStdevStdev = rollingStdev.rolling(window, min_periods=min_period).std()
upper = rollingStdev + (rollingStdevStdev * numStd)
lower = rollingStdev - (rollingStdevStdev * numStd)
# visualize
x = pd.to_datetime(df[deviceFilter]['when_captured']).values
line1 = hv.Scatter((x,df[deviceFilter][field]), label='data').opts(width=700, ylabel = field)
line1Stdev = hv.Curve((x, rollingStdev), label='Rolling Stdev').opts(width=700, color='red', xlabel = 'date')
line1Upper = hv.Curve((x, upper), label='Roll. Stdev+ {}.stdev'.format(numStd)).opts(width=700, color='blue')
line1Lower = hv.Curve((x, lower), label='Roll. Stdev- {}.stdev'.format(numStd)).opts(width=700, color='blue')
overlay = line1Upper * line1Stdev * line1 * line1Lower
overlay.opts(title="device: "+str(device))
# return the list of anomalies : records where deviation is >= num_std away from Median
temp = df[deviceFilter].copy()
temp['rollingStdev'] = rollingStdev
temp['rollingStdevStdev'] = rollingStdevStdev
temp['severity_score'] = (temp[field]-temp['rollingStdev'])/(numStd*temp['rollingStdevStdev'])
temp = temp[abs(temp['severity_score']) >= 1]
# instead of string for anomaly type, we can also make a key:value dictionary
# and use that as meta-data for reporting
temp['anomaly_details'] = np.repeat(field + ' ' + str(numStd) + ' or more away from local stdev', temp.shape[0])
anomaly_dfs.append(temp[['anomaly_details', 'device', 'severity_score', 'when_captured']])
overlay_batch.append(overlay)
overlays.append(overlay_batch)
anomaly_dfs = pd.concat(anomaly_dfs, axis = 0)
anomaly_dfs['anomaly_type'] = 'Rolling standard deviation outlier'
return anomaly_dfs, overlays
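# Hedged usage sketch (illustrative only): same call pattern as rollingMedianDev,
# but flagging stretches whose local volatility deviates from its own rolling stdev.
def _demo_rolling_stdev_dev(sensor_df):
    anomalies, plots = rollingStdevDev(['pms_pm02_5'], sensor_df,
                                       window=7, min_period=3, numStd=3)
    return anomalies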
def nightDayDisparity(fields, df, min_rec, day_start, day_end):
"""
Function to filter anomalous records for nights where the median
nighttime field value > median daytime field value for the preceding day
(1 anomaly per 24 hr period)
args:
fields: list containing the string name(s) of the field(s) of interest for anomaly detection
Note these can only be AQ fields: 'pms_pm10_0', 'pms_pm01_0', 'pms_pm02_5'
df: pandas dataframe containing 'device', fields, and 'when_captured'
min_rec: minimum number of readings for there to be a defined (non-NAN) daytime or nightime median field value
day_start: start hour of the day (like 6 for 0600hrs)
day_end: end hour for the day (like 18 for 1800hrs)
Note: daytime on a date is defined as the hours between `day_start` and `day_end`
nighttime on a date is defined as the hours between `day_end` of the current_date and
the `day_start` of the next_date (if next date records are available in the records)
return:
pandas dataframe containing anomalous records: 5 fields are in the list
['anomaly_type', 'anomaly_details', 'device','severity_score','when_captured']
overlays: holoviews plot objects with data segregated by day, night and anomalous positions that need attention
"""
anomaly_dfs = pd.DataFrame()
overlays = []
gf = df.groupby('device')
# add the hour of day column to the data frame
    df['hod'] = pd.to_datetime(df['when_captured'])
from dataclasses import replace
import datetime as dt
from functools import partial
import inspect
from pathlib import Path
import re
import types
import uuid
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from solarforecastarbiter import datamodel
from solarforecastarbiter.io import api, nwp, utils
from solarforecastarbiter.reference_forecasts import main, models
from solarforecastarbiter.conftest import default_forecast, default_observation
BASE_PATH = Path(nwp.__file__).resolve().parents[0] / 'tests/data'
@pytest.mark.parametrize('model', [
models.gfs_quarter_deg_hourly_to_hourly_mean,
models.gfs_quarter_deg_to_hourly_mean,
models.hrrr_subhourly_to_hourly_mean,
models.hrrr_subhourly_to_subhourly_instantaneous,
models.nam_12km_cloud_cover_to_hourly_mean,
models.nam_12km_hourly_to_hourly_instantaneous,
models.rap_cloud_cover_to_hourly_mean,
models.gefs_half_deg_to_hourly_mean
])
def test_run_nwp(model, site_powerplant_site_type, mocker):
""" to later patch the return value of load forecast, do something like
def load(*args, **kwargs):
return load_forecast_return_value
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(load),))
"""
mocker.patch.object(inspect.unwrap(model), '__defaults__',
(partial(nwp.load_forecast, base_path=BASE_PATH),))
mocker.patch(
'solarforecastarbiter.reference_forecasts.utils.get_init_time',
return_value=pd.Timestamp('20190515T0000Z'))
site, site_type = site_powerplant_site_type
fx = datamodel.Forecast('Test', dt.time(5), pd.Timedelta('1h'),
pd.Timedelta('1h'), pd.Timedelta('6h'),
'beginning', 'interval_mean', 'ghi', site)
run_time = pd.Timestamp('20190515T1100Z')
issue_time = pd.Timestamp('20190515T1100Z')
out = main.run_nwp(fx, model, run_time, issue_time)
for var in ('ghi', 'dni', 'dhi', 'air_temperature', 'wind_speed',
'ac_power'):
if site_type == 'site' and var == 'ac_power':
assert out.ac_power is None
else:
ser = getattr(out, var)
assert len(ser) >= 6
assert isinstance(ser, (pd.Series, pd.DataFrame))
assert ser.index[0] == pd.Timestamp('20190515T1200Z')
assert ser.index[-1] < pd.Timestamp('20190515T1800Z')
@pytest.fixture
def obs_5min_begin(site_metadata):
observation = default_observation(
site_metadata,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
return observation
@pytest.fixture
def observation_values_text():
"""JSON text representation of test data"""
tz = 'UTC'
data_index = pd.date_range(
start='20190101', end='20190112', freq='5min', tz=tz, closed='left')
# each element of data is equal to the hour value of its label
data = pd.DataFrame({'value': data_index.hour, 'quality_flag': 0},
index=data_index)
text = utils.observation_df_to_json_payload(data)
return text.encode()
@pytest.fixture
def session(requests_mock, observation_values_text):
session = api.APISession('')
matcher = re.compile(f'{session.base_url}/observations/.*/values')
requests_mock.register_uri('GET', matcher, content=observation_values_text)
return session
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
# intraday, index=False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
mocker.spy(main.persistence, 'persistence_scalar')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar.call_count == 1
@pytest.mark.parametrize('interval_label', ['beginning', 'ending'])
def test_run_persistence_scalar_index(session, site_metadata, obs_5min_begin,
interval_label, mocker):
run_time = pd.Timestamp('20190101T1945Z')
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1h'),
interval_label=interval_label)
issue_time = pd.Timestamp('20190101T2300Z')
# intraday, index=True
mocker.spy(main.persistence, 'persistence_scalar_index')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time, index=True)
assert isinstance(out, pd.Series)
assert len(out) == 1
assert main.persistence.persistence_scalar_index.call_count == 1
def test_run_persistence_interval(session, site_metadata, obs_5min_begin,
mocker):
run_time = pd.Timestamp('20190102T1945Z')
# day ahead, index = False
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('24h'),
interval_label='beginning')
issue_time = pd.Timestamp('20190102T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, obs_5min_begin, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_weekahead(session, site_metadata, mocker):
variable = 'net_load'
observation = default_observation(
site_metadata, variable=variable,
interval_length=pd.Timedelta('5min'), interval_label='beginning')
run_time = pd.Timestamp('20190110T1945Z')
forecast = default_forecast(
site_metadata, variable=variable,
issue_time_of_day=dt.time(hour=23),
lead_time_to_start=pd.Timedelta('1h'),
interval_length=pd.Timedelta('1h'),
run_length=pd.Timedelta('1d'),
interval_label='beginning')
issue_time = pd.Timestamp('20190111T2300Z')
mocker.spy(main.persistence, 'persistence_interval')
out = main.run_persistence(session, observation, forecast, run_time,
issue_time)
assert isinstance(out, pd.Series)
assert len(out) == 24
assert main.persistence.persistence_interval.call_count == 1
def test_run_persistence_interval_index(session, site_metadata,
obs_5min_begin):
# index=True not supported for day ahead
forecast = default_forecast(
site_metadata,
issue_time_of_day=dt.time(hour=23),
        lead_time_to_start=pd.Timedelta('1h'),
#!/usr/bin/env python
### Up to date as of 10/2019 ###
'''Section 0: Import python libraries
This code has a number of dependencies, listed below.
They can be installed using the virtual environment "slab23"
that is setup using script 'library/setup3env.sh'.
Additional functions are housed in file 'slab2functions.py'
and imported below.
There are some additional dependencies used by the function file
that do not need to be installed separately.
'''
# stdlib imports
from datetime import datetime
import os.path
import argparse
import numpy as np
from pandas import DataFrame
import pandas as pd
import warnings
import slab2functions as s2f
import math
import mapio.gmt as gmt
from functools import partial
from multiprocess import Pool
import loops as loops
from scipy import ndimage
import psutil
import cProfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main(args):
'''Section 1: Setup
In this section:
(1) Identify necessary input files
(2) Load parameters from '[slab]input.par'
(3) Define optional boxes for PDF/print testing
(4) Define output file names
(5) Gathering optional arguments, setting defaults
(6) Define search ellipsoid parameters
(7) Define Average active source profiles
(8) Define reference model (Slab1.0 and/or slab guides)
(9) Define Trench Locations
(10) Open and modify input dataset
(11) Calculate seismogenic zone thickness
(12) Record variable parameters used for this model
(13) Define search grid
(14) Identify tomography datasets
(15) Initialize arrays for Section 2 '''
print('Start Section 1 of 7: Setup')
print(' Loading inputs...')
''' ------ (1) Identify necessary input files ------ '''
trenches = 'library/misc/trenches_usgs_2017_depths.csv'
agesFile = 'library/misc/interp_age.3.2g.nc'
ageerrorsFile = 'library/misc/interp_ageerror.3.2g.nc'
polygonFile = 'library/misc/slab_polygons.txt'
addFile = 'library/misc/addagain.csv'
parFile = args.parFile
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", message="invalid value encountered in less")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in greater")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
''' ------ (2) Load parameters from '[slab]input.par' ------'''
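# Each parameter line is expected (assumption from the parsing below, not verified
# against a real par file) to look like "name = value", e.g. "grid = 0.2" or
# "inFile = latest": plist[0] is the parameter name, plist[2] the value, and the
# middle token (the separator) is skipped.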
for line in open(parFile):
plist = line.split()
if len(plist)>2:
if plist[0] == 'inFile':
inFile = plist[2]
if plist[0] == 'use_box':
use_box = plist[2]
if plist[0] == 'latmin':
latmin = np.float64(plist[2])
if plist[0] == 'latmax':
latmax = np.float64(plist[2])
if plist[0] == 'lonmin':
lonmin = np.float64(plist[2])
if plist[0] == 'lonmax':
lonmax = np.float64(plist[2])
if plist[0] == 'slab':
slab = plist[2]
if plist[0] == 'grid':
grid = np.float64(plist[2])
if plist[0] == 'radius1':
radius1 = np.float64(plist[2])
if plist[0] == 'radius2':
radius2 = np.float64(plist[2])
if plist[0] == 'sdr':
sdr = np.float64(plist[2])
if plist[0] == 'ddr':
ddr = np.float64(plist[2])
if plist[0] == 'taper':
taper = np.float64(plist[2])
if plist[0] == 'T':
T = np.float64(plist[2])
if plist[0] == 'node':
node = np.float64(plist[2])
if plist[0] == 'filt':
filt = np.float64(plist[2])
if plist[0] == 'maxdist':
maxdist = np.float64(plist[2])
if plist[0] == 'minunc':
minunc = np.float64(plist[2])
if plist[0] == 'mindip':
mindip = np.float64(plist[2])
if plist[0] == 'minstk':
minstk = np.float64(plist[2])
if plist[0] == 'maxthickness':
maxthickness = np.float64(plist[2])
if plist[0] == 'seismo_thick':
seismo_thick = np.float64(plist[2])
if plist[0] == 'dipthresh':
dipthresh = np.float64(plist[2])
if plist[0] == 'fracS':
fracS = np.float64(plist[2])
if plist[0] == 'kdeg':
kdeg = np.float64(plist[2])
if plist[0] == 'knot_no':
knot_no = np.float64(plist[2])
if plist[0] == 'rbfs':
rbfs = np.float64(plist[2])
# loop through to find latest slab input file if specified
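# Assumed filename pattern (inferred from the split below, e.g. a hypothetical
# "alu_09-2019_input.csv"): "<slab>_<mm-YYYY>_input.csv"; the newest month/year
# combination found in Input/ wins.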
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
if inFile == 'latest':
yearmax = 0
monthmax = 0
for filename in os.listdir('Input'):
if filename.endswith('.csv'):
try:
slabname,datei,instring = filename.split('_')
except:
continue
if slabname == polyname and instring == 'input.csv':
try:
monthi, yeari = datei.split('-')
except:
continue
yeari = int(yeari)
monthi = int(monthi)
if yeari >= yearmax:
yearmax = yeari
inFile = 'Input/%s'%filename
if monthi > monthmax:
monthmax = monthi
inFile = 'Input/%s'%filename
print (' using input file: %s'%inFile)
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
if args.undergrid is None:
if slab == 'mue':
print ('This slab is truncated by the Caribbean (car) slab, argument -u cardepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'cot':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'sul':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'phi':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'ryu':
print ('This slab is truncated by the Kurils-Japan (kur) slab, argument -u kurdepgrid is required')
print ('Exiting .... ')
exit()
else:
undergrid = args.undergrid
''' ------ (4) Define output file names ------ '''
date = datetime.today().strftime('%m.%d.%y')
now = datetime.now()
time = '%s.%s' % (now.hour, now.minute)
folder = '%s_slab2_%s' % (slab, date)
os.system('mkdir Output/%s'%folder)
outFile = 'Output/%s/%s_slab2_res_%s.csv' % (folder, slab, date)
dataFile = 'Output/%s/%s_slab2_dat_%s.csv' % (folder, slab, date)
nodeFile = 'Output/%s/%s_slab2_nod_%s.csv' % (folder, slab, date)
fillFile = 'Output/%s/%s_slab2_fil_%s.csv' % (folder, slab, date)
rempFile = 'Output/%s/%s_slab2_rem_%s.csv' % (folder, slab, date)
clipFile = 'Output/%s/%s_slab2_clp_%s.csv' % (folder, slab, date)
these_params = 'Output/%s/%s_slab2_par_%s.csv' % (folder, slab, date)
datainfo = 'Output/%s/%s_slab2_din_%s.csv' % (folder, slab, date)
nodeinfo = 'Output/%s/%s_slab2_nin_%s.csv' % (folder, slab, date)
suppFile = 'Output/%s/%s_slab2_sup_%s.csv' % (folder, slab, date)
nodexFile = 'Output/%s/%s_slab2_nox_%s.csv' % (folder, slab, date)
nodeuFile = 'Output/%s/%s_slab2_nou_%s.csv' % (folder, slab, date)
depTextFile = 'Output/%s/%s_slab2_dep_%s.txt' % (folder, slab, date)
depGridFile = 'Output/%s/%s_slab2_dep_%s.grd' % (folder, slab, date)
strTextFile = 'Output/%s/%s_slab2_str_%s.txt' % (folder, slab, date)
strGridFile = 'Output/%s/%s_slab2_str_%s.grd' % (folder, slab, date)
dipTextFile = 'Output/%s/%s_slab2_dip_%s.txt' % (folder, slab, date)
dipGridFile = 'Output/%s/%s_slab2_dip_%s.grd' % (folder, slab, date)
uncTextFile = 'Output/%s/%s_slab2_unc_%s.txt' % (folder, slab, date)
uncGridFile = 'Output/%s/%s_slab2_unc_%s.grd' % (folder, slab, date)
thickTextFile = 'Output/%s/%s_slab2_thk_%s.txt' % (folder, slab, date)
thickGridFile = 'Output/%s/%s_slab2_thk_%s.grd' % (folder, slab, date)
savedir = 'Output/%s'%folder
''' ------ (3) Define optional boxes for PDF/print testing ------'''
if args.test is not None:
testlonmin = args.test[0]
testlonmax = args.test[1]
testlatmin = args.test[2]
testlatmax = args.test[3]
if testlonmin < 0:
testlonmin += 360
if testlonmax < 0:
testlonmax += 360
testarea = [testlonmin, testlonmax, testlatmin, testlatmax]
printtest = True
os.system('mkdir Output/PDF%s' % (slab))
os.system('mkdir Output/multitest_%s' % (slab))
f = open(datainfo, 'w+')
f.write('dataID, nodeID, used_or_where_filtered')
f.write('\n')
f.close()
f = open(nodeinfo, 'w+')
f.write('nodeID, len(df), status, details')
f.write('\n')
f.close()
else:
# an area not in range of any slab polygon
testarea = [220, 230, 15, 20]
printtest = False
''' --- (5) Gathering optional arguments, setting defaults ---'''
if use_box == 'yes':
check = 1
slab = s2f.rectangleIntersectsPolygon(lonmin, lonmax, latmin,
latmax, polygonFile)
if isinstance(slab, str):
slab = slab
else:
try:
slab = slab[0]
except:
print('System exit because box does not intersect slab polygon')
raise SystemExit()
elif use_box == 'no':
check = 0
lon1, lon2, lat1, lat2 = s2f.determine_polygon_extrema(slab,
polygonFile)
lonmin = float(lon1)
lonmax = float(lon2)
latmin = float(lat1)
latmax = float(lat2)
else:
print('use_box in slab2input.par must be "yes" or "no"')
raise SystemExit()
''' ------ (6) Define search ellipsoid parameters ------'''
alen = radius1
blen = radius2
ec = math.sqrt(1-((math.pow(blen, 2))/(math.pow(alen, 2))))
mdist = alen * ec
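# ec is the ellipse eccentricity, e = sqrt(1 - b^2/a^2), so mdist = a*e is the
# distance from the ellipse center to a focus (the linear eccentricity).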
''' ------ (7) Define Average active source profiles ------'''
# Different because alu is variable E/W
if slab == 'alu':
AA_data = pd.read_csv('library/avprofiles/alu_av5.csv')
global_average = False
elif slab == 'him':
AA_data = pd.read_csv('library/avprofiles/him_av.csv')
global_average = False
elif slab == 'kur' or slab == 'izu':
AA_source = 'library/avprofiles/%s_av.txt' % 'jap'
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
AA_data = AA_data[AA_data.dist < 125]
global_average = False
# Use RF data like AA data to constrain flat slab in Mexico
elif slab == 'cam':
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
RF_data = pd.read_csv('library/avprofiles/cam_RF_av.csv')
AA_data = pd.concat([AA_data,RF_data],sort=True)
global_average = False
else:
global_average = False
# See if there is an average active source profile for this slab
try:
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
# If there is no profile for this slab, use the global profile
except:
AA_global = pd.read_csv('library/avprofiles/global_as_av2.csv')
AA_data = AA_global[['dist', 'depth']]
global_average = True
if slab == 'phi' or slab == 'mue':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'cot':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'ita' or slab == 'puy':
AA_data = AA_data[AA_data.dist < 1]
''' ------ (8) Define reference model (Slab1.0 and/or slab guides) ------'''
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
# Search for slab guides in library/slabguides
slabguide = None
slabguide2 = None
for SGfile in os.listdir('library/slabguides'):
if SGfile[0:3] == polyname:
SGfile1 = SGfile
slabguide = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
# Find secondary slab guide for regions where there are two
if polyname == 'sum' or polyname == 'man' or polyname == 'phi' or polyname =='sam' or polyname == 'sco' or polyname == 'mak' or polyname == 'jap':
for f in os.listdir('library/slabguides'):
if f[0:3] == polyname and f != SGfile:
print ('f',f)
SGfile2 = f
slabguide2 = gmt.GMTGrid.load('library/slabguides/%s'%SGfile2)
break
break
# Get Slab1.0 grid where applicable
try:
depgrid = s2f.get_grid(slab, 'depth')
except:
print (' Slab1.0 does not exist in this region, using slab guide')
depgrid = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
slabguide = None
# Calculate strike and dip grids
strgrid, dipgrid = s2f.mkSDgrd(depgrid)
slab1data = s2f.mkSlabData(depgrid, strgrid, dipgrid, printtest)
slab1data.to_csv('gradtest.csv',header=True,index=False)
# Add slab guide data to Slab1.0 grids where necessary
if slabguide is not None:
print ('slab guide for this model:',slabguide)
guidestr, guidedip = s2f.mkSDgrd(slabguide)
guidedata = s2f.mkSlabData(slabguide, guidestr, guidedip, printtest)
if SGfile1 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
elif slab == 'ryu':
guidedata = guidedata[guidedata.lon>137]
slab1data = slab1data[slab1data.lat<=137]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
if slabguide2 is not None:
print ('secondary slab guide for this model:',slabguide2)
guidestr, guidedip = s2f.mkSDgrd(slabguide2)
guidedata = s2f.mkSlabData(slabguide2, guidestr, guidedip, printtest)
if SGfile2 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
#slab1data.to_csv('slab1data.csv',header=True,index=False)
''' ------ (9) Define Trench Locations ------'''
TR_data = pd.read_csv(trenches)
if slab == 'izu' or slab == 'kur':
TR_data = TR_data[TR_data.slab == 'jap']
else:
TR_data = TR_data[TR_data.slab == slab]
TR_data = TR_data.reset_index(drop=True)
TR_data.loc[TR_data.lon < 0, 'lon']+=360
''' ------ (10) Open and modify input dataset ------'''
eventlistALL = pd.read_table('%s' % inFile, sep=',', dtype={
'lon': np.float64, 'lat': np.float64,'depth': np.float64,
'unc': np.float64, 'etype': str, 'ID': np.int, 'mag': np.float64,
'S1': np.float64, 'D1': np.float64, 'R1': np.float64,
'S2': np.float64, 'D2': np.float64, 'R2': np.float64,
'src': str, 'time': str, 'mlon': np.float64, 'mlat': np.float64,
'mdep': np.float64})
ogcolumns = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src']
kagancols = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src', 'mlon', 'mlat', 'mdep']
eventlist = eventlistALL[kagancols]
if printtest:
lat65 = eventlist[eventlist.lat>65]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.lat <= 65]', datainfo,'df')
dataGP = eventlist[eventlist.etype == 'GP']
if len(dataGP) > 0:
s2f.addToDataInfo(dataGP, 0, 'eventlist = eventlist[eventlist.etype != GP]', datainfo,'df')
eventlist = eventlist[eventlist.lat <= 65]
eventlist = eventlist[eventlist.etype != 'GP']
maxID = eventlistALL['ID'].max()
# Add/Remove manually identified points that don't follow general rules
remdir = 'library/points_to_remove/current_files'
for badFile in os.listdir(remdir):
if badFile[0:3] == slab or badFile[0:3] == 'ALL' or ((slab == 'izu' or slab == 'kur') and badFile[0:3] == 'jap'):
print (' manually removing points listed in:',badFile)
donotuse = pd.read_csv('%s/%s'%(remdir,badFile))
eventlist = s2f.removePoints(donotuse, eventlist, lonmin,
lonmax, latmin, latmax, printtest, datainfo, True, slab)
doubleuse = pd.read_csv(addFile)
eventlist, maxID = s2f.doublePoints(doubleuse, eventlist, maxID)
if slab == 'kur':
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 100
if slab == 'sul' or slab == 'man':
eventlist = eventlist[eventlist.etype != 'CP']
if slab == 'him':
eventlist = eventlist[eventlist.src != 'schulte']
if slab == 'sumz' or slab == 'kur' or slab == 'jap' or slab == 'izu':
if printtest:
lat65 = eventlist[eventlist.etype=='TO']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != TO]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'TO']
if slab == 'kurz':
if printtest:
lat65 = eventlist[eventlist.etype=='ER']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != ER]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'ER']
if slab == 'sol':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon <= 149)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lon > 149)]
TR_data = TR_data[TR_data.lon>149]
if slab == 'man':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon >= 120)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype == BA) & (eventlist.lon >= 120)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | ((eventlist.lon < 120)|(eventlist.lat > 15))]
if slab == 'sum':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lat > 21)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lat <= 21)]
if slab == 'ryu':
ryutodata = eventlist[(eventlist.etype == 'TO')&(eventlist.lon>133)]
if slab == 'hel':
eventlist.loc[eventlist.etype == 'RF', 'etype'] = 'CP'
if slab == 'puyz' or slab == 'mak':
eventlist = eventlist[eventlist.src != 'ddgap']
# Set default uncertainties for events without uncertainties
eventlist.loc[eventlist.etype == 'EQ', 'unc'] = 15.0
eventlist.loc[eventlist.etype == 'CP', 'unc'] = 5.0
eventlist.loc[eventlist.etype == 'BA', 'unc'] = 1.0
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 40.0
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <5), 'unc'] = 5.0
if slab == 'puy':
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <15), 'unc'] = 15.0
eventlist.loc[eventlist.mlon < 0, 'mlon'] += 360
# Ensure all data are within longitudes 0-360
eventlist.loc[eventlist.lon < 0, 'lon']+=360
# Define mean depth of bathymetry (for constraining interp outboard trench)
meanBAlist = eventlist[eventlist.etype == 'BA']
meanBA = meanBAlist['depth'].mean()
del eventlistALL
''' ----- (11) Calculate seismogenic zone thickness ------ '''
# define seismogenic thickness parameters. change if needed
maxdep = 65
maxdepdiff = 20
origorcentl = 'c'
origorcentd = 'c'
slaborev = 'e'
lengthlim = -50
ogcolumns = eventlist.columns
eventlist = s2f.getReferenceKagan(slab1data, eventlist, origorcentl, origorcentd)
if slab != 'hin':
seismo_thick, taper_start = s2f.getSZthickness(eventlist,folder,slab,maxdep,maxdepdiff,origorcentl,origorcentd,slaborev,savedir,lengthlim)
else:
seismo_thick = 20
taper_start = 20
if slab == 'hel' or slab == 'car' or slab == 'mak':
seismo_thick = 40
if slab == 'sol':
seismo_thick = 40
if slab == 'alu' or slab == 'cot' or slab == 'sul':
seismo_thick = 10
if slab == 'sol':
eventlistE = eventlist[eventlist.lon>148]
eventlistW = eventlist[eventlist.lon<=148]
eventlistE = s2f.cmtfilter(eventlistE,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistE,eventlistW],sort=True)
if slab == 'sum':
eventlistS = eventlist[eventlist.lat<=22]
eventlistN = eventlist[eventlist.lat>22]
eventlistS = s2f.cmtfilter(eventlistS,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistS,eventlistN],sort=True)
if slab != 'hal' and slab != 'him' and slab != 'pam' and slab != 'hin' and slab != 'sol' and slab != 'sum' and slab != 'cas':
eventlist = s2f.cmtfilter(eventlist,seismo_thick,printtest,datainfo,slab)
eventlist = eventlist[ogcolumns]
''' ------ (12) Record variable parameters used for this model ------'''
f = open(these_params, 'w+')
f.write('Parameters used to create file for slab_Date_time: %s_%s_%s \n' \
%(slab, date, time))
f.write('\n')
f.close()
f = open(these_params, 'a')
f.write('inFile: %s \n' % inFile)
f.write('use_box: %s \n' % use_box)
f.write('latmin: %s \n' % str(latmin))
f.write('latmax: %s \n' % str(latmax))
f.write('lonmin: %s \n' % str(lonmin))
f.write('lonmax: %s \n' % str(lonmax))
f.write('slab: %s \n' % slab)
f.write('grid: %s \n' % str(grid))
f.write('radius1: %s \n' % str(radius1))
f.write('radius2: %s \n' % str(radius2))
f.write('alen: %s \n' % str(alen))
f.write('blen: %s \n' % str(blen))
f.write('sdr: %s \n' % str(sdr))
f.write('ddr: %s \n' % str(ddr))
f.write('taper: %s \n' % str(taper))
f.write('T: %s \n' % str(T))
f.write('node: %s \n' % str(node))
f.write('filt: %s \n' % str(filt))
f.write('maxdist: %s \n' % str(maxdist))
f.write('mindip: %s \n' % str(mindip))
f.write('minstk: %s \n' % str(minstk))
f.write('maxthickness: %s \n' % str(maxthickness))
f.write('seismo_thick: %s \n' % str(seismo_thick))
f.write('dipthresh: %s \n' % str(dipthresh))
f.write('fracS: %s \n' % str(fracS))
f.write('knot_no: %s \n' % str(knot_no))
f.write('kdeg: %s \n' % str(kdeg))
f.write('rbfs: %s \n' % str(rbfs))
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
f.write('undergrid: %s \n' % str(undergrid))
f.close()
''' ------ (13) Define search grid ------ '''
print(' Creating search grid...')
#Creates a grid over the slab region
regular_grid = s2f.create_grid_nodes3(grid, lonmin, lonmax, latmin, latmax)
grid_in_polygon = s2f.createGridInPolygon2(regular_grid, slab, polygonFile)
lons = grid_in_polygon[:, 0]
lats = grid_in_polygon[:, 1]
lons = np.round(lons,decimals=1)
lats = np.round(lats,decimals=1)
lons[lons <0] += 360
slab1guide,slab1query = s2f.makeReference(slab1data,lons,lats,grid,printtest,slab)
''' ------ (14) Identify tomography datasets ------ '''
## Identify how many tomography datasets are included
tomo_data = eventlist[eventlist.etype == 'TO']
if len(tomo_data) > 0 and slab != 'sam':
sources = tomo_data.src
TOsrc = set()
for x in sources:
TOsrc.add(x)
tomo_sets = TOsrc
tomo = True
else:
tomo_sets = 0
tomo = False
premulti = pd.DataFrame()
postmulti = pd.DataFrame()
OGmulti = pd.DataFrame()
elistAA = | pd.DataFrame() | pandas.DataFrame |
# Import modules
import pickle
import pandas as pd
from psychopy import visual, monitors
from psychopy import core, event
import numpy as np
from titta import Titta, helpers_tobii as helpers
#%% Monitor/geometry participant screen
MY_MONITOR = 'testMonitor' # needs to exist in the PsychoPy monitor center
FULLSCREEN = False
SCREEN_RES = [1920, 1080]
SCREEN_WIDTH = 52.7 # cm
VIEWING_DIST = 63 # distance from eye to center of screen (cm)
mon = monitors.Monitor(MY_MONITOR) # Defined in defaults file
mon.setWidth(SCREEN_WIDTH) # Width of screen (cm)
mon.setDistance(VIEWING_DIST) # Distance eye / monitor (cm)
mon.setSizePix(SCREEN_RES)
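# With units='deg' below, PsychoPy converts degrees to pixels from this geometry;
# roughly (approximate figures, not from this script), 1 deg spans about
# 2*VIEWING_DIST*tan(0.5 deg) ~ 1.1 cm at 63 cm, i.e. ~40 px on this
# 52.7 cm / 1920 px screen.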
# Monitor/geometry operator screen
MY_MONITOR_OP = 'default' # needs to exist in the PsychoPy monitor center
FULLSCREEN_OP = False
SCREEN_RES_OP = [1920, 1080]
SCREEN_WIDTH_OP = 52.7 # cm
VIEWING_DIST_OP = 63 # distance from eye to center of screen (cm)
mon_op = monitors.Monitor(MY_MONITOR_OP) # Defined in defaults file
mon_op.setWidth(SCREEN_WIDTH_OP) # Width of screen (cm)
mon_op.setDistance(VIEWING_DIST_OP) # Distance eye / monitor (cm)
mon_op.setSizePix(SCREEN_RES_OP)
# Window set-up (this color will be used for calibration)
win = visual.Window(monitor = mon, fullscr = FULLSCREEN,
screen=1, size=SCREEN_RES, units = 'deg')
win_op = visual.Window(monitor = mon_op, fullscr = FULLSCREEN_OP,
screen=0, size=SCREEN_RES_OP, units = 'norm')
fixation_point = helpers.MyDot2(win)
image = visual.ImageStim(win, image='im1.jpeg', units='norm', size = (2, 2))
#%% ET settings
et_name = 'Tobii Pro Spectrum'
# et_name = 'Tobii4C'
dummy_mode = False
bimonocular_calibration = False
# Change any of the default settings?
settings = Titta.get_defaults(et_name)
settings.FILENAME = 'testfile.tsv'
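# Other defaults could be overridden the same way, e.g. (hypothetical attribute
# names, not checked against Titta's defaults) settings.N_CAL_TARGETS = 5;
# inspect Titta.get_defaults(et_name) to see the fields that actually exist.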
#%% Connect to eye tracker and calibrate
tracker = Titta.Connect(settings)
if dummy_mode:
tracker.set_dummy_mode()
tracker.init()
# Calibrate
if bimonocular_calibration:
tracker.calibrate(win, win_operator=win_op, eye='left', calibration_number = 'first')
tracker.calibrate(win, win_operator=win_op, eye='right', calibration_number = 'second')
else:
tracker.calibrate(win, win_operator=win_op)
#%% Record some data
tracker.start_recording(gaze_data=True, store_data=True)
# Present fixation dot and wait for one second
fixation_point.draw()
t = win.flip()
tracker.send_message('fixation target onset')
core.wait(1)
tracker.send_message('fixation target offset')
image.draw()
t = win.flip()
tracker.send_message('image onset')
core.wait(3)
tracker.send_message('image offset')
win.flip()
tracker.stop_recording(gaze_data=True)
# Close window and save data
win.close()
win_op.close()
tracker.save_data()
#%% Open pickle and write et-data and messages to tsv-files.
f = open(settings.FILENAME[:-4] + '.pkl', 'rb')
gaze_data = pickle.load(f)
msg_data = pickle.load(f)
# Save data and messages
df = | pd.DataFrame(gaze_data, columns=tracker.header) | pandas.DataFrame |
"""
Ministry of Land, Infrastructure and Transport (molit) Open API
1. Transaction class: real estate transaction price lookups
- AptTrade: apartment sale transaction records
- AptTradeDetail: detailed apartment sale transaction records
- AptRent: apartment jeonse / monthly rent records
- AptOwnership: apartment pre-sale right (resale) report records
- OffiTrade: officetel sale report records
- OffiRent: officetel jeonse / monthly rent report records
- RHTrade: row house / multi-household sale transaction records
- RHRent: row house / multi-household jeonse / monthly rent records
- DHTrade: detached / multi-family house sale transactions
- DHRent: detached / multi-family house jeonse / monthly rent records
- LandTrade: land sale report records
- BizTrade: commercial / business-use real estate sale report records
2. Building class: building register information service
01 Building register basic overview lookup: getBrBasisOulnInfo
02 Building register aggregate title lookup: getBrRecapTitleInfo
03 Building register title lookup: getBrTitleInfo
04 Building register floor overview lookup: getBrFlrOulnInfo
05 Building register attached lot number lookup: getBrAtchJibunInfo
06 Building register exclusive/common area lookup: getBrExposPubuseAreaInfo
07 Building register sewage treatment facility lookup: getBrWclfInfo
08 Building register housing price lookup: getBrHsprcInfo
09 Building register exclusive part lookup: getBrExposInfo
10 Building register zoning/district lookup: getBrJijiguInfo
"""
import datetime
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
class Transaction:
"""
Class for querying real estate transaction prices.
"""
def __init__(self, serviceKey):
"""
Initialize with the Service Key issued by the public data portal (data.go.kr).
"""
# Initialize the Open API service key
self.serviceKey = serviceKey
# Check that the service key is valid
self.urlAptTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTrade?serviceKey="
+ self.serviceKey)
self.urlAptTradeDetail = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptTradeDev?serviceKey="
+ self.serviceKey)
self.urlAptRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcAptRent?serviceKey="
+ self.serviceKey)
self.urlAptOwnership = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSilvTrade?serviceKey="
+ self.serviceKey)
self.urlOffiTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcOffiTrade?serviceKey="
+ self.serviceKey)
self.urlOffiRent = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcOffiRent?serviceKey="
+ self.serviceKey)
self.urlRHTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcRHTrade?serviceKey="
+ self.serviceKey)
self.urlRHRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcRHRent?serviceKey="
+ self.serviceKey)
self.urlDHTrade = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSHTrade?serviceKey="
+ self.serviceKey)
self.urlDHRent = (
"http://openapi.molit.go.kr:8081/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcSHRent?serviceKey="
+ self.serviceKey)
self.urlLandTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcLandTrade?serviceKey="
+ self.serviceKey)
self.urlBizTrade = (
"http://openapi.molit.go.kr/OpenAPI_ToolInstallPackage/service/rest/RTMSOBJSvc/getRTMSDataSvcNrgTrade?serviceKey="
+ self.serviceKey)
# Open API URL Dict
urlDict = {
"아파트매매 실거래자료 조회": self.urlAptTrade,
"아파트매매 실거래 상세 자료 조회": self.urlAptTradeDetail,
"아파트 전월세 자료 조회": self.urlAptRent,
"아파트 분양권전매 신고 자료 조회": self.urlAptOwnership,
"오피스텔 매매 신고 조회": self.urlOffiTrade,
"오피스텔 전월세 신고 조회": self.urlOffiRent,
"연립다세대 매매 실거래자료 조회": self.urlRHTrade,
"연립다세대 전월세 실거래자료 조회": self.urlRHRent,
"단독/다가구 매매 실거래 조회": self.urlDHTrade,
"단독/다가구 전월세 자료 조회": self.urlDHRent,
"토지 매매 신고 조회": self.urlLandTrade,
"상업업무용 부동산 매매 신고 자료 조회": self.urlBizTrade,
}
# Check that each service is working properly
for serviceName, url in urlDict.items():
result = requests.get(url, verify=False)
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
te = xmlsoup.findAll("header")
if te[0].find("resultCode").text == "00":
print(f">>> {serviceName} 서비스가 정상 작동합니다.")
else:
print(f">>> {serviceName} 서비스키 미등록 오류입니다.")
# Initialize region codes
# Legal-dong (beopjeongdong) code source: https://code.go.kr
path_code = "https://raw.githubusercontent.com/WooilJeong/PublicDataReader/f14e4de3410cc0f798a83ee5934070d651cbd67b/docs/%EB%B2%95%EC%A0%95%EB%8F%99%EC%BD%94%EB%93%9C%20%EC%A0%84%EC%B2%B4%EC%9E%90%EB%A3%8C.txt"
code = pd.read_csv(path_code, encoding="cp949", sep="\t")
code = code.loc[code["폐지여부"] == "존재"]
code["법정구코드"] = list(map(lambda a: str(a)[:5], list(code["법정동코드"])))
self.code = code
def CodeFinder(self, name):
"""
The MOLIT real transaction price Open API identifies a district (gu) by a region code made of the first 5 digits of the 10-digit legal-dong code.
This method looks up that per-district code: it takes a region name string and returns the matching rows as a pandas DataFrame.
"""
result = self.code[self.code["법정동명"].str.contains(name)][[
"법정동명", "법정구코드"
]]
result.index = range(len(result))
return result
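# Example usage (hypothetical values, not from this file):
#   molit = Transaction(serviceKey)
#   molit.CodeFinder("분당구")  # returns the matching 법정동명 / 법정구코드 rows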
def DataCollector(self, service, LAWD_CD, start_date, end_date):
"""
Query a service month by month over a period.
Inputs: a per-service query method, region code, start month (YYYYmm), end month (YYYYmm)
"""
start_date = datetime.datetime.strptime(str(start_date), "%Y%m")
start_date = datetime.datetime.strftime(start_date, "%Y-%m")
end_date = datetime.datetime.strptime(str(end_date), "%Y%m")
end_date = end_date + datetime.timedelta(days=31)
end_date = datetime.datetime.strftime(end_date, "%Y-%m")
ts = pd.date_range(start=start_date, end=end_date, freq="m")
date_list = list(ts.strftime("%Y%m"))
df = pd.DataFrame()
df_sum = pd.DataFrame()
for m in date_list:
print(">>> LAWD_CD :", LAWD_CD, "DEAL_YMD :", m)
DEAL_YMD = m
df = service(LAWD_CD, DEAL_YMD)
df_sum = pd.concat([df_sum, df])
df_sum.index = range(len(df_sum))
return df_sum
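# Example usage (hypothetical region code and months): collect three months of
# apartment sales for one district:
#   df = molit.DataCollector(molit.AptTrade, 11110, 201901, 201903)
# date_list above would then be ['201901', '201902', '201903'].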
def AptTrade(self, LAWD_CD, DEAL_YMD):
"""
01 Apartment sale transaction records
Inputs: region code (first 5 digits of the legal-dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlAptTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"아파트",
"지번",
"년",
"월",
"일",
"건축년도",
"전용면적",
"층",
"거래금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 아파트, 지번, 년, 월, 일, 건축년도, 전용면적, 층, 거래금액]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드", "법정동", "거래일", "아파트", "지번", "전용면적", "층", "건축년도", "거래금액"
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["거래금액"] = pd.to_numeric(df["거래금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
# Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df["아파트"] = df["아파트"].str.strip()
df.index = range(len(df))
# Type conversion
cols = df.columns.drop(["법정동", "거래일", "아파트", "지번"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# Error raised even though the request succeeded -> bug in this Python code
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Error reported by the Open API provider
else:
print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def AptTradeDetail(self, LAWD_CD, DEAL_YMD):
"""
02 Detailed apartment sale transaction records
Inputs: region code (first 5 digits of the legal-dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlAptTradeDetail + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"거래금액",
"건축년도",
"년",
"도로명",
"도로명건물본번호코드",
"도로명건물부번호코드",
"도로명시군구코드",
"도로명일련번호코드",
"도로명지상지하코드",
"도로명코드",
"법정동",
"법정동본번코드",
"법정동부번코드",
"법정동시군구코드",
"법정동읍면동코드",
"법정동지번코드",
"아파트",
"월",
"일",
"전용면적",
"지번",
"지역코드",
"층",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[
거래금액,
건축년도,
년,
도로명,
도로명건물본번호코드,
도로명건물부번호코드,
도로명시군구코드,
도로명일련번호코드,
도로명지상지하코드,
도로명코드,
법정동,
법정동본번코드,
법정동부번코드,
법정동시군구코드,
법정동읍면동코드,
법정동지번코드,
아파트,
월,
일,
전용면적,
지번,
지역코드,
층,
]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드",
"법정동",
"거래일",
"아파트",
"지번",
"전용면적",
"층",
"건축년도",
"거래금액",
"법정동본번코드",
"법정동부번코드",
"법정동시군구코드",
"법정동읍면동코드",
"법정동지번코드",
"도로명",
"도로명건물본번호코드",
"도로명건물부번호코드",
"도로명시군구코드",
"도로명일련번호코드",
"도로명지상지하코드",
"도로명코드",
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["거래금액"] = pd.to_numeric(df["거래금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
# Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df["아파트"] = df["아파트"].str.strip()
df.index = range(len(df))
# Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "아파트", "지번", "도로명"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# Error raised even though the request succeeded -> bug in this Python code
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Error reported by the Open API provider
else:
print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def AptRent(self, LAWD_CD, DEAL_YMD):
"""
03 Apartment jeonse / monthly rent records
Inputs: region code (first 5 digits of the legal-dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlAptRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"아파트",
"지번",
"년",
"월",
"일",
"건축년도",
"전용면적",
"층",
"보증금액",
"월세금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 아파트, 지번, 년, 월, 일, 건축년도, 전용면적, 층, 보증금액, 월세금액]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드",
"법정동",
"거래일",
"아파트",
"지번",
"전용면적",
"층",
"건축년도",
"보증금액",
"월세금액",
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["보증금액"] = pd.to_numeric(df["보증금액"].str.replace(",", ""))
df["월세금액"] = pd.to_numeric(df["월세금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
# Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df.index = range(len(df))
# Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "지번", "아파트"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# Error raised even though the request succeeded -> bug in this Python code
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Error reported by the Open API provider
else:
print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def AptOwnership(self, LAWD_CD, DEAL_YMD):
"""
04 Apartment pre-sale right (resale) report records
Inputs: region code (first 5 digits of the legal-dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlAptOwnership + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"시군구",
"단지",
"지번",
"구분",
"년",
"월",
"일",
"전용면적",
"층",
"거래금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 시군구, 단지, 지번, 구분, 년, 월, 일, 전용면적, 층, 거래금액]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드",
"법정동",
"거래일",
"시군구",
"단지",
"지번",
"구분",
"전용면적",
"층",
"거래금액",
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["거래금액"] = pd.to_numeric(df["거래금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
# Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df.index = range(len(df))
# Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "시군구", "단지", "지번", "구분"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# Error raised even though the request succeeded -> bug in this Python code
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Error reported by the Open API provider
else:
print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def OffiTrade(self, LAWD_CD, DEAL_YMD):
"""
05 Officetel sale report records
Inputs: region code (first 5 digits of the legal-dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlOffiTrade + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"시군구",
"단지",
"지번",
"년",
"월",
"일",
"전용면적",
"층",
"거래금액",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 시군구, 단지, 지번, 년, 월, 일, 전용면적, 층, 거래금액]],
columns=variables,
)
df = pd.concat([df, data])
# Set Columns
colNames = [
"지역코드", "법정동", "거래일", "시군구", "단지", "지번", "전용면적", "층", "거래금액"
]
# Feature Engineering
try:
if len(df["년"] != 0) & len(df["월"] != 0) & len(df["일"] != 0):
df["거래일"] = df["년"] + "-" + df["월"] + "-" + df["일"]
df["거래일"] = pd.to_datetime(df["거래일"])
df["거래금액"] = pd.to_numeric(df["거래금액"].str.replace(",", ""))
except:
df = pd.DataFrame(columns=colNames)
print("조회할 자료가 없습니다.")
# Arrange columns
df = df[colNames]
df = df.sort_values(["법정동", "거래일"])
df["법정동"] = df["법정동"].str.strip()
df.index = range(len(df))
# Convert to numeric types
cols = df.columns.drop(["법정동", "거래일", "시군구", "단지", "지번"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
return df
except:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("header")
# Error raised even though the request succeeded -> bug in this Python code
if te[0].find("resultCode").text == "00":
print(">>> Python Logic Error. e-mail : <EMAIL>")
# Error reported by the Open API provider
else:
print(">>> Open API Error: {}".format(te[0].find("resultMsg").text))
def OffiRent(self, LAWD_CD, DEAL_YMD):
"""
06 Officetel jeonse / monthly rent report records
Inputs: region code (first 5 digits of the legal-dong code), contract month (YYYYmm)
"""
# URL
url_1 = self.urlOffiRent + "&LAWD_CD=" + str(LAWD_CD)
url_2 = "&DEAL_YMD=" + str(DEAL_YMD)
url_3 = "&numOfRows=99999"
url = url_1 + url_2 + url_3
try:
# Get raw data
result = requests.get(url, verify=False)
# Parsing
xmlsoup = BeautifulSoup(result.text, "lxml-xml")
# Filtering
te = xmlsoup.findAll("item")
# Creating Pandas Data Frame
df = pd.DataFrame()
variables = [
"법정동",
"지역코드",
"시군구",
"단지",
"지번",
"년",
"월",
"일",
"전용면적",
"층",
"보증금",
"월세",
]
for t in te:
for variable in variables:
try:
globals()[variable] = t.find(variable).text
except:
globals()[variable] = np.nan
data = pd.DataFrame(
[[법정동, 지역코드, 시군구, 단지, 지번, 년, 월, 일, 전용면적, 층, 보증금, 월세]],
columns=variables,
)
df = | pd.concat([df, data]) | pandas.concat |
import pandas as pd
import joblib
from sklearn.pipeline import Pipeline
from lr_customer_value.config import config
from lr_customer_value import __version__ as _version
import logging
_logger = logging.getLogger(__name__)
def load_dataset(*, files_list: str) -> pd.DataFrame:
data = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
import unittest
import numpy as np
import pandas as pd
from hadar import LPOptimizer
from hadar.analyzer.result import Index, ResultAnalyzer, IntIndex
from hadar.optimizer.domain.input import Study
from hadar.optimizer.domain.output import (
OutputConsumption,
OutputLink,
OutputNode,
OutputProduction,
Result,
OutputNetwork,
OutputStorage,
OutputConverter,
)
class TestIndex(unittest.TestCase):
def test_no_parameters(self):
self.assertEqual(True, Index(column="i").all)
def test_on_element(self):
i = Index(column="i", index="fr")
self.assertEqual(False, i.all)
self.assertEqual(("fr",), i.index)
def test_list(self):
i = Index(column="i", index=["fr", "be"])
self.assertEqual(False, i.all)
self.assertEqual(("fr", "be"), i.index)
def test_filter(self):
i = Index(column="i", index=["fr", "be"])
df = pd.DataFrame(
data={
"i": ["it", "fr", "fr", "be", "de", "it", "be"],
"a": [0, 1, 2, 3, 4, 5, 6],
}
)
exp = pd.Series(
data=[False, True, True, True, False, False, True],
index=[0, 1, 2, 3, 4, 5, 6],
name="i",
)
pd.testing.assert_series_equal(exp, i.filter(df))
class TestIntIndex(unittest.TestCase):
def test_range(self):
i = IntIndex("i", index=slice(2, 6))
self.assertEqual(False, i.all)
self.assertEqual((2, 3, 4, 5), i.index)
def test_list(self):
i = IntIndex("i", index=[2, 6])
self.assertEqual(False, i.all)
self.assertEqual((2, 6), i.index)
class TestConsumptionAnalyzer(unittest.TestCase):
def setUp(self) -> None:
self.study = (
Study(horizon=3, nb_scn=2)
.network()
.node("a")
.consumption(
cost=10 ** 3, quantity=[[120, 12, 12], [12, 120, 120]], name="load"
)
.consumption(
cost=10 ** 3, quantity=[[130, 13, 13], [13, 130, 130]], name="car"
)
.node("b")
.consumption(
cost=10 ** 3, quantity=[[120, 12, 12], [12, 120, 120]], name="load"
)
.build()
)
out = {
"a": OutputNode(
consumptions=[
OutputConsumption(quantity=[[20, 2, 2], [2, 20, 20]], name="load"),
OutputConsumption(quantity=[[30, 3, 3], [3, 30, 30]], name="car"),
],
productions=[],
storages=[],
links=[],
),
"b": OutputNode(
consumptions=[
OutputConsumption(quantity=[[20, 2, 2], [2, 20, 20]], name="load")
],
productions=[],
storages=[],
links=[],
),
}
self.result = Result(
networks={"default": OutputNetwork(nodes=out)}, converters={}
)
def test_build_consumption(self):
# Expected
exp = pd.DataFrame(
data={
"cost": [10 ** 3] * 18,
"asked": [
120,
12,
12,
12,
120,
120,
130,
13,
13,
13,
130,
130,
120,
12,
12,
12,
120,
120,
],
"given": [
20,
2,
2,
2,
20,
20,
30,
3,
3,
3,
30,
30,
20,
2,
2,
2,
20,
20,
],
"name": ["load"] * 6 + ["car"] * 6 + ["load"] * 6,
"node": ["a"] * 12 + ["b"] * 6,
"network": ["default"] * 18,
"t": [0, 1, 2] * 6,
"scn": [0, 0, 0, 1, 1, 1] * 3,
},
dtype=float,
)
cons = ResultAnalyzer._build_consumption(self.study, self.result)
pd.testing.assert_frame_equal(exp, cons)
def test_aggregate_cons(self):
# Expected
index = pd.Index(data=[0, 1, 2], dtype=float, name="t")
exp_cons = pd.DataFrame(
data={"asked": [120, 12, 12], "cost": [10 ** 3] * 3, "given": [20, 2, 2]},
dtype=float,
index=index,
)
# Test
agg = ResultAnalyzer(study=self.study, result=self.result)
cons = agg.network().scn(0).node("a").consumption("load").time()
pd.testing.assert_frame_equal(exp_cons, cons)
def test_get_elements_inside(self):
agg = ResultAnalyzer(study=self.study, result=self.result)
np.testing.assert_array_equal((2, 0, 0, 0, 0, 0), agg.get_elements_inside("a"))
np.testing.assert_array_equal((1, 0, 0, 0, 0, 0), agg.get_elements_inside("b"))
class TestProductionAnalyzer(unittest.TestCase):
def setUp(self) -> None:
self.study = (
Study(horizon=3, nb_scn=2)
.network()
.node("a")
.production(cost=10, quantity=[[130, 13, 13], [13, 130, 130]], name="prod")
.node("b")
.production(cost=20, quantity=[[110, 11, 11], [11, 110, 110]], name="prod")
.production(
cost=20, quantity=[[120, 12, 12], [12, 120, 120]], name="nuclear"
)
.build()
)
out = {
"a": OutputNode(
productions=[
OutputProduction(quantity=[[30, 3, 3], [3, 30, 30]], name="prod")
],
consumptions=[],
storages=[],
links=[],
),
"b": OutputNode(
productions=[
OutputProduction(quantity=[[10, 1, 1], [1, 10, 10]], name="prod"),
OutputProduction(
quantity=[[20, 2, 2], [2, 20, 20]], name="nuclear"
),
],
consumptions=[],
storages=[],
links=[],
),
}
self.result = Result(
networks={"default": OutputNetwork(nodes=out)}, converters={}
)
def test_build_production(self):
# Expected
exp = pd.DataFrame(
data={
"cost": [10] * 6 + [20] * 12,
"avail": [
130,
13,
13,
13,
130,
130,
110,
11,
11,
11,
110,
110,
120,
12,
12,
12,
120,
120,
],
"used": [30, 3, 3, 3, 30, 30, 10, 1, 1, 1, 10, 10, 20, 2, 2, 2, 20, 20],
"name": ["prod"] * 12 + ["nuclear"] * 6,
"node": ["a"] * 6 + ["b"] * 12,
"network": ["default"] * 18,
"t": [0, 1, 2] * 6,
"scn": [0, 0, 0, 1, 1, 1] * 3,
},
dtype=float,
)
prod = ResultAnalyzer._build_production(self.study, self.result)
pd.testing.assert_frame_equal(exp, prod)
def test_aggregate_prod(self):
# Expected
index = pd.MultiIndex.from_tuples(
(
("a", "prod", 0.0),
("a", "prod", 1.0),
("a", "prod", 2, 0),
("b", "prod", 0.0),
("b", "prod", 1.0),
("b", "prod", 2, 0),
),
names=["node", "name", "t"],
)
exp_cons = pd.DataFrame(
data={
"avail": [130, 13, 13, 110, 11, 11],
"cost": [10, 10, 10, 20, 20, 20],
"used": [30, 3, 3, 10, 1, 1],
},
dtype=float,
index=index,
)
# Test
agg = ResultAnalyzer(study=self.study, result=self.result)
cons = agg.network().scn(0).node(["a", "b"]).production("prod").time()
pd.testing.assert_frame_equal(exp_cons, cons)
def test_get_elements_inside(self):
agg = ResultAnalyzer(study=self.study, result=self.result)
np.testing.assert_array_equal((0, 1, 0, 0, 0, 0), agg.get_elements_inside("a"))
np.testing.assert_array_equal((0, 2, 0, 0, 0, 0), agg.get_elements_inside("b"))
class TestStorageAnalyzer(unittest.TestCase):
def setUp(self) -> None:
self.study = (
Study(horizon=3, nb_scn=2)
.network()
.node("b")
.storage(name="store", capacity=100, flow_in=10, flow_out=20, cost=1)
.build()
)
out = {
"b": OutputNode(
storages=[
OutputStorage(
name="store",
capacity=[[10, 1, 1], [1, 10, 10]],
flow_out=[[20, 2, 2], [2, 20, 20]],
flow_in=[[30, 3, 3], [3, 30, 30]],
)
],
consumptions=[],
productions=[],
links=[],
)
}
self.result = Result(
networks={"default": OutputNetwork(nodes=out)}, converters={}
)
def test_build_storage(self):
# Expected
exp = pd.DataFrame(
data={
"max_capacity": [100] * 6,
"capacity": [10, 1, 1, 1, 10, 10],
"max_flow_in": [10] * 6,
"flow_in": [30, 3, 3, 3, 30, 30],
"max_flow_out": [20] * 6,
"flow_out": [20, 2, 2, 2, 20, 20],
"cost": [1] * 6,
"init_capacity": [0] * 6,
"eff": [0.99] * 6,
"name": ["store"] * 6,
"node": ["b"] * 6,
"network": ["default"] * 6,
"t": [0, 1, 2] * 2,
"scn": [0, 0, 0, 1, 1, 1],
},
dtype=float,
)
stor = ResultAnalyzer._build_storage(self.study, self.result)
pd.testing.assert_frame_equal(exp, stor, check_dtype=False)
def test_aggregate_stor(self):
# Expected
index = pd.MultiIndex.from_tuples(
(("b", "store", 0), ("b", "store", 1), ("b", "store", 2)),
names=["node", "name", "t"],
)
exp_stor = pd.DataFrame(
data={
"capacity": [10, 1, 1],
"cost": [1, 1, 1],
"eff": [0.99] * 3,
"flow_in": [30, 3, 3],
"flow_out": [20, 2, 2],
"init_capacity": [0] * 3,
"max_capacity": [100] * 3,
"max_flow_in": [10] * 3,
"max_flow_out": [20] * 3,
},
index=index,
)
# Test
agg = ResultAnalyzer(study=self.study, result=self.result)
stor = agg.network().scn(0).node().storage("store").time()
pd.testing.assert_frame_equal(exp_stor, stor, check_dtype=False)
def test_get_elements_inside(self):
agg = ResultAnalyzer(study=self.study, result=self.result)
np.testing.assert_array_equal((0, 0, 1, 0, 0, 0), agg.get_elements_inside("b"))
class TestLinkAnalyzer(unittest.TestCase):
def setUp(self) -> None:
self.study = (
Study(horizon=3, nb_scn=2)
.network()
.node("a")
.node("b")
.node("c")
.link(src="a", dest="b", quantity=[[110, 11, 11], [11, 110, 110]], cost=2)
.link(src="a", dest="c", quantity=[[120, 12, 12], [12, 120, 120]], cost=2)
.build()
)
blank_node = OutputNode(consumptions=[], productions=[], storages=[], links=[])
out = {
"a": OutputNode(
consumptions=[],
productions=[],
storages=[],
links=[
OutputLink(dest="b", quantity=[[10, 1, 1], [1, 10, 10]]),
OutputLink(dest="c", quantity=[[20, 2, 2], [2, 20, 20]]),
],
),
"b": blank_node,
"c": blank_node,
}
self.result = Result(
networks={"default": OutputNetwork(nodes=out)}, converters={}
)
def test_build_link(self):
# Expected
exp = pd.DataFrame(
data={
"cost": [2] * 12,
"avail": [110, 11, 11, 11, 110, 110, 120, 12, 12, 12, 120, 120],
"used": [10, 1, 1, 1, 10, 10, 20, 2, 2, 2, 20, 20],
"node": ["a"] * 12,
"dest": ["b"] * 6 + ["c"] * 6,
"network": ["default"] * 12,
"t": [0, 1, 2] * 4,
"scn": [0, 0, 0, 1, 1, 1] * 2,
},
dtype=float,
)
link = ResultAnalyzer._build_link(self.study, self.result)
pd.testing.assert_frame_equal(exp, link)
def test_aggregate_link(self):
# Expected
index = pd.MultiIndex.from_tuples(
(("b", 0.0), ("b", 1.0), ("b", 2, 0), ("c", 0.0), ("c", 1.0), ("c", 2, 0)),
names=["dest", "t"],
)
exp_link = pd.DataFrame(
data={
"avail": [110, 11, 11, 120, 12, 12],
"cost": [2, 2, 2, 2, 2, 2],
"used": [10, 1, 1, 20, 2, 2],
},
dtype=float,
index=index,
)
agg = ResultAnalyzer(study=self.study, result=self.result)
link = agg.network().scn(0).node("a").link(["b", "c"]).time()
pd.testing.assert_frame_equal(exp_link, link)
def test_balance(self):
agg = ResultAnalyzer(study=self.study, result=self.result)
np.testing.assert_array_equal(
[[30, 3, 3], [3, 30, 30]], agg.get_balance(node="a")
)
np.testing.assert_array_equal(
[[-10, -1, -1], [-1, -10, -10]], agg.get_balance(node="b")
)
def test_get_elements_inside(self):
agg = ResultAnalyzer(study=self.study, result=self.result)
np.testing.assert_array_equal((0, 0, 0, 2, 0, 0), agg.get_elements_inside("a"))
class TestConverterAnalyzer(unittest.TestCase):
def setUp(self) -> None:
self.study = (
Study(horizon=3, nb_scn=2)
.network()
.node("a")
.to_converter(name="conv", ratio=2)
.network("elec")
.node("a")
.converter(name="conv", to_network="elec", to_node="a", max=10, cost=1)
.build()
)
conv = OutputConverter(
name="conv",
flow_src={("default", "a"): [[10, 1, 1], [1, 10, 10]]},
flow_dest=[[20, 2, 2], [2, 20, 20]],
)
blank_node = OutputNode(consumptions=[], productions=[], storages=[], links=[])
self.result = Result(
networks={
"default": OutputNetwork(nodes={"a": blank_node}),
"elec": OutputNetwork(nodes={"a": blank_node}),
},
converters={"conv": conv},
)
def test_build_dest_converter(self):
# Expected
exp = pd.DataFrame(
data={
"name": ["conv"] * 6,
"network": ["elec"] * 6,
"node": ["a"] * 6,
"flow": [20, 2, 2, 2, 20, 20],
"cost": [1] * 6,
"max": [10] * 6,
"t": [0, 1, 2] * 2,
"scn": [0, 0, 0, 1, 1, 1],
}
)
conv = ResultAnalyzer._build_dest_converter(self.study, self.result)
pd.testing.assert_frame_equal(exp, conv, check_dtype=False)
def test_build_src_converter(self):
# Expected
exp = pd.DataFrame(
data={
"name": ["conv"] * 6,
"network": ["default"] * 6,
"node": ["a"] * 6,
"ratio": [2] * 6,
"flow": [10, 1, 1, 1, 10, 10],
"max": [5] * 6,
"t": [0, 1, 2] * 2,
"scn": [0, 0, 0, 1, 1, 1],
}
)
conv = ResultAnalyzer._build_src_converter(self.study, self.result)
| pd.testing.assert_frame_equal(exp, conv, check_dtype=False) | pandas.testing.assert_frame_equal |
import time
import os
import io
import json
import shutil
import zipfile
import pathlib
import pandas as pd
import boto3
import datetime
import botocore
from dateutil.parser import parse
s3 = boto3.client('s3')
lookoutmetrics_client = boto3.client( "lookoutmetrics")
def lambda_handler(event, context):
#Function to format the date given by the event
def datetime_from_string(s):
try:
dt = datetime.datetime.fromisoformat(s.split("[")[0])
except ValueError:
dt = datetime.datetime.strptime(s.split("[")[0], "%Y-%m-%dT%H:%MZ")
return dt
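# Example: an input like '2019-01-02T19:45Z[UTC]' (assumed format, with an
# optional trailing '[zone]' suffix) is trimmed at '[' and then parsed either by
# fromisoformat or by the '%Y-%m-%dT%H:%MZ' fallback.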
#Function to update the metricValue_AnomalyScore csv in the case that one already exists
def update_Anomaly_CSV(event,key,bucket,obj,response):
print('object exist')
#Reading the existing file
original_df = pd.read_csv(obj.get("Body"), index_col=False)
file2 = original_df.to_dict('list')
#getting the needed data
metricList = response['MetricList']
dimensionList = response['DimensionList']
metricName = event['impactedMetric']['metricName']
#Column names generator
data2={}
data2['key']=[]
data2['Timestamp'] =[]
for i in dimensionList:
data2[i]=[]
# data2[i]=[]
for i in metricList:
data2[i['MetricName']+'AnomalyMetricValue']=[]
data2[i['MetricName']+'GroupScore']=[]
#Data collection from the event for the CSV
for i in event['impactedMetric']['relevantTimeSeries']:
for a in i['dimensions']:
data2[a['dimensionName']].append(a['dimensionValue'])
data2[metricName+'AnomalyMetricValue'].append(i['metricValue'])
data2[metricName+'GroupScore'].append(event['anomalyScore'])
data2['Timestamp'].append(start_time)
nRow=len(data2['Timestamp'])
nDimension = len(dimensionList)
#key generator
i=0
while i<nRow:
value=''
for a in dimensionList:
value+=str(data2[a][i])
value= str(data2['Timestamp'][i])+value
data2['key'].append(value)
i=i+1
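# Each key concatenates the timestamp with every dimension value, e.g. a
# hypothetical row could produce '2021-05-01 09:00:00us-east-1retail'; the key is
# used below to match new anomaly rows against rows already stored in the CSV.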
c=0
#Check whether each new key already exists in the original file; if it does, fill in that row, otherwise append the new data
for n in data2['key']:
if n in file2['key']:
where=file2['key'].index(n)
file2[metricName+'AnomalyMetricValue'][where] = data2[metricName+'AnomalyMetricValue'][c]
file2[metricName+'GroupScore'][where] =data2[metricName+'GroupScore'][c]
else:
file2['key'].append(data2['key'][c])
for i in dimensionList:
file2[i].append(data2[i][c])
file2[metricName+'AnomalyMetricValue'].append(data2[metricName+'AnomalyMetricValue'][c])
file2[metricName+'GroupScore'].append(data2[metricName+'GroupScore'][c])
file2['Timestamp'].append(dateTime)
c+=1
df = pd.DataFrame.from_dict(data=file2, orient='index')
df2 = df.transpose()
with io.StringIO() as filename:
df2.to_csv(filename, index=False, encoding='utf-8', date_format='%Y-%m-%d %H:%M:%S')
response = s3.put_object(
Bucket=bucket, Key=key, Body=filename.getvalue()
)
print('updated Anomaly csv saved')
#If the metricValue_AnomalyScore file does not exist it will create one
def generate_Anomaly_CSV(event,key,bucket,response):
#getting the needed data
metricList = response['MetricList']
dimensionList = response['DimensionList']
metricName = event['impactedMetric']['metricName']
pd.options.mode.use_inf_as_na = True
#Column names generator
data2={}
data2['key']=[]
data2['Timestamp'] =[]
for i in dimensionList:
data2[i]=[]
data2[i]=[]
for i in metricList:
data2[i['MetricName']+'AnomalyMetricValue']=[]
data2[i['MetricName']+'GroupScore']=[]
#Data collection for the CSV
for i in event['impactedMetric']['relevantTimeSeries']:
for a in i['dimensions']:
data2[a['dimensionName']].append(a['dimensionValue'])
data2[metricName+'AnomalyMetricValue'].append(i['metricValue'])
data2[metricName+'GroupScore'].append(event['anomalyScore'])
data2['Timestamp'].append(start_time)
nRow=len(data2['Timestamp'])
#key generator
i=0
while i<nRow:
value=''
for a in dimensionList:
value+=str(data2[a][i])
value= str(data2['Timestamp'][i])+value
data2['key'].append(value)
i+=1
df = pd.DataFrame.from_dict(data=data2, orient='index')
df2 = df.transpose()
with io.StringIO() as filename:
df2.to_csv(filename, index=False, encoding='utf-8', date_format='%Y-%m-%d %H:%M:%S')
response = s3.put_object(
Bucket=bucket, Key=key, Body=filename.getvalue()
)
print('Anomaly csv saved in', key)
#Checks if the metricValue_AnomalyScore file already exists
def Anomaly_CSV_Check(event,key,bucket,response):
try:
obj = s3.get_object(Bucket=bucket,Key=key)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code']=='404' or e.response['Error']['Code']=='NoSuchKey':
print('the Anomaly csv file does not exist and we will generate the very first file now')
generate_Anomaly_CSV(event,key,bucket,response)
else:
print('something else happened')
print('error is', e.response)
raise
else:
update_Anomaly_CSV(event,key,bucket,obj,response)
#Updates the dimensionContributions csv file if it exists
def update_Dimension_CSV(event,key,obj,bucket):
print('object exist')
original_df = pd.read_csv(obj.get("Body"), index_col=False)
file = original_df.to_dict('list')
#Column Titles generation
data = {}
data ['Timestamp'] =[]
data['metricName'] =[]
data['dimensionName'] =[]
data['dimensionValue'] =[]
data['valueContribution'] =[]
#Data collection for the CSV
for i in event['impactedMetric']['dimensionContribution']:
for a in i['dimensionValueContributions']:
data['Timestamp'].append(start_time)
data['dimensionName'].append(i['dimensionName'])
data['dimensionValue'].append(a['dimensionValue'])
data['valueContribution'].append(a['valueContribution'])
data['metricName'].append(event['impactedMetric']['metricName'])
df=pd.DataFrame(data=data)
df2 = pd.DataFrame(data=file)
result = pd.concat([df2, df])
with io.StringIO() as filename:
result.to_csv(filename, index=False, encoding='utf-8', date_format='%Y-%m-%d %H:%M:%S')
response = s3.put_object(
Bucket=bucket, Key=key, Body=filename.getvalue()
)
print('updated Dimension csv saved')
#Generates the dimensionContributions csv file
def generate_Dimension_CSV(event,key,bucket):
#Column Titles generator
data = {}
data ['Timestamp'] =[]
data['metricName'] =[]
data['dimensionName'] =[]
data['dimensionValue'] =[]
data['valueContribution'] =[]
#Data collection for the CSV
for i in event['impactedMetric']['dimensionContribution']:
for a in i['dimensionValueContributions']:
data['Timestamp'].append(start_time)
data['dimensionName'].append(i['dimensionName'])
data['dimensionValue'].append(a['dimensionValue'])
data['valueContribution'].append(a['valueContribution'])
data['metricName'].append(event['impactedMetric']['metricName'])
df= | pd.DataFrame(data=data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# Prerequisite:
# 1. the database must already contain all of last week's data, through last Sunday.
# e.g. if today is Wed 9/26/18, it must contain data through Sunday 9/23/18
#
# The program uses ISO Calendar:
# 1. first day and last day of the week are respectively Monday(1) and Sunday(7)
# 2. the last few days could be counted as in the first week of the next year
# e.g. 2014-12-31 is in the week01 of 2015
# vice versa:
# e.g. 2016-01-01 is in the week53 of 2015
#
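# For example, datetime.date(2014, 12, 31).isocalendar() returns (2015, 1, 3)
# and datetime.date(2016, 1, 1).isocalendar() returns (2015, 53, 5).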
# If Gmail is used to receive Outlier Alert emails,
# the gmail account's 2 step verification has to be disabled,
# and access from less secure apps should be allowed.
#
# Environment Variables:
# the following variables need to be set up in airflow.cfg
# smtp_host = 'smtp.gmail.com'
# smtp_starttls = True
# smtp_ssl = True
# smtp_user = 'email address owner's email account'
# smtp_password = '<PASSWORD>'
# smtp_port = '587'
# smtp_mail_from = 'email address to send from'
# the following variables need to be set up in airflow's webserver UI: Admin -> Variables
# email_to = 'address1, address2'
# email_cc = 'address3, address4'
"""
This DAG is to perform Outlier Detection for each individual Council District of LA city
"""
import logging
import os
from datetime import datetime, timedelta
import airflow
import altair as alt
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from airflow.operators.postgres_operator import PostgresOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.email import send_email
from dateutil.relativedelta import relativedelta
# a csv file with this filename will be saved from a copy of postgres table
filename = "/tmp/myla311.csv"
prefix = "/tmp/"
# get the current Council District data and init cd_dict
cd_list = pd.read_csv(
"https://opendata.arcgis.com/datasets/76104f230e384f38871eb3c4782f903d_13.csv",
index_col=False,
)
cd_list.OBJECTID = cd_list.OBJECTID.astype(float)
cd_dict = {}
for index, row in cd_list.iterrows():
cd_dict[row.OBJECTID] = row.NAME
no_cd = len(cd_dict)
# email parameters. Notice: email addresses need to be updated in production
message = "This last week's Outliers for Council District {} of LA City:"
test = "<EMAIL>,<EMAIL>"
test1 = "<EMAIL>"
email_to = {key: test for key in cd_dict.keys()}
email_cc = {key: test1 for key in cd_dict.keys()}
subject = "[Alert] MyLA311 Data Outliers"
# outlier type identifiers
INDIV_HIGH_OUTLIER = "HIGH INDIVIDUAL REQUEST TYPE OUTLIER"
INDIV_LOW_OUTLIER = "LOW INDIVIDUAL REQUEST TYPE OUTLIER"
TOTAL_HIGH_OUTLIER = "HIGH TOTAL OUTLER"
TOTAL_LOW_OUTLIER = "LOW TOTAL OUTLIER"
DIFF_HIGH_OUTLIER = "HIGH TOTAL DIFF OUTLIER"
DIFF_LOW_OUTLIER = "LOW TOTAL DIFF OUTLIER"
HIGH_PROCESS_TIME_OUTLIER = "HIGH PROCESS TIME OUTLIER"
LOW_PROCESS_TIME_OUTLIER = "LOW PROCESS TIME OUTLIER"
def make_save_graph(df, cd, col, title):
line = (
alt.Chart(df.reset_index(), title=" - ".join([title, col]))
.mark_line()
.encode(x="year_week:O", y=col + ":Q")
)
rule = (
alt.Chart(df)
.mark_rule(color="red")
.encode(alt.Y("average({})".format(col)), size=alt.value(1))
)
graph = line + rule
filename = "chart-cd{}-{}.png".format(
int(cd), col.replace("/", "-").replace(" ", "-")
)
graph.save(prefix + filename)
return filename
def make_save_boxplot(df, cd, point, title):
# using seaborn
sns.set_style("whitegrid")
fig, ax = plt.subplots(figsize=(8, 8))
ax.set_title(title, fontsize=15)
sns.boxplot(ax=ax, x=df, linewidth=1, color="lightblue")
plt.scatter(point, 0, marker="o", s=100, c="red", linewidths=5, label="Outlier")
ax.legend()
filename = "chart-cd{}-Proc-Time-{}.png".format(
int(cd), title.replace("/", "-").replace(" ", "-")
)
plt.savefig(prefix + filename)
plt.close()
return filename
def detect_outliers(filename, **kwargs):
"""
Outlier Detector that detects the following types of outliers:
1. number of Individual Request Type per week, high and low outliers
2. number of Total requests per week, high and low outliers
3. the difference between last week and the week before
4. request process time high and low outliers
"""
# Retrieve data
logging.info("Data is being read from {}".format(filename))
df = pd.read_csv(filename, index_col=False)
logging.info(
"Data Reading is done from {}. Performing outlier detection".format(filename)
)
df.drop(
columns=["location_address", "location_city", "location_state", "location_zip"],
inplace=True,
)
    # change data type from object to datetime
df["createddate"] = pd.to_datetime(df["createddate"], errors="coerce")
df["closeddate"] = | pd.to_datetime(df["closeddate"], errors="coerce") | pandas.to_datetime |
# Get Open Data resource
# Load required packages
import re
import requests
import pandas as pd
import io
# Open Data user agent
def opendata_ua():
"""
"This is used internally to return a standard useragent, supplying a user agent means requests using the package
can be tracked more easily"
:return: a user agent string
"""
headers = {
"User-Agent": "https://github.com/Public-Health-Scotland/py-phsopendata"
}
return headers
# Check if a resource ID is valid
def check_res_id(res_id):
"""
"Used to attempt to validate a res_id before submitting to the API"
:param res_id: a resource ID
:return: TRUE/FALSE indicating the validity of the res_id
"""
res_id_regex = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
if not isinstance(res_id, str):
return False
if not re.search(res_id_regex, res_id):
return False
else:
return True
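# Illustrative usage of check_res_id (example values, not from the source):
#   check_res_id("a965ee86-0fb4-4f02-bb53-b66f92e403c3")  # True - matches the 8-4-4-4-12 lowercase hex pattern
#   check_res_id("not-a-res-id")                          # False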
# Create the URL for the datastore search end-point
def ds_search_url():
"""
"Creates the URL for the datastore search end-point"
:return: a URL
"""
search_url = "https://www.opendata.nhs.scot/api/3/action/datastore_search"
return search_url
# Create the URL for the datastore dump end-point
def ds_dump_url(res_id):
"""
"Creates the URL for the datastore dump end-point"
:param res_id: a resource ID
:return: a URL
"""
dump_url = "https://www.opendata.nhs.scot/datastore/dump/%s?bom=true" % res_id
return dump_url
# Get Open Data resource
def get_resource(res_id, rows=None):
"""
"Used to extract a single resource from an open dataset by resource id (res_id)"
:param res_id: The resource ID as found on https://www.opendata.nhs.scot/ NHS Open Data platform
:param rows: (optional) specify the max number of rows to return use this when testing code to reduce the size of
the request it will default to all data
:return: a Pandas dataframe with the data from the NHS Open Data platform
"""
if not check_res_id(res_id):
raise ValueError("The resource ID supplied ('%s') is invalid" % res_id)
# Define the User Agent to be used for the API call
ua = opendata_ua()
# Return full dataset based on rows argument
if rows is None or rows > 99999:
if rows is not None:
print("Queries for more than 99,999 rows of data will return the full resource.")
try:
response = requests.get(url=ds_dump_url(res_id), headers=ua)
except requests.exceptions.RequestException as e:
raise SystemExit(e)
data_raw = response.text
data = pd.read_csv(io.StringIO(data_raw))
del data['_id']
return data
# Return dataset with specified number of rows
else:
url = "%s?id=%s&limit=%s" % (ds_search_url(), res_id, rows)
try:
response = requests.get(url=url, headers=ua)
except requests.exceptions.RequestException as e:
raise SystemExit(e)
data_raw = response.json()["result"]["records"]
        data = pd.DataFrame(data_raw)
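        # Sketch of a likely continuation (the snippet is truncated here): mirroring the
        # full-dump branch above, the '_id' column would be dropped before returning.
        # This is an assumption based on that parallel code, not the original implementation.
        if '_id' in data.columns:
            del data['_id']
        return data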
import numpy as np
import pandas as pd
import sys, os, getopt
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
import argparse
parser = argparse.ArgumentParser(description='TMA patches extractor')
parser.add_argument('-a','--APPROACH', type=str, default='ssl', help='teacher/student approach: ssl (semi-supervised), swsl (semi-weakly supervised)')
parser.add_argument('-d','--DATASET', type=str, default='train', help='dataset to extract patches (train, valid, test)')
args = parser.parse_args()
patch_dir = 'LOCAL/PATH/../Teacher_Student_models/Weakly_Annotated_patches/'+args.DATASET+'_densely/'
csv_filename = patch_dir+'csv_'+args.DATASET+'_densely.csv'
csv_data = pd.read_csv(csv_filename,header=None).values
filenames = []
gps = []
dicts = []
if (args.APPROACH=='ssl'):
approach = 'Semi_Supervised'
elif (args.APPROACH=='swsl'):
approach = 'Semi_Weakly_Supervised'
def create_dict(csv_local):
#br_dict = []
for i in range(len(csv_local)):
br={'filename':csv_local[i,0],'pgp':csv_local[i,1],'sgp':csv_local[i,2],'p_b':csv_local[i,3],'p_gp3':csv_local[i,4],'p_gp4':csv_local[i,5],'p_gp5':csv_local[i,6]}
dicts.append(br)
#return br_dict
def find_csv_patches(line_csv):
dir_name = line_csv[0]
pgp = line_csv[1]
sgp = line_csv[2]
name_dir = os.path.normpath(dir_name).split(os.sep)[-1]
#local_csv_path = patch_dir+name_dir+'/'+name_dir+'_'+NUM_PATCHES_str+'_probs.csv'
local_csv_path = patch_dir+name_dir+'/'+name_dir+'_densely_probs.csv'
csv_local = pd.read_csv(local_csv_path,header=None).values
return csv_local, pgp, sgp
def get_key(x):
if (x==1):
k = 'p_gp3'
elif (x==2):
k = 'p_gp4'
elif (x==3):
k = 'p_gp5'
else:
k = 'p_b'
return k
def sort_dict(array_dict, pattern, threshold):
x = get_key(pattern)
new_array = np.array(sorted(array_dict, key=lambda k: k[x],reverse=True))
p_max = 1.0
i = 0
while (i<len(new_array) and p_max>threshold):
p_max = new_array[i][x]
filenames.append(new_array[i]['filename'])
gps.append(pattern)
i = i+1
#print("pattern: " + str(pattern))
#plot_images(new_array[:10])
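# NOTE: the two-argument sort_dict defined below shadows the threshold-based version above,
# so only the later definition is available to the rest of the script.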
def sort_dict(array_dict, pattern):
x = get_key(pattern)
new_array = np.array(sorted(array_dict, key=lambda k: k[x],reverse=True))
return new_array
def analyze_csv(csv_file):
for l in csv_file:
#print(l)
try:
csv_local, pgp, sgp = find_csv_patches(l)
create_dict(csv_local)
#extract_patches(array_dict, pgp, sgp)
#dicts.append(array_dict)
except(FileNotFoundError, IOError):
#print("Wrong file or file path")
pass
analyze_csv(csv_data)
dicts = np.asarray(dicts).flatten()
print(dicts.shape)
def lower_bound(dicts,pattern):
i = 0
x = get_key(pattern)
#print(x)
threshold = 0.5
print(x)
sorted_dicts = np.array(sorted(dicts, key=lambda k: k[x],reverse=True))
    while i < len(sorted_dicts) and sorted_dicts[i][x] > threshold:
i = i+1
return i
def sort_and_extract(pattern,amount):
x = get_key(pattern)
new_array = np.array(sorted(dicts, key=lambda k: k[x],reverse=True))
i = 0
while (i<amount):
filenames.append(new_array[i]['filename'])
gps.append(pattern)
i = i+1
#SORTED
#print(dicts)
MAX_AMOUNT_BENIGN = lower_bound(dicts,0)
MAX_AMOUNT_GP3 = lower_bound(dicts,1)
MAX_AMOUNT_GP4 = lower_bound(dicts,2)
MAX_AMOUNT_GP5 = lower_bound(dicts,3)
print(MAX_AMOUNT_BENIGN,MAX_AMOUNT_GP3,MAX_AMOUNT_GP4,MAX_AMOUNT_GP5)
sort_and_extract(0,MAX_AMOUNT_BENIGN)
sort_and_extract(1,MAX_AMOUNT_GP3)
sort_and_extract(2,MAX_AMOUNT_GP4)
sort_and_extract(3,MAX_AMOUNT_GP5)
unique, counts = np.unique(gps, return_counts=True)
print(dict(zip(unique, counts)))
#save file without probabilities
new_csv_filename = patch_dir+'csv_'+args.DATASET+'_densely_semi_annotated_fixed.csv'
File = {'filename':filenames,'gleason_pattern':gps}
df = pd.DataFrame(File,columns=['filename','gleason_pattern'])
np.savetxt(new_csv_filename, df.values, fmt='%s',delimiter=',')
print("NEW CSV SAVED")
print("CREATE SUBSETS")
# CREATE SUBSETS
csv_data = pd.read_csv(new_csv_filename, header=None)
import datasets
import pandas as pd
from model_code.generator_bart_qa_answer import qa_s2s_generate_answers
from model_code.generator_bart_qa_train import load_support_doc, make_qa_s2s_model
eli5c = datasets.load_dataset('jsgao/eli5_category')
eli5c_train_docs = load_support_doc('support_docs/eli5c_train_docs.dat')
eli5c_val1_docs = load_support_doc('support_docs/eli5c_val1_docs.dat')
eli5c_val2_docs = load_support_doc('support_docs/eli5c_val2_docs.dat')
tokenizer, model = make_qa_s2s_model(from_file='models/eli5c_bart_model_0.pth')
save_name = 'examples/bart_answer_epoch0.csv'
def gen_answers(dataset, indexes, docs, subset_name, qa_model, qa_tokenizer, results):
for i in indexes:
question = dataset[i]
doc = docs[question['q_id']]
# concatenate question and support document into BART input
question_doc = 'question: {} context: {}'.format(question['title'], doc)
# generate an answer with beam search
answer1, answer2, answer3 = qa_s2s_generate_answers(question_doc, qa_model, qa_tokenizer)
results['Question'] += [question['title']]
results['Answer1'] += [answer1]
results['Answer2'] += [answer2]
results['Answer3'] += [answer3]
results['Subset'] += [subset_name]
qa_results = {
'Question': [],
'Answer1': [],
'Answer2': [],
'Answer3': [],
'Subset': [],
}
gen_answers(eli5c['train'], [12345, 15432, 51232, 57282], eli5c_train_docs, 'train', model, tokenizer, qa_results)
gen_answers(eli5c['validation1'], [0, 123, 3234], eli5c_val1_docs, 'val1', model, tokenizer, qa_results)
gen_answers(eli5c['validation2'], [2, 644, 1476], eli5c_val2_docs, 'val2', model, tokenizer, qa_results)
df = pd.DataFrame(qa_results)
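# Likely continuation (the snippet is truncated here): write the collected answers to the
# `save_name` CSV defined above. The exact to_csv arguments are an assumption.
df.to_csv(save_name, index=False)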
import os
import copy
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import matplotlib.dates as mdates
from datetime import date, timedelta, datetime
import seaborn as sns
import geopandas as gpd
import matplotlib.colors as colors
from plotting.colors import load_color_palette
mpl.rcParams['pdf.fonttype'] = 42
LL_date = '210412'
idph_data_path = '/Volumes/fsmresfiles/PrevMed/Covid-19-Modeling/IDPH line list'
cleaned_line_list_fname = os.path.join(idph_data_path,
'LL_%s_JGcleaned_no_race.csv' % LL_date)
box_data_path = '/Users/jlg1657/Box/NU-malaria-team/data/covid_IDPH'
project_path = '/Users/jlg1657/Box/NU-malaria-team/projects/covid_chicago'
plot_path = os.path.join(project_path, 'Plots + Graphs', '_trend_tracking')
emr_fname = os.path.join(box_data_path, 'emresource_by_region.csv')
spec_coll_fname = os.path.join(box_data_path, 'Corona virus reports', '%s_LL_cases_by_EMS_spec_collection.csv' % LL_date)
shp_path = os.path.join(box_data_path, 'shapefiles')
def load_cleaned_line_list() :
df = pd.read_csv(cleaned_line_list_fname)
return df
def make_heatmap(ax, adf, col) :
palette = sns.color_palette('RdYlBu_r', 101)
df = adf.dropna(subset=[col])
df = df.groupby([col, 'EMS'])['id'].agg(len).reset_index()
df = df.rename(columns={'id' : col,
col : 'date'})
df['date'] = pd.to_datetime(df['date'])
df = df.sort_values(by=['EMS', 'date'])
ax.fill_between([np.min(df['date']), np.max(df['date']) + timedelta(days=1)],
[0.5, 0.5], [11.5, 11.5], linewidth=0, color=palette[0])
for ems, edf in df.groupby('EMS') :
max_in_col = np.max(edf[col])
print(ems, max_in_col)
for r, row in edf.iterrows() :
ax.fill_between([row['date'], row['date'] + timedelta(days=1)],
[ems-0.5, ems-0.5], [ems+0.5, ems+0.5],
color=palette[int(row[col]/max_in_col*100)],
linewidth=0)
ax.set_title(col)
ax.set_ylabel('EMS region')
formatter = mdates.DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(mdates.MonthLocator())
def heatmap() :
adf = load_cleaned_line_list()
fig = plt.figure(figsize=(10,5))
fig.subplots_adjust(left=0.05, right=0.97)
cols = ['specimen_collection', 'deceased_date']
for c, col in enumerate(cols) :
ax = fig.add_subplot(1,len(cols),c+1)
make_heatmap(ax, adf, col)
plt.savefig(os.path.join(plot_path, 'EMS_cases_deaths_heatmap_%sLL.png' % LL_date))
plt.show()
def aggregate_to_date_spec_collection() :
adf = load_cleaned_line_list()
col = 'specimen_collection'
df = adf.dropna(subset=[col])
df = df.groupby([col, 'EMS'])['id'].agg(len).reset_index()
df = df.rename(columns={'id' : col,
col : 'date'})
df = df.sort_values(by=['EMS', 'date'])
df.to_csv(spec_coll_fname, index=False)
def plot_EMS_by_line(colname) :
df = pd.read_csv(os.path.join(box_data_path, 'Cleaned Data', '%s_jg_aggregated_covidregion.csv' % LL_date))
df = df[df['covid_region'].isin(range(1,12))]
df['date'] = pd.to_datetime(df['date'])
# df = df[(df['date'] > date(2020, 2, 29)) & (df['date'] < date(2021, 1, 1))]
col = 'moving_ave'
sns.set_style('whitegrid', {'axes.linewidth' : 0.5})
fig = plt.figure(figsize=(11,6))
fig.subplots_adjust(left=0.07, right=0.97, bottom=0.05, top=0.95, hspace=0.3, wspace=0.25)
palette = sns.color_palette('Set1')
formatter = mdates.DateFormatter("%m-%d")
for e, (ems, edf) in enumerate(df.groupby('covid_region')) :
ax = fig.add_subplot(3,4,e+1)
edf['moving_ave'] = edf[colname].rolling(window=7, center=False).mean()
max_in_col = np.max(edf[col])
ax.plot(edf['date'], edf[col], color=palette[0], label=ems)
ax.fill_between(edf['date'].values, [0]*len(edf[col]), edf[col],
color=palette[0], linewidth=0, alpha=0.3)
ax.set_title('region %d' % ems)
ax.set_ylim(0, max_in_col*1.05)
ax.set_xlim(date(2020,3,10), np.max(df['date']))
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(mdates.MonthLocator())
if e%4 == 0 :
ax.set_ylabel(colname)
fig.suptitle(colname)
plt.savefig(os.path.join(plot_path, 'covid_region_%s_%sLL.png' % (colname, LL_date)))
# plt.savefig(os.path.join(plot_path, 'covid_region_%s_%sLL_2020.pdf' % (colname, LL_date)), format='PDF')
def format_ax(ax, name) :
ax.set_title(name)
ax.set_xticks([])
ax.set_yticks([])
ax.axis('off')
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, vcenter=None, clip=False):
self.vcenter = vcenter
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.vcenter, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
def plot_ratio_ems() :
def get_ratio(adf, ems, w):
edf = adf[adf['EMS'] == ems]
col = 'specimen_collection'
d = edf[col].values
if w == 0:
recent = np.mean(d[-7:])
else:
recent = np.mean(d[-7 * (w + 1):-7 * w])
back = np.mean(d[-7 * (w + 2):-7 * (w + 1)])
return recent / back
df = pd.read_csv(spec_coll_fname)
df['date'] = pd.to_datetime(df['date'])
max_date = date(2020, 7, 8)
df = df[df['date'] <= max_date]
ems_shp = gpd.read_file(os.path.join(shp_path, 'EMS_Regions', 'EMS_Regions.shp'))
ems_shp['REGION'] = ems_shp['REGION'].astype(int)
fig = plt.figure(figsize=(12, 10))
fig.subplots_adjust(top=0.95)
vmin, vmax = 0.4, 3
norm = MidpointNormalize(vmin=vmin, vcenter=1, vmax=vmax)
for week in range(6) :
ax = fig.add_subplot(2,3,6-week)
format_ax(ax, '%d weeks ago vs %d weeks ago' % (week, week+1))
ems_shp['ratio'] = ems_shp['REGION'].apply(lambda x : get_ratio(df, x, week))
ems_shp.plot(column='ratio', ax=ax, cmap='RdYlBu_r', edgecolor='0.8',
linewidth=0.8, legend=False, norm=norm)
sm = plt.cm.ScalarMappable(cmap='RdYlBu_r', norm=norm)
sm._A = []
cbar = fig.colorbar(sm, ax=ax)
fig.suptitle('week over week ratio of cases by specimen collection date\nLL data ending ' + str(max_date))
plt.savefig(os.path.join(plot_path, 'EMS_weekly_case_ratio_%sLL.png' % LL_date))
def load_county_map_with_public_data() :
from public_idph_data import load_county_cases
df = load_county_cases()
county_shp = gpd.read_file(os.path.join(shp_path, 'IL_BNDY_County', 'IL_BNDY_County_Py.shp'))
cols = ['Positive_Cases', 'Deaths', 'Tested']
sdf = df[df['County'].isin(['Cook', 'Chicago'])]
sdf = sdf.groupby('update_date')[cols].agg(np.sum).reset_index()
sdf['County'] = 'Cook'
df = df[~df['County'].isin(['Cook', 'Chicago'])]
df = pd.concat([df, sdf], sort=True)
df['County'] = df['County'].apply(lambda x : x.upper())
df.loc[df['County'] == 'DE WITT', 'County'] = 'DEWITT'
# ds_shp = pd.merge(left=county_shp, right=df, left_on='COUNTY_NAM', right_on='County')
df = df.sort_values(by=['County', 'update_date'])
return county_shp, df
def plot_ratio_county() :
ds_shp, df = load_county_map_with_public_data()
max_date = np.max(df['update_date'])
fig = plt.figure(figsize=(12, 10))
fig.subplots_adjust(top=0.95)
vmin, vmax = 0, 3
norm = MidpointNormalize(vmin=vmin, vcenter=1, vmax=vmax)
def get_ratio(adf, county, w):
cdf = adf[adf['County'] == county.upper()]
# if len(cdf) == 0 :
# return 100
cdf['daily_pos'] = np.insert(np.diff(cdf['Positive_Cases']), 0, 0)
d = cdf['daily_pos'].values
if w == 0:
recent = np.mean(d[-7:])
else:
recent = np.mean(d[-7 * (w + 1):-7 * w])
back = np.mean(d[-7 * (w + 2):-7 * (w + 1)])
if back == 0 and recent == 0 :
return -1
if back == 0 :
return vmax
return min([recent / back, vmax])
for week in range(6) :
ax = fig.add_subplot(2,3,6-week)
format_ax(ax, '%d weeks ago vs %d weeks ago' % (week, week+1))
ds_shp['ratio'] = ds_shp['COUNTY_NAM'].apply(lambda x : get_ratio(df, x, week))
ds_shp.plot(ax=ax, color='#969696', edgecolor='0.8',
linewidth=0.8, legend=False)
pdf = ds_shp[ds_shp['ratio'] == -1]
pdf.plot(ax=ax, color='#313695', edgecolor='0.8',
linewidth=0.8, legend=False)
pdf = ds_shp[ds_shp['ratio'] >= 0]
pdf.plot(column='ratio', ax=ax, cmap='RdYlBu_r', edgecolor='0.8',
linewidth=0.8, legend=False, norm=norm)
sm = plt.cm.ScalarMappable(cmap='RdYlBu_r', norm=norm)
sm._A = []
cbar = fig.colorbar(sm, ax=ax)
fig.suptitle('week over week ratio of cases\npublic data ending ' + str(max_date))
plt.savefig(os.path.join(plot_path, 'county_weekly_case_ratio.png'))
def plot_LL_all_IL() :
df = pd.read_csv(os.path.join(box_data_path, 'Cleaned Data', '%s_jg_aggregated_covidregion.csv' % LL_date))
df = df.groupby('date')[['cases', 'deaths', 'admissions']].agg(np.sum).reset_index()
df['date'] = pd.to_datetime(df['date'])
df = df.sort_values(by='date')
df = df[df['date'] >= date(2020, 3, 15)]
palette = load_color_palette('wes')
formatter = mdates.DateFormatter("%m-%d")
sns.set_style('whitegrid', {'axes.linewidth' : 0.5})
fig = plt.figure(figsize=(8,6))
fig.subplots_adjust(left=0.1, right=0.97, bottom=0.05, top=0.97)
def plot_data(adf, ax, col, color) :
ax.bar(adf['date'].values, adf[col],
align='center', color=color, linewidth=0, alpha=0.5)
adf['moving_ave'] = adf[col].rolling(window=7, center=True).mean()
ax.plot(adf['date'], adf['moving_ave'], '-', color=color)
ax.set_ylabel('positives')
ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.set_ylabel(col)
ax = fig.add_subplot(3,1,1)
plot_data(df, ax, 'cases', palette[0])
ax = fig.add_subplot(3,1,2)
plot_data(df, ax, 'admissions', palette[4])
ax = fig.add_subplot(3,1,3)
plot_data(df, ax, 'deaths', palette[3])
fig.savefig(os.path.join(plot_path, 'IL_cases_deaths_LL%s.png' % LL_date))
fig.savefig(os.path.join(plot_path, 'IL_cases_deaths_LL%s.pdf' % LL_date), format='PDF')
def combo_LL_emr() :
ldf = pd.read_csv(os.path.join(box_data_path, 'Cleaned Data', '%s_jg_aggregated_ems.csv' % LL_date))
edf = pd.read_csv(os.path.join(box_data_path, 'Corona virus reports', 'emresource_by_region.csv'))
edf['date'] = pd.to_datetime(edf['date_of_extract'])
edf = edf.rename(columns={'region' : 'EMS'})
edf = edf[['date', 'covid_non_icu', 'confirmed_covid_icu', 'EMS']]
ldf['date'] = pd.to_datetime(ldf['date'])
df = pd.merge(left=ldf, right=edf, on=['date', 'EMS'], how='outer')
df = df.sort_values(by=['EMS', 'date'])
df[df['EMS'] == 11].to_csv(os.path.join(box_data_path, 'Cleaned Data', 'LL_EMR_%s_EMS11.csv' % LL_date), index=False)
def weekly_deaths() :
df = pd.read_csv(os.path.join(box_data_path, 'Cleaned Data', '%s_jg_aggregated_covidregion.csv' % LL_date))
df['date'] = pd.to_datetime(df['date'])
df = df[df['date'] >= date(2020, 3, 1)]
df = df[df['deaths'] > 0]
firstday = np.min(df['date'])
df['week'] = df['date'].apply(lambda x : int((x - firstday).days/7))
df = df.groupby('week')[['deaths']].agg(np.sum).reset_index()
df = df.sort_values(by='week')
df['date'] = df['week'].apply(lambda x : firstday + timedelta(days=7*x))
df2 = copy.copy(df[['week', 'deaths']])
df2['date'] = df2['week'].apply(lambda x : firstday + timedelta(days=7*x+6))
    df = pd.concat([df, df2])
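    # Hedged sketch of a continuation (the function is truncated here): sort the combined weekly
    # frame and save a simple plot, in line with the other plotting helpers in this file.
    # The output filename below is hypothetical, not taken from the source.
    df = df.sort_values(by='date')
    fig, ax = plt.subplots(figsize=(8, 4))
    ax.plot(df['date'], df['deaths'], '-o')
    ax.set_ylabel('weekly deaths')
    fig.savefig(os.path.join(plot_path, 'IL_weekly_deaths_LL%s.png' % LL_date))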
import json
import websocket
import time
import pandas as pd
from minio import Minio
SOCKET = "wss://api2.poloniex.com"
PARAMETERS = {
"command": "subscribe",
"channel": 1002
}
cripto_list = []
usdt_btc_oneminute = []
timestamps = []
rawdata_dict = {}
fullrawdata_header = ["currency pair id", "last trade price", "lowest ask", "highest bid",
"percent change in last 24 hours", "base currency volume in last 24 hours",
"quote currency volume in last 24 hours", "is frozen", "highest trade price in last 24 hours",
"lowest trade price in last 24 hours", "post only", "maintenance mode", "timestamp"]
fullrawdata_dict = {key: [] for key in fullrawdata_header}
counter = 1
def on_open(ws):
print('opened connection')
ws.send(json.dumps(PARAMETERS, indent=3))
print("Command sent")
def on_close(ws):
print("closed connection. Trying to reconnect")
ws = websocket.WebSocketApp(SOCKET,
on_open=on_open,
on_close=on_close,
on_message=on_message,
on_error=on_error)
ws.run_forever()
def on_ping(ws, message):
print(f"Got a ping!")
def on_pong(ws, message):
global counter
print(f"Got a pong: {counter}")
counter += 1
def on_message(ws, message):
global cripto_list
global t0
global counter
cripto_list = message.replace("[", "").replace("]", "").replace('"', "").split(",")
if len(cripto_list) > 2:
t1 = time.time() - t0
        # append the timestamp field to cripto_list
cripto_list.append(time.time())
# Selecting USDT_BTC, id 121, and USDT_ETH, id 149
if int(cripto_list[2]) == 121 or int(cripto_list[2]) == 149:
for key in fullrawdata_header:
fullrawdata_dict[key].append(cripto_list[fullrawdata_header.index(key) + 2])
if t1 > 59:
# upload_files("rawdata.csv", "smarttbots3bucket")
print("Uploading files")
            df2 = pd.DataFrame(fullrawdata_dict, columns=fullrawdata_header)
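            # Hedged sketch of a continuation (the handler is truncated here): write the collected
            # minute of data and reset the buffers. "rawdata.csv" comes from the commented upload
            # call above; clearing the buffers and resetting t0 are assumptions.
            df2.to_csv("rawdata.csv", index=False)
            # upload_files("rawdata.csv", "smarttbots3bucket")
            for column in fullrawdata_header:
                fullrawdata_dict[column].clear()
            t0 = time.time()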
import os
import pandas as pd
import logging
FORMAT = ">>> %(filename)s, ln %(lineno)s - %(funcName)s: %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
review_folder = 'Z:\\LYR\\LYR_2017studies\\LYR17_2Dmodelling\\LYR17_1_EDDPD\\review\\133'
# initializing csv file lists
hpc_files = []
review_files = []
PO_conv_files = []
# initializing dataframes
hpc_sum = pd.DataFrame()
log_review = pd.DataFrame()
PO_conv = pd.DataFrame()
review_summary = pd.DataFrame()
# defining the run id from folder being review
run_id = review_folder.split('\\')[-1]
for dir, subdirs, files in os.walk(review_folder):
for review_file in files:
review_filename = os.path.join(dir, review_file)
# add hpc summary csv files to hpc_files list
logging.info(f'{run_id}: Adding hpc_summary.csv files to list...')
if review_file.endswith('hpc_summary.csv'):
hpc_files.append(review_filename)
# add log review csv files to review_files list
logging.info(f'{run_id}: Adding log_review.csv files to list...')
if review_file.endswith('log_review.csv'):
review_files.append(review_filename)
        # add PO convergence csv files to PO_conv_files list
logging.info(f'{run_id}: Adding PO_convergence.csv files to list...')
if review_file.endswith('PO_convergence_times.csv'):
PO_conv_files.append(review_filename)
for f in hpc_files:
# read csv files into data frame from hpc_file list
logging.info(f'{run_id}: Reading hpc_summary.csv files to data frame...')
    df = pd.read_csv(f)
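    # Likely continuation (the snippet is truncated here): append each file's contents to the
    # running hpc_sum frame initialized above. Keeping all columns is an assumption.
    hpc_sum = pd.concat([hpc_sum, df], ignore_index=True)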
import itertools
import pytest
import pandas as pd
import numpy as np
columns = ["ref", "x1", "x2"]
def gen_value(column, line, id=0):
if column == "ref":
return id * 1e5 + line
else:
return np.random.randint(0, 1000)
def gen_df(columns, date_range, id=0, seed=1):
np.random.seed(seed)
periods = date_range.size
df = pd.DataFrame(
{
"date_time": date_range,
**{c: [gen_value(c, l, id) for l in range(periods)] for c in columns},
}
)
return df
def gen_df_with_id(ids, columns, date_range):
dfs = []
for i in ids:
df = gen_df(columns, date_range, i)
df["id"] = i
dfs.append(df)
    df = pd.concat(dfs)
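    # Likely continuation (the helper is truncated here): return the combined frame,
    # mirroring gen_df above. The index reset is an assumption.
    return df.reset_index(drop=True)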
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 18:42:31 2020
@author: maxim
"""
import pandas as pd
x=pd.read_excel('data.xlsx')
x=x.iloc[:,1:].T
# mean-normalize the data: divide each row by its row mean
x_mean=x.mean(axis=1)
for i in range(x.index.size):
    x.iloc[i, :] = x.iloc[i, :] / x_mean.iloc[i]
# extract the reference sequence and the comparison sequences
ck=x.iloc[0,:]
cp=x.iloc[1:,:]
# subtract the reference sequence from each comparison sequence
t = pd.DataFrame()
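# Hedged sketch of a continuation (the script is truncated here): a standard grey relational
# analysis would fill `t` with absolute differences from the reference sequence and then compute
# relational coefficients. The resolution coefficient rho = 0.5 is the conventional default and
# an assumption here, not taken from the source.
t = (cp - ck).abs()
mmax = t.max().max()
mmin = t.min().min()
rho = 0.5
ksi = (mmin + rho * mmax) / (t + rho * mmax)  # grey relational coefficients
r = ksi.mean(axis=1)                          # grey relational grade per comparison sequence
print(r.sort_values(ascending=False))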
"""Plotting functions for AnnData.
"""
import collections.abc as cabc
from typing import Optional, Union
from typing import Tuple, Sequence, Collection, Iterable
import numpy as np
import pandas as pd
from anndata import AnnData
from cycler import Cycler
from matplotlib.axes import Axes
from pandas.api.types import is_categorical_dtype
from scipy.sparse import issparse
from matplotlib import pyplot as pl
from matplotlib import rcParams
from matplotlib import gridspec
from matplotlib import patheffects
from matplotlib.colors import is_color_like, Colormap, ListedColormap
from .. import get
from .. import logging as logg
from .._settings import settings
from .._utils import sanitize_anndata, _doc_params
from . import _utils
from ._utils import scatter_base, scatter_group, setup_axes, ColorLike
from ._docs import doc_scatter_basic, doc_show_save_ax, doc_common_plot_args
VALID_LEGENDLOCS = {
'none', 'right margin', 'on data', 'on data export', 'best',
'upper right', 'upper left',
'lower left', 'lower right', 'right',
'center left', 'center right',
'lower center', 'upper center', 'center',
}
@_doc_params(scatter_temp=doc_scatter_basic, show_save_ax=doc_show_save_ax)
def scatter(
adata: AnnData,
x: Optional[str] = None,
y: Optional[str] = None,
color: Union[str, Collection[str]] = None,
use_raw: Optional[bool] = None,
layers: Union[str, Collection[str]] = None,
sort_order: bool = True,
alpha: Optional[float] = None,
basis: Optional[str] = None,
groups: Union[str, Iterable[str]] = None,
components: Union[str, Collection[str]] = None,
projection: str = '2d',
legend_loc: str = 'right margin',
legend_fontsize: Union[int, float, str] = None,
legend_fontweight: Union[int, str] = None,
legend_fontoutline: float = None,
color_map: Union[str, Colormap] = None,
palette: Union[Cycler, ListedColormap, ColorLike, Sequence[ColorLike]] = None,
frameon: Optional[bool] = None,
right_margin: Optional[float] = None,
left_margin: Optional[float] = None,
size: Union[int, float, None] = None,
title: Optional[str] = None,
show: Optional[bool] = None,
save: Union[str, bool, None] = None,
ax: Optional[Axes] = None,
):
"""\
Scatter plot along observations or variables axes.
Color the plot using annotations of observations (`.obs`), variables
(`.var`) or expression of genes (`.var_names`).
Parameters
----------
adata
Annotated data matrix.
x
x coordinate.
y
y coordinate.
color
Keys for annotations of observations/cells or variables/genes,
or a hex color specification, e.g.,
`'ann1'`, `'#fe57a1'`, or `['ann1', 'ann2']`.
use_raw
Use `raw` attribute of `adata` if present.
layers
Use the `layers` attribute of `adata` if present: specify the layer for
`x`, `y` and `color`. If `layers` is a string, then it is expanded to
`(layers, layers, layers)`.
basis : {{`'pca'`, `'tsne'`, `'umap'`, `'diffmap'`, `'draw_graph_fr'`, etc.}}
String that denotes a plotting tool that computed coordinates.
{scatter_temp}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
args = locals()
if basis is not None:
return _scatter_obs(**args)
if x is None or y is None:
raise ValueError('Either provide a `basis` or `x` and `y`.')
if (
(x in adata.obs.keys() or x in adata.var.index)
and (y in adata.obs.keys() or y in adata.var.index)
and (color is None or color in adata.obs.keys() or color in adata.var.index)
):
return _scatter_obs(**args)
if (
(x in adata.var.keys() or x in adata.obs.index)
and (y in adata.var.keys() or y in adata.obs.index)
and (color is None or color in adata.var.keys() or color in adata.obs.index)
):
adata_T = adata.T
axs = _scatter_obs(
adata=adata_T,
**{name: val for name, val in args.items() if name != 'adata'}
)
# store .uns annotations that were added to the new adata object
adata.uns = adata_T.uns
return axs
raise ValueError(
'`x`, `y`, and potential `color` inputs must all '
'come from either `.obs` or `.var`'
)
def _scatter_obs(
adata: AnnData,
x=None,
y=None,
color=None,
use_raw=None,
layers=None,
sort_order=True,
alpha=None,
basis=None,
groups=None,
components=None,
projection='2d',
legend_loc='right margin',
legend_fontsize=None,
legend_fontweight=None,
legend_fontoutline=None,
color_map=None,
palette=None,
frameon=None,
right_margin=None,
left_margin=None,
size=None,
title=None,
show=None,
save=None,
ax=None,
):
"""See docstring of scatter."""
sanitize_anndata(adata)
from scipy.sparse import issparse
if use_raw is None and adata.raw is not None: use_raw = True
# Process layers
if (
layers in ['X', None]
or (isinstance(layers, str) and layers in adata.layers.keys())
):
layers = (layers, layers, layers)
elif isinstance(layers, cabc.Collection) and len(layers) == 3:
layers = tuple(layers)
for layer in layers:
if layer not in adata.layers.keys() and layer not in ['X', None]:
raise ValueError(
'`layers` should have elements that are '
'either None or in adata.layers.keys().'
)
else:
raise ValueError(
"`layers` should be a string or a collection of strings "
f"with length 3, had value '{layers}'"
)
if use_raw and layers not in [('X', 'X', 'X'), (None, None, None)]:
        raise ValueError('`use_raw` must be `False` if layers are used.')
if legend_loc not in VALID_LEGENDLOCS:
raise ValueError(
f'Invalid `legend_loc`, need to be one of: {VALID_LEGENDLOCS}.'
)
if components is None: components = '1,2' if '2d' in projection else '1,2,3'
if isinstance(components, str): components = components.split(',')
components = np.array(components).astype(int) - 1
keys = ['grey'] if color is None else [color] if isinstance(color, str) else color
if title is not None and isinstance(title, str):
title = [title]
highlights = adata.uns['highlights'] if 'highlights' in adata.uns else []
if basis is not None:
try:
# ignore the '0th' diffusion component
if basis == 'diffmap': components += 1
Y = adata.obsm['X_' + basis][:, components]
# correct the component vector for use in labeling etc.
if basis == 'diffmap': components -= 1
except KeyError:
raise KeyError(
f'compute coordinates using visualization tool {basis} first'
)
elif x is not None and y is not None:
if use_raw:
if x in adata.obs.columns:
x_arr = adata.obs_vector(x)
else:
x_arr = adata.raw.obs_vector(x)
if y in adata.obs.columns:
y_arr = adata.obs_vector(y)
else:
y_arr = adata.raw.obs_vector(y)
else:
x_arr = adata.obs_vector(x, layer=layers[0])
y_arr = adata.obs_vector(y, layer=layers[1])
Y = np.c_[x_arr, y_arr]
else:
raise ValueError('Either provide a `basis` or `x` and `y`.')
if size is None:
n = Y.shape[0]
size = 120000 / n
    if legend_fontsize is None:
        legend_fontsize = rcParams['legend.fontsize']
palette_was_none = False
if palette is None: palette_was_none = True
if isinstance(palette, cabc.Sequence):
if not is_color_like(palette[0]):
palettes = palette
else:
palettes = [palette]
else:
palettes = [palette for _ in range(len(keys))]
for i, palette in enumerate(palettes):
palettes[i] = _utils.default_palette(palette)
if basis is not None:
component_name = (
'DC' if basis == 'diffmap'
else 'tSNE' if basis == 'tsne'
else 'UMAP' if basis == 'umap'
else 'PC' if basis == 'pca'
else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis
else basis
)
else:
component_name = None
axis_labels = (x, y) if component_name is None else None
show_ticks = True if component_name is None else False
# generate the colors
color_ids = []
categoricals = []
colorbars = []
for ikey, key in enumerate(keys):
c = 'white'
categorical = False # by default, assume continuous or flat color
colorbar = None
        # test whether we have categorical or continuous annotation
if key in adata.obs_keys():
if is_categorical_dtype(adata.obs[key]):
categorical = True
else:
c = adata.obs[key]
# coloring according to gene expression
elif (use_raw
and adata.raw is not None
and key in adata.raw.var_names):
c = adata.raw.obs_vector(key)
elif key in adata.var_names:
c = adata.obs_vector(key, layer=layers[2])
elif is_color_like(key): # a flat color
c = key
colorbar = False
else:
raise ValueError(
f'key {key!r} is invalid! pass valid observation annotation, '
f'one of {adata.obs_keys()} or a gene name {adata.var_names}'
)
if colorbar is None:
colorbar = not categorical
colorbars.append(colorbar)
if categorical: categoricals.append(ikey)
color_ids.append(c)
if right_margin is None and len(categoricals) > 0:
if legend_loc == 'right margin': right_margin = 0.5
if title is None and keys[0] is not None:
title = [key.replace('_', ' ') if not is_color_like(key) else '' for key in keys]
axs = scatter_base(
Y,
title=title,
alpha=alpha,
component_name=component_name,
axis_labels=axis_labels,
component_indexnames=components + 1,
projection=projection,
colors=color_ids,
highlights=highlights,
colorbars=colorbars,
right_margin=right_margin,
left_margin=left_margin,
sizes=[size for _ in keys],
color_map=color_map,
show_ticks=show_ticks,
ax=ax,
)
def add_centroid(centroids, name, Y, mask):
Y_mask = Y[mask]
if Y_mask.shape[0] == 0: return
median = np.median(Y_mask, axis=0)
i = np.argmin(np.sum(np.abs(Y_mask - median), axis=1))
centroids[name] = Y_mask[i]
# loop over all categorical annotation and plot it
for i, ikey in enumerate(categoricals):
palette = palettes[i]
key = keys[ikey]
_utils.add_colors_for_categorical_sample_annotation(
adata, key, palette, force_update_colors=not palette_was_none
)
# actually plot the groups
mask_remaining = np.ones(Y.shape[0], dtype=bool)
centroids = {}
if groups is None:
for iname, name in enumerate(adata.obs[key].cat.categories):
if name not in settings.categories_to_ignore:
mask = scatter_group(axs[ikey], key, iname,
adata, Y, projection, size=size, alpha=alpha)
mask_remaining[mask] = False
if legend_loc.startswith('on data'): add_centroid(centroids, name, Y, mask)
else:
groups = [groups] if isinstance(groups, str) else groups
for name in groups:
if name not in set(adata.obs[key].cat.categories):
raise ValueError(
f'{name!r} is invalid! specify valid name, '
f'one of {adata.obs[key].cat.categories}'
)
else:
iname = np.flatnonzero(adata.obs[key].cat.categories.values == name)[0]
mask = scatter_group(
axs[ikey], key, iname,
adata, Y, projection,
size=size, alpha=alpha,
)
if legend_loc.startswith('on data'): add_centroid(centroids, name, Y, mask)
mask_remaining[mask] = False
if mask_remaining.sum() > 0:
data = [Y[mask_remaining, 0], Y[mask_remaining, 1]]
if projection == '3d': data.append(Y[mask_remaining, 2])
axs[ikey].scatter(
*data,
marker='.', c='lightgrey', s=size,
edgecolors='none', zorder=-1,
)
legend = None
if legend_loc.startswith('on data'):
if legend_fontweight is None:
legend_fontweight = 'bold'
if legend_fontoutline is not None:
path_effect = [patheffects.withStroke(
linewidth=legend_fontoutline,
foreground='w',
)]
else:
path_effect = None
for name, pos in centroids.items():
axs[ikey].text(
pos[0], pos[1], name,
weight=legend_fontweight,
verticalalignment='center',
horizontalalignment='center',
fontsize=legend_fontsize,
path_effects=path_effect,
)
all_pos = np.zeros((len(adata.obs[key].cat.categories), 2))
for iname, name in enumerate(adata.obs[key].cat.categories):
if name in centroids:
all_pos[iname] = centroids[name]
else:
all_pos[iname] = [np.nan, np.nan]
_utils._tmp_cluster_pos = all_pos
if legend_loc == 'on data export':
filename = settings.writedir / 'pos.csv'
logg.warning(f'exporting label positions to {filename}')
settings.writedir.mkdir(parents=True, exist_ok=True)
np.savetxt(filename, all_pos, delimiter=',')
elif legend_loc == 'right margin':
legend = axs[ikey].legend(
frameon=False, loc='center left',
bbox_to_anchor=(1, 0.5),
ncol=(
1 if len(adata.obs[key].cat.categories) <= 14 else
2 if len(adata.obs[key].cat.categories) <= 30 else
3
),
fontsize=legend_fontsize)
elif legend_loc != 'none':
legend = axs[ikey].legend(
frameon=False, loc=legend_loc, fontsize=legend_fontsize
)
if legend is not None:
for handle in legend.legendHandles: handle.set_sizes([300.0])
# draw a frame around the scatter
frameon = settings._frameon if frameon is None else frameon
if not frameon and x is None and y is None:
for ax in axs:
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_frame_on(False)
_utils.savefig_or_show('scatter' if basis is None else basis, show=show, save=save)
if show == False: return axs if len(keys) > 1 else axs[0]
def ranking(
adata: AnnData,
attr: str,
keys: Union[str, Sequence[str]],
dictionary=None,
indices=None,
labels=None,
color='black',
n_points=30,
log=False,
include_lowest=False,
show=None,
):
"""\
Plot rankings.
See, for example, how this is used in pl.pca_ranking.
Parameters
----------
adata
The data.
attr: {`'var'`, `'obs'`, `'uns'`, `'varm'`, `'obsm'`}
The attribute of AnnData that contains the score.
keys
The scores to look up an array from the attribute of adata.
Returns
-------
Returns matplotlib gridspec with access to the axes.
"""
if isinstance(keys, str) and indices is not None:
scores = getattr(adata, attr)[keys][:, indices]
keys = ['{}{}'.format(keys[:-1], i+1) for i in indices]
else:
if dictionary is None:
scores = getattr(adata, attr)[keys]
else:
scores = getattr(adata, attr)[dictionary][keys]
n_panels = len(keys) if isinstance(keys, list) else 1
if n_panels == 1: scores, keys = scores[:, None], [keys]
if log: scores = np.log(scores)
if labels is None:
labels = adata.var_names if attr in {'var', 'varm'} else np.arange(scores.shape[0]).astype(str)
if isinstance(labels, str):
labels = [labels + str(i+1) for i in range(scores.shape[0])]
if n_panels <= 5: n_rows, n_cols = 1, n_panels
else: n_rows, n_cols = 2, int(n_panels/2 + 0.5)
fig = pl.figure(figsize=(
n_cols * rcParams['figure.figsize'][0],
n_rows * rcParams['figure.figsize'][1],
))
left, bottom = 0.2/n_cols, 0.13/n_rows
gs = gridspec.GridSpec(
wspace=0.2,
nrows=n_rows, ncols=n_cols,
left=left, bottom=bottom,
right=1 - (n_cols-1)*left - 0.01/n_cols,
top=1 - (n_rows-1)*bottom - 0.1/n_rows,
)
for iscore, score in enumerate(scores.T):
pl.subplot(gs[iscore])
order_scores = np.argsort(score)[::-1]
if not include_lowest:
indices = order_scores[:n_points+1]
else:
indices = order_scores[:n_points//2]
neg_indices = order_scores[-(n_points-(n_points//2)):]
txt_args = dict(
color=color,
rotation='vertical',
verticalalignment='bottom',
horizontalalignment='center',
fontsize=8,
)
for ig, g in enumerate(indices):
pl.text(ig, score[g], labels[g], **txt_args)
if include_lowest:
score_mid = (score[g] + score[neg_indices[0]]) / 2
pl.text(len(indices), score_mid, '⋮', **txt_args)
for ig, g in enumerate(neg_indices):
pl.text(ig+len(indices)+2, score[g], labels[g], **txt_args)
pl.xticks([])
pl.title(keys[iscore].replace('_', ' '))
if n_panels <= 5 or iscore > n_cols: pl.xlabel('ranking')
pl.xlim(-0.9, n_points + 0.9 + (1 if include_lowest else 0))
score_min, score_max = np.min(score[neg_indices if include_lowest else indices]), np.max(score[indices])
pl.ylim(
(0.95 if score_min > 0 else 1.05) * score_min,
(1.05 if score_max > 0 else 0.95) * score_max,
)
if show == False: return gs
@_doc_params(show_save_ax=doc_show_save_ax)
def violin(
adata: AnnData,
keys: Union[str, Sequence[str]],
groupby: Optional[str] = None,
log: bool = False,
use_raw: Optional[bool] = None,
stripplot: bool = True,
jitter: Union[float, bool] = True,
size: int = 1,
scale: str = 'width',
order: Optional[Sequence[str]] = None,
multi_panel: Optional[bool] = None,
xlabel: str = '',
rotation: Optional[float] = None,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
ax: Optional[Axes] = None,
**kwds,
):
"""\
Violin plot.
Wraps :func:`seaborn.violinplot` for :class:`~anndata.AnnData`.
Parameters
----------
adata
Annotated data matrix.
keys
Keys for accessing variables of `.var_names` or fields of `.obs`.
groupby
The key of the observation grouping to consider.
log
Plot on logarithmic axis.
use_raw
Use `raw` attribute of `adata` if present.
stripplot
Add a stripplot on top of the violin plot.
See :func:`~seaborn.stripplot`.
jitter
Add jitter to the stripplot (only when stripplot is True)
See :func:`~seaborn.stripplot`.
size
Size of the jitter points.
scale : {{`'area'`, `'count'`, `'width'`}}
The method used to scale the width of each violin.
If 'width' (the default), each violin will have the same width.
If 'area', each violin will have the same area.
If 'count', a violin’s width corresponds to the number of observations.
order
Order in which to show the categories.
multi_panel
Display keys in multiple panels also when `groupby is not None`.
xlabel
Label of the x axis. Defaults to `groupby` if `rotation` is `None`,
otherwise, no label is shown.
rotation
Rotation of xtick labels.
{show_save_ax}
**kwds
Are passed to :func:`~seaborn.violinplot`.
Returns
-------
A :class:`~matplotlib.axes.Axes` object if `ax` is `None` else `None`.
"""
import seaborn as sns # Slow import, only import if called
sanitize_anndata(adata)
if use_raw is None and adata.raw is not None: use_raw = True
if isinstance(keys, str): keys = [keys]
if groupby is not None:
obs_df = get.obs_df(adata, keys=[groupby] + keys, use_raw=use_raw)
else:
obs_df = get.obs_df(adata, keys=keys, use_raw=use_raw)
if groupby is None:
obs_tidy = pd.melt(obs_df, value_vars=keys)
x = 'variable'
ys = ['value']
else:
obs_tidy = obs_df
x = groupby
ys = keys
if multi_panel and groupby is None and len(ys) == 1:
# This is a quick and dirty way for adapting scales across several
# keys if groupby is None.
y = ys[0]
g = sns.FacetGrid(obs_tidy, col=x, col_order=keys, sharey=False)
# don't really know why this gives a warning without passing `order`
g = g.map(sns.violinplot, y, inner=None, orient='vertical',
scale=scale, order=keys, **kwds)
if stripplot:
g = g.map(sns.stripplot, y, orient='vertical', jitter=jitter, size=size, order=keys,
color='black')
if log:
g.set(yscale='log')
g.set_titles(col_template='{col_name}').set_xlabels('')
if rotation is not None:
for ax in g.axes[0]:
ax.tick_params(axis='x', labelrotation=rotation)
else:
if ax is None:
axs, _, _, _ = setup_axes(
ax=ax,
panels=['x'] if groupby is None else keys,
show_ticks=True,
right_margin=0.3,
)
else:
axs = [ax]
for ax, y in zip(axs, ys):
ax = sns.violinplot(x, y=y, data=obs_tidy, inner=None, order=order,
orient='vertical', scale=scale, ax=ax, **kwds)
if stripplot:
ax = sns.stripplot(x, y=y, data=obs_tidy, order=order,
jitter=jitter, color='black', size=size, ax=ax)
if xlabel == '' and groupby is not None and rotation is None:
xlabel = groupby.replace('_', ' ')
ax.set_xlabel(xlabel)
if log:
ax.set_yscale('log')
if rotation is not None:
ax.tick_params(axis='x', labelrotation=rotation)
_utils.savefig_or_show('violin', show=show, save=save)
if show is False:
if multi_panel and groupby is None and len(ys) == 1:
return g
elif len(axs) == 1:
return axs[0]
else:
return axs
@_doc_params(show_save_ax=doc_show_save_ax)
def clustermap(
adata: AnnData,
obs_keys: str = None,
use_raw: Optional[bool] = None,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
**kwds,
):
"""\
Hierarchically-clustered heatmap.
Wraps :func:`seaborn.clustermap` for :class:`~anndata.AnnData`.
Parameters
----------
adata
Annotated data matrix.
obs_keys
Categorical annotation to plot with a different color map.
Currently, only a single key is supported.
use_raw
Use `raw` attribute of `adata` if present.
{show_save_ax}
**kwds
Keyword arguments passed to :func:`~seaborn.clustermap`.
Returns
-------
If `show == False`, a :class:`~seaborn.ClusterGrid` object
(see :func:`~seaborn.clustermap`).
Examples
--------
    Soon to come with figures. In the meantime, see :func:`~seaborn.clustermap`.
>>> import scanpy as sc
>>> adata = sc.datasets.krumsiek11()
>>> sc.pl.clustermap(adata, obs_keys='cell_type')
"""
import seaborn as sns # Slow import, only import if called
if not isinstance(obs_keys, (str, type(None))):
raise ValueError('Currently, only a single key is supported.')
sanitize_anndata(adata)
if use_raw is None and adata.raw is not None: use_raw = True
X = adata.raw.X if use_raw else adata.X
if issparse(X):
X = X.toarray()
df = pd.DataFrame(X, index=adata.obs_names, columns=adata.var_names)
if obs_keys is not None:
row_colors = adata.obs[obs_keys]
_utils.add_colors_for_categorical_sample_annotation(adata, obs_keys)
# do this more efficiently... just a quick solution
lut = dict(zip(
row_colors.cat.categories,
adata.uns[obs_keys + '_colors']))
row_colors = adata.obs[obs_keys].map(lut)
g = sns.clustermap(df, row_colors=row_colors.values, **kwds)
else:
g = sns.clustermap(df, **kwds)
show = settings.autoshow if show is None else show
if show: pl.show()
else: return g
@_doc_params(show_save_ax=doc_show_save_ax, common_plot_args=doc_common_plot_args)
def stacked_violin(
adata: AnnData,
var_names,
groupby=None,
log=False,
use_raw=None,
num_categories=7,
figsize=None,
dendrogram=False,
gene_symbols=None,
var_group_positions=None,
var_group_labels=None,
standard_scale: Optional[str] = None,
var_group_rotation=None,
layer=None,
stripplot: bool = False,
jitter: Union[float, bool] = False,
size: int = 1,
scale: str = 'width',
order: Optional[Sequence[str]] = None,
swap_axes: bool = False,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
row_palette: str = 'muted',
**kwds,
):
"""\
Stacked violin plots.
Makes a compact image composed of individual violin plots
(from :func:`~seaborn.violinplot`) stacked on top of each other.
Useful to visualize gene expression per cluster.
Wraps :func:`seaborn.violinplot` for :class:`~anndata.AnnData`.
Parameters
----------
{common_plot_args}
stripplot
Add a stripplot on top of the violin plot.
See :func:`~seaborn.stripplot`.
jitter
Add jitter to the stripplot (only when stripplot is True)
See :func:`~seaborn.stripplot`.
size
Size of the jitter points.
order
Order in which to show the categories.
scale: {{`'area'`, `'count'`, `'width'`}}
The method used to scale the width of each violin.
If 'width' (the default), each violin will have the same width.
If 'area', each violin will have the same area.
If 'count', a violin’s width corresponds to the number of observations.
row_palette
The row palette determines the colors to use for the stacked violins.
The value should be a valid seaborn or matplotlib palette name
(see :func:`~seaborn.color_palette`).
Alternatively, a single color name or hex value can be passed,
e.g. `'red'` or `'#cc33ff'`.
standard_scale: {{`'var'`, `'obs'`}}
Whether or not to standardize a dimension between 0 and 1,
meaning for each variable or observation,
subtract the minimum and divide each by its maximum.
swap_axes
By default, the x axis contains `var_names` (e.g. genes) and the y axis the `groupby` categories.
        If `swap_axes` is set, the x axis holds the `groupby` categories and the y axis the `var_names`.
        When swapping axes, `var_group_positions` is no longer used.
{show_save_ax}
**kwds
Are passed to :func:`~seaborn.violinplot`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
-------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.stacked_violin(adata, markers, groupby='bulk_labels', dendrogram=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.stacked_violin(adata, markers, groupby='bulk_labels', dendrogram=True)
See also
--------
rank_genes_groups_stacked_violin: to plot marker genes identified using the :func:`~scanpy.tl.rank_genes_groups` function.
"""
import seaborn as sns # Slow import, only import if called
if use_raw is None and adata.raw is not None: use_raw = True
var_names, var_group_labels, var_group_positions = _check_var_names_type(var_names,
var_group_labels, var_group_positions)
has_var_groups = True if var_group_positions is not None and len(var_group_positions) > 0 else False
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories,
gene_symbols=gene_symbols, layer=layer)
if standard_scale == 'obs':
obs_tidy = obs_tidy.sub(obs_tidy.min(1), axis=0)
obs_tidy = obs_tidy.div(obs_tidy.max(1), axis=0).fillna(0)
elif standard_scale == 'var':
obs_tidy -= obs_tidy.min(0)
obs_tidy = (obs_tidy / obs_tidy.max(0)).fillna(0)
elif standard_scale is None:
pass
else:
logg.warning('Unknown type for standard_scale, ignored')
if 'color' in kwds:
row_palette = kwds['color']
# remove color from kwds in case is set to avoid an error caused by
# double parameters
del (kwds['color'])
if 'linewidth' not in kwds:
        # for the tiny violin plots used here, it is best
        # to use a thin linewidth.
kwds['linewidth'] = 0.5
    # set the violin plot cut=0 by default to limit the extent
    # of the violin plot, as this produces better plots that won't extend
    # to negative values, for example. From the seaborn.violinplot documentation:
#
# cut: Distance, in units of bandwidth size, to extend the density past
# the extreme datapoints. Set to 0 to limit the violin range within
# the range of the observed data (i.e., to have the same effect as
# trim=True in ggplot.
if 'cut' not in kwds:
kwds['cut'] = 0
if groupby is None or len(categories) <= 1:
# dendrogram can only be computed between groupby categories
dendrogram = False
if dendrogram:
dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
var_names=var_names,
var_group_labels=var_group_labels,
var_group_positions=var_group_positions)
var_group_labels = dendro_data['var_group_labels']
var_group_positions = dendro_data['var_group_positions']
# reorder obs_tidy
if dendro_data['var_names_idx_ordered'] is not None:
obs_tidy = obs_tidy.iloc[:, dendro_data['var_names_idx_ordered']]
var_names = [var_names[x] for x in dendro_data['var_names_idx_ordered']]
obs_tidy.index = obs_tidy.index.reorder_categories(
[categories[x] for x in dendro_data['categories_idx_ordered']], ordered=True)
categories = [categories[x] for x in dendro_data['categories_idx_ordered']]
global count
count = 0
def rename_cols_to_int(value):
global count
count += 1
return count
# All columns should have a unique name, otherwise the
# pd.melt object that is passed to seaborn will merge non-unique columns.
# Here, I simply rename the columns using a count from 1..n using the
# mapping function `rename_cols_to_int` to solve the problem.
obs_tidy.rename(rename_cols_to_int, axis='columns', inplace=True)
if not swap_axes:
# plot image in which x = var_names and y = groupby categories
dendro_width = 1.4 if dendrogram else 0
if figsize is None:
height = len(categories) * 0.2 + 3
width = len(var_names) * 0.2 + 1 + dendro_width
else:
width, height = figsize
num_rows = len(categories)
height_ratios = None
if has_var_groups:
# add some space in case 'brackets' want to be plotted on top of the image
num_rows += 2 # +2 to add the row for the brackets and a spacer
height_ratios = [0.2, 0.05] + [float(height) / len(categories)] * len(categories)
categories = [None, None] + list(categories)
fig = pl.figure(figsize=(width, height))
# define a layout of nrows = len(categories) rows x 2 columns
# each row is one violin plot. Second column is reserved for dendrogram (if any)
# if var_group_positions is defined, a new row is added
axs = gridspec.GridSpec(nrows=num_rows, ncols=2, height_ratios=height_ratios,
width_ratios=[width, dendro_width], wspace=0.08)
axs_list = []
if dendrogram:
first_plot_idx = 1 if has_var_groups else 0
dendro_ax = fig.add_subplot(axs[first_plot_idx:, 1])
_plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram)
axs_list.append(dendro_ax)
ax0 = None
if is_color_like(row_palette):
row_colors = [row_palette] * len(categories)
else:
row_colors = sns.color_palette(row_palette, n_colors=len(categories))
for idx in range(num_rows)[::-1]: # iterate in reverse to start on the bottom plot
            # this facilitates adding the brackets plot (if needed)
            # by sharing the x axis with a previous violin plot.
category = categories[idx]
if has_var_groups and idx <= 1:
# if var_group_positions is given, axs[0] and axs[1] are the location for the
# brackets and a spacer (axs[1])
if idx == 0:
brackets_ax = fig.add_subplot(axs[0], sharex=ax0)
_plot_gene_groups_brackets(brackets_ax, group_positions=var_group_positions,
group_labels=var_group_labels,
rotation=var_group_rotation)
continue
df = pd.melt(obs_tidy[obs_tidy.index == category], value_vars=obs_tidy.columns)
if ax0 is None:
ax = fig.add_subplot(axs[idx, 0])
ax0 = ax
else:
ax = fig.add_subplot(axs[idx, 0])
axs_list.append(ax)
ax = sns.violinplot('variable', y='value', data=df, inner=None, order=order,
orient='vertical', scale=scale, ax=ax, color=row_colors[idx], **kwds)
if stripplot:
ax = sns.stripplot('variable', y='value', data=df, order=order,
jitter=jitter, color='black', size=size, ax=ax)
            # remove the grid lines; they are unnecessary in such a compact plot
ax.grid(False)
ax.tick_params(axis='y', left=False, right=True, labelright=True,
labelleft=False, labelsize='x-small', length=1, pad=1)
ax.set_ylabel(category, rotation=0, fontsize='small', labelpad=8, ha='right', va='center')
ax.set_xlabel('')
if log:
ax.set_yscale('log')
if idx < num_rows - 1:
# remove the xticks labels except for the last processed plot (first from bottom-up).
# Because the plots share the x axis it is redundant and less compact to plot the
# axis ticks and labels for each plot
ax.set_xticklabels([])
ax.tick_params(axis='x', bottom=False, top=False, labeltop=False, labelbottom=False)
else:
ax.set_xticklabels(var_names)
ax0.tick_params(axis='x', labelrotation=90, labelsize='small')
else:
# plot image in which x = group by and y = var_names
dendro_height = 3 if dendrogram else 0
vargroups_width = 0.45 if has_var_groups else 0
if figsize is None:
height = len(var_names) * 0.3 + dendro_height
width = len(categories) * 0.4 + vargroups_width
else:
width, height = figsize
fig = pl.figure(figsize=(width, height))
# define a layout of nrows = var_names x 1 columns
# if plot dendrogram a row is added
# each row is one violin plot.
num_rows = len(var_names) + 1 # +1 to account for dendrogram
height_ratios = [dendro_height] + ([1] * len(var_names))
axs = gridspec.GridSpec(nrows=num_rows, ncols=2,
height_ratios=height_ratios, wspace=0.2,
width_ratios=[width - vargroups_width, vargroups_width])
axs_list = []
if dendrogram:
dendro_ax = fig.add_subplot(axs[0])
_plot_dendrogram(dendro_ax, adata, groupby, orientation='top', dendrogram_key=dendrogram)
axs_list.append(dendro_ax)
first_ax = None
if is_color_like(row_palette):
row_colors = [row_palette] * len(var_names)
else:
row_colors = sns.color_palette(row_palette, n_colors=len(var_names))
for idx, y in enumerate(obs_tidy.columns):
ax_idx = idx + 1 # +1 to account that idx 0 is the dendrogram
if first_ax is None:
ax = fig.add_subplot(axs[ax_idx, 0])
first_ax = ax
else:
ax = fig.add_subplot(axs[ax_idx, 0])
axs_list.append(ax)
ax = sns.violinplot(x=obs_tidy.index, y=y, data=obs_tidy, inner=None, order=order,
orient='vertical', scale=scale, ax=ax, color=row_colors[idx], **kwds)
if stripplot:
ax = sns.stripplot(x=obs_tidy.index, y=y, data=obs_tidy, order=order,
jitter=jitter, color='black', size=size, ax=ax)
ax.set_ylabel(var_names[idx], rotation=0, fontsize='small', labelpad=8, ha='right', va='center')
# remove the grids because in such a compact plot are unnecessary
ax.grid(False)
ax.tick_params(axis='y', right=True, labelright=True, left=False,
labelleft=False, labelrotation=0, labelsize='x-small')
ax.tick_params(axis='x', labelsize='small')
# remove the xticks labels except for the last processed plot (first from bottom-up).
# Because the plots share the x axis it is redundant and less compact to plot the
# axis for each plot
if idx < len(var_names) - 1:
ax.tick_params(labelbottom=False, labeltop=False, bottom=False, top=False)
ax.set_xlabel('')
if log:
ax.set_yscale('log')
if max([len(x) for x in categories]) > 1:
ax.tick_params(axis='x', labelrotation=90)
if has_var_groups:
start = 1 if dendrogram else 0
gene_groups_ax = fig.add_subplot(axs[start:, 1])
arr = []
for idx, pos in enumerate(var_group_positions):
arr += [idx] * (pos[1]+1 - pos[0])
_plot_gene_groups_brackets(gene_groups_ax, var_group_positions, var_group_labels,
left_adjustment=0.3, right_adjustment=0.7, orientation='right')
gene_groups_ax.set_ylim(len(var_names), 0)
axs_list.append(gene_groups_ax)
# remove the spacing between subplots
pl.subplots_adjust(wspace=0, hspace=0)
_utils.savefig_or_show('stacked_violin', show=show, save=save)
return axs_list
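# Hedged usage sketch for the stacked violin plot above (mirrors the doctest style of
# the docstrings below; the name `sc.pl.stacked_violin` matches the 'stacked_violin'
# save key used above, but treat the call as illustrative rather than authoritative):
# >>> import scanpy as sc
# >>> adata = sc.datasets.pbmc68k_reduced()
# >>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
# >>> sc.pl.stacked_violin(adata, markers, groupby='bulk_labels', dendrogram=True)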
@_doc_params(show_save_ax=doc_show_save_ax, common_plot_args=doc_common_plot_args)
def heatmap(
adata: AnnData,
var_names,
groupby=None,
use_raw=None,
log=False,
num_categories=7,
dendrogram=False,
gene_symbols=None,
var_group_positions=None,
var_group_labels=None,
var_group_rotation=None,
layer=None,
standard_scale=None,
swap_axes=False,
show_gene_labels=None,
show=None,
save=None,
figsize=None,
**kwds,
):
"""\
Heatmap of the expression values of genes.
If `groupby` is given, the heatmap is ordered by the respective group. For
example, a list of marker genes can be plotted, ordered by clustering. If
the `groupby` observation annotation is not categorical the observation
annotation is turned into a categorical by binning the data into the number
specified in `num_categories`.
Parameters
----------
{common_plot_args}
standard_scale : {{`'var'`, `'obs'`}}, optional (default: `None`)
Whether or not to standardize that dimension between 0 and 1, meaning for each variable or observation,
subtract the minimum and divide each by its maximum.
swap_axes: `bool`, optional (default: `False`)
By default, the x axis contains `var_names` (e.g. genes) and the y axis the `groupby`
categories (if any). By setting `swap_axes` then x are the `groupby` categories and y the `var_names`.
show_gene_labels: `bool`, optional (default: `None`).
        By default gene labels are shown when there are 50 or fewer genes. Otherwise the labels are removed.
{show_save_ax}
**kwds
Are passed to :func:`matplotlib.pyplot.imshow`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
-------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.heatmap(adata, markers, groupby='bulk_labels', dendrogram=True, swap_axes=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.heatmap(adata, markers, groupby='bulk_labels', dendrogram=True)
See also
--------
rank_genes_groups_heatmap: to plot marker genes identified using the :func:`~scanpy.tl.rank_genes_groups` function.
"""
if use_raw is None and adata.raw is not None: use_raw = True
var_names, var_group_labels, var_group_positions = _check_var_names_type(var_names,
var_group_labels, var_group_positions)
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories,
gene_symbols=gene_symbols, layer=layer)
if standard_scale == 'obs':
obs_tidy = obs_tidy.sub(obs_tidy.min(1), axis=0)
obs_tidy = obs_tidy.div(obs_tidy.max(1), axis=0).fillna(0)
elif standard_scale == 'var':
obs_tidy -= obs_tidy.min(0)
obs_tidy = (obs_tidy / obs_tidy.max(0)).fillna(0)
elif standard_scale is None:
pass
else:
logg.warning('Unknown type for standard_scale, ignored')
if groupby is None or len(categories) <= 1:
categorical = False
# dendrogram can only be computed between groupby categories
dendrogram = False
else:
categorical = True
# get categories colors:
if groupby + "_colors" in adata.uns:
groupby_colors = adata.uns[groupby + "_colors"]
else:
groupby_colors = None
if dendrogram:
dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
var_names=var_names,
var_group_labels=var_group_labels,
var_group_positions=var_group_positions)
var_group_labels = dendro_data['var_group_labels']
var_group_positions = dendro_data['var_group_positions']
# reorder obs_tidy
if dendro_data['var_names_idx_ordered'] is not None:
obs_tidy = obs_tidy.iloc[:, dendro_data['var_names_idx_ordered']]
var_names = [var_names[x] for x in dendro_data['var_names_idx_ordered']]
obs_tidy.index = obs_tidy.index.reorder_categories(
[categories[x] for x in dendro_data['categories_idx_ordered']], ordered=True)
# reorder groupby colors
if groupby_colors is not None:
groupby_colors = [groupby_colors[x] for x in dendro_data['categories_idx_ordered']]
if show_gene_labels is None:
if len(var_names) <= 50:
show_gene_labels = True
else:
show_gene_labels = False
logg.warning(
'Gene labels are not shown when more than 50 genes are visualized. '
'To show gene labels set `show_gene_labels=True`'
)
if categorical:
obs_tidy = obs_tidy.sort_index()
colorbar_width = 0.2
if not swap_axes:
# define a layout of 2 rows x 4 columns
# first row is for 'brackets' (if no brackets needed, the height of this row is zero)
# second row is for main content. This second row is divided into three axes:
# first ax is for the categories defined by `groupby`
# second ax is for the heatmap
# third ax is for the dendrogram
# fourth ax is for colorbar
dendro_width = 1 if dendrogram else 0
groupby_width = 0.2 if categorical else 0
if figsize is None:
height = 6
if show_gene_labels:
heatmap_width = len(var_names) * 0.3
else:
heatmap_width = 8
width = heatmap_width + dendro_width + groupby_width
else:
width, height = figsize
heatmap_width = width - (dendro_width + groupby_width)
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'brackets' want to be plotted on top of the image
height_ratios = [0.15, height]
else:
height_ratios = [0, height]
width_ratios = [groupby_width, heatmap_width, dendro_width, colorbar_width]
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(nrows=2, ncols=4, width_ratios=width_ratios, wspace=0.15 / width,
hspace=0.13 / height, height_ratios=height_ratios)
heatmap_ax = fig.add_subplot(axs[1, 1])
im = heatmap_ax.imshow(obs_tidy.values, aspect='auto', **kwds)
heatmap_ax.set_ylim(obs_tidy.shape[0] - 0.5, -0.5)
heatmap_ax.set_xlim(-0.5, obs_tidy.shape[1] - 0.5)
heatmap_ax.tick_params(axis='y', left=False, labelleft=False)
heatmap_ax.set_ylabel('')
heatmap_ax.grid(False)
# sns.heatmap(obs_tidy, yticklabels="auto", ax=heatmap_ax, cbar_ax=heatmap_cbar_ax, **kwds)
if show_gene_labels:
heatmap_ax.tick_params(axis='x', labelsize='small')
heatmap_ax.set_xticks(np.arange(len(var_names)))
heatmap_ax.set_xticklabels(var_names, rotation=90)
else:
heatmap_ax.tick_params(axis='x', labelbottom=False, bottom=False)
# plot colorbar
_plot_colorbar(im, fig, axs[1, 3])
if categorical:
groupby_ax = fig.add_subplot(axs[1, 0])
ticks, labels, groupby_cmap, norm = _plot_categories_as_colorblocks(groupby_ax, obs_tidy,
colors=groupby_colors, orientation='left')
# add lines to main heatmap
line_positions = np.cumsum(obs_tidy.index.value_counts(sort=False))[:-1] - 0.5
heatmap_ax.hlines(line_positions, -0.73, len(var_names) - 0.5, lw=0.6,
zorder=10, clip_on=False)
if dendrogram:
dendro_ax = fig.add_subplot(axs[1, 2], sharey=heatmap_ax)
_plot_dendrogram(dendro_ax, adata, groupby, ticks=ticks, dendrogram_key=dendrogram)
# plot group legends on top of heatmap_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[0, 1], sharex=heatmap_ax)
_plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions,
group_labels=var_group_labels, rotation=var_group_rotation,
left_adjustment=-0.3, right_adjustment=0.3)
# swap axes case
else:
# define a layout of 3 rows x 3 columns
# The first row is for the dendrogram (if not dendrogram height is zero)
# second row is for main content. This col is divided into three axes:
# first ax is for the heatmap
        # second ax is for 'brackets' if any (otherwise width is zero)
# third ax is for colorbar
dendro_height = 0.8 if dendrogram else 0
groupby_height = 0.13 if categorical else 0
if figsize is None:
if show_gene_labels:
heatmap_height = len(var_names) * 0.18
else:
heatmap_height = 4
width = 10
            height = heatmap_height + dendro_height + groupby_height
else:
width, height = figsize
heatmap_height = height - (dendro_height + groupby_height)
height_ratios = [dendro_height, heatmap_height, groupby_height]
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'brackets' want to be plotted on top of the image
width_ratios = [width, 0.14, colorbar_width]
else:
width_ratios = [width, 0, colorbar_width]
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(nrows=3, ncols=3, wspace=0.25 / width,
hspace=0.3 / height,
width_ratios=width_ratios,
height_ratios=height_ratios)
# plot heatmap
heatmap_ax = fig.add_subplot(axs[1, 0])
im = heatmap_ax.imshow(obs_tidy.T.values, aspect='auto', **kwds)
heatmap_ax.set_xlim(0, obs_tidy.shape[0])
heatmap_ax.set_ylim(obs_tidy.shape[1] - 0.5, -0.5)
heatmap_ax.tick_params(axis='x', bottom=False, labelbottom=False)
heatmap_ax.set_xlabel('')
heatmap_ax.grid(False)
if show_gene_labels:
heatmap_ax.tick_params(axis='y', labelsize='small', length=1)
heatmap_ax.set_yticks(np.arange(len(var_names)))
heatmap_ax.set_yticklabels(var_names, rotation=0)
else:
heatmap_ax.tick_params(axis='y', labelleft=False, left=False)
if categorical:
groupby_ax = fig.add_subplot(axs[2, 0])
ticks, labels, groupby_cmap, norm = _plot_categories_as_colorblocks(groupby_ax, obs_tidy, colors=groupby_colors,
orientation='bottom')
# add lines to main heatmap
line_positions = np.cumsum(obs_tidy.index.value_counts(sort=False))[:-1] - 0.5
heatmap_ax.vlines(line_positions, -0.5, len(var_names) + .35, lw=0.6,
zorder=10, clip_on=False)
if dendrogram:
dendro_ax = fig.add_subplot(axs[0, 0], sharex=heatmap_ax)
_plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=ticks, orientation='top')
# plot group legends next to the heatmap_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[1, 1])
arr = []
for idx, pos in enumerate(var_group_positions):
arr += [idx] * (pos[1]+1 - pos[0])
gene_groups_ax.imshow(np.matrix(arr).T, aspect='auto', cmap=groupby_cmap, norm=norm)
gene_groups_ax.axis('off')
# plot colorbar
_plot_colorbar(im, fig, axs[1, 2])
_utils.savefig_or_show('heatmap', show=show, save=save)
return axs
@_doc_params(show_save_ax=doc_show_save_ax, common_plot_args=doc_common_plot_args)
def dotplot(
adata: AnnData,
var_names,
groupby=None,
use_raw=None,
log=False,
num_categories=7,
expression_cutoff=0.,
mean_only_expressed=False,
color_map='Reds',
dot_max=None,
dot_min=None,
figsize=None,
dendrogram=False,
gene_symbols=None,
var_group_positions=None,
standard_scale=None,
smallest_dot=0.,
var_group_labels=None,
var_group_rotation=None,
layer=None,
show=None,
save=None,
**kwds,
):
"""\
Makes a *dot plot* of the expression values of `var_names`.
For each var_name and each `groupby` category a dot is plotted. Each dot
represents two values: mean expression within each category (visualized by
color) and fraction of cells expressing the var_name in the
category (visualized by the size of the dot). If groupby is not given, the
dotplot assumes that all data belongs to a single category.
**Note**: A gene is considered expressed if the expression value in the adata
(or adata.raw) is above the specified threshold which is zero by default.
An example of dotplot usage is to visualize, for multiple marker genes,
    the mean value and the percentage of cells expressing the gene across multiple clusters.
Parameters
----------
{common_plot_args}
expression_cutoff : `float` (default: `0.`)
Expression cutoff that is used for binarizing the gene expression and determining the fraction
of cells expressing given genes. A gene is expressed only if the expression value is greater than
this threshold.
mean_only_expressed : `bool` (default: `False`)
If True, gene expression is averaged only over the cells expressing the given genes.
color_map : `str`, optional (default: `Reds`)
String denoting matplotlib color map.
dot_max : `float` optional (default: `None`)
If none, the maximum dot size is set to the maximum fraction value found (e.g. 0.6). If given,
the value should be a number between 0 and 1. All fractions larger than dot_max are clipped to
this value.
dot_min : `float` optional (default: `None`)
If none, the minimum dot size is set to 0. If given,
the value should be a number between 0 and 1. All fractions smaller than dot_min are clipped to
this value.
standard_scale : {{`'var'`, `'group'`}}, optional (default: `None`)
Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group,
subtract the minimum and divide each by its maximum.
smallest_dot : `float` optional (default: 0.)
        If none, the smallest dot has size 0. All expression levels with `dot_min` are plotted with
`smallest_dot` dot size.
{show_save_ax}
**kwds
Are passed to :func:`matplotlib.pyplot.scatter`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
-------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.dotplot(adata, markers, groupby='bulk_labels', dendrogram=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.dotplot(adata, markers, groupby='bulk_labels', dendrogram=True)
See also
--------
rank_genes_groups_dotplot: to plot marker genes identified using the :func:`~scanpy.tl.rank_genes_groups` function.
"""
if use_raw is None and adata.raw is not None: use_raw = True
var_names, var_group_labels, var_group_positions = _check_var_names_type(var_names,
var_group_labels, var_group_positions)
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories,
layer=layer, gene_symbols=gene_symbols)
    # for each category defined by groupby (if any), compute for each var_name:
# 1. the fraction of cells in the category having a value > expression_cutoff
# 2. the mean value over the category
# 1. compute fraction of cells having value > expression_cutoff
# transform obs_tidy into boolean matrix using the expression_cutoff
obs_bool = obs_tidy > expression_cutoff
# compute the sum per group which in the boolean matrix this is the number
# of values > expression_cutoff, and divide the result by the total number of values
# in the group (given by `count()`)
fraction_obs = obs_bool.groupby(level=0).sum() / obs_bool.groupby(level=0).count()
# 2. compute mean value
if mean_only_expressed:
mean_obs = obs_tidy.mask(~obs_bool).groupby(level=0).mean().fillna(0)
else:
mean_obs = obs_tidy.groupby(level=0).mean()
if standard_scale == 'group':
mean_obs = mean_obs.sub(mean_obs.min(1), axis=0)
mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0)
elif standard_scale == 'var':
mean_obs -= mean_obs.min(0)
mean_obs = (mean_obs / mean_obs.max(0)).fillna(0)
elif standard_scale is None:
pass
else:
logg.warning('Unknown type for standard_scale, ignored')
dendro_width = 0.8 if dendrogram else 0
colorbar_width = 0.2
colorbar_width_spacer = 0.5
size_legend_width = 0.25
if figsize is None:
height = len(categories) * 0.3 + 1 # +1 for labels
# if the number of categories is small (eg 1 or 2) use
# a larger height
height = max([1.5, height])
heatmap_width = len(var_names) * 0.35
width = heatmap_width + colorbar_width + size_legend_width + dendro_width + colorbar_width_spacer
else:
width, height = figsize
heatmap_width = width - (colorbar_width + size_legend_width + dendro_width + colorbar_width_spacer)
# colorbar ax width should not change with differences in the width of the image
# otherwise can become too small
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'brackets' want to be plotted on top of the image
height_ratios = [0.5, 10]
else:
height_ratios = [0, 10.5]
# define a layout of 2 rows x 5 columns
# first row is for 'brackets' (if no brackets needed, the height of this row is zero)
# second row is for main content. This second row
# is divided into 4 axes:
# first ax is for the main figure
# second ax is for dendrogram (if present)
# third ax is for the color bar legend
    # fourth ax is for a spacer that prevents the ticks
    # of the color bar from being hidden beneath the size legend axis
# fifth ax is to plot the size legend
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(nrows=2, ncols=5, wspace=0.02, hspace=0.04,
width_ratios=[heatmap_width, dendro_width, colorbar_width, colorbar_width_spacer, size_legend_width],
height_ratios=height_ratios)
if len(categories) < 4:
# when few categories are shown, the colorbar and size legend
# need to be larger than the main plot, otherwise they would look
# compressed. For this, the dotplot ax is split into two:
axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 0],
height_ratios=[len(categories) * 0.3, 1])
dot_ax = fig.add_subplot(axs2[0])
else:
dot_ax = fig.add_subplot(axs[1, 0])
color_legend = fig.add_subplot(axs[1, 2])
if groupby is None or len(categories) <= 1:
# dendrogram can only be computed between groupby categories
dendrogram = False
if dendrogram:
dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
var_names=var_names,
var_group_labels=var_group_labels,
var_group_positions=var_group_positions)
var_group_labels = dendro_data['var_group_labels']
var_group_positions = dendro_data['var_group_positions']
# reorder matrix
if dendro_data['var_names_idx_ordered'] is not None:
# reorder columns (usually genes) if needed. This only happens when
# var_group_positions and var_group_labels is set
mean_obs = mean_obs.iloc[:,dendro_data['var_names_idx_ordered']]
fraction_obs = fraction_obs.iloc[:, dendro_data['var_names_idx_ordered']]
# reorder rows (categories) to match the dendrogram order
mean_obs = mean_obs.iloc[dendro_data['categories_idx_ordered'], :]
fraction_obs = fraction_obs.iloc[dendro_data['categories_idx_ordered'], :]
y_ticks = range(mean_obs.shape[0])
dendro_ax = fig.add_subplot(axs[1, 1], sharey=dot_ax)
_plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=y_ticks)
    # to keep the size_legend at about the same height, irrespective
# of the number of categories, the fourth ax is subdivided into two parts
size_legend_height = min(1.3, height)
# wspace is proportional to the width but a constant value is
# needed such that the spacing is the same for thinner or wider images.
wspace = 10.5 / width
axs3 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[1, 4], wspace=wspace,
height_ratios=[size_legend_height / height,
(height - size_legend_height) / height])
# make scatter plot in which
# x = var_names
# y = groupby category
# size = fraction
# color = mean expression
y, x = np.indices(mean_obs.shape)
y = y.flatten()
x = x.flatten()
frac = fraction_obs.values.flatten()
mean_flat = mean_obs.values.flatten()
cmap = pl.get_cmap(color_map)
if dot_max is None:
dot_max = np.ceil(max(frac) * 10) / 10
else:
if dot_max < 0 or dot_max > 1:
raise ValueError("`dot_max` value has to be between 0 and 1")
if dot_min is None:
dot_min = 0
else:
if dot_min < 0 or dot_min > 1:
raise ValueError("`dot_min` value has to be between 0 and 1")
if dot_min != 0 or dot_max != 1:
# clip frac between dot_min and dot_max
frac = np.clip(frac, dot_min, dot_max)
old_range = dot_max - dot_min
# re-scale frac between 0 and 1
frac = ((frac - dot_min) / old_range)
size = (frac * 10) ** 2
size += smallest_dot
import matplotlib.colors
normalize = matplotlib.colors.Normalize(vmin=kwds.get('vmin'), vmax=kwds.get('vmax'))
colors = cmap(normalize(mean_flat))
dot_ax.scatter(x, y, color=colors, s=size, cmap=cmap, norm=None, edgecolor='none', **kwds)
y_ticks = range(mean_obs.shape[0])
dot_ax.set_yticks(y_ticks)
dot_ax.set_yticklabels([mean_obs.index[idx] for idx in y_ticks])
x_ticks = range(mean_obs.shape[1])
dot_ax.set_xticks(x_ticks)
dot_ax.set_xticklabels([mean_obs.columns[idx] for idx in x_ticks], rotation=90)
dot_ax.tick_params(axis='both', labelsize='small')
dot_ax.grid(False)
dot_ax.set_xlim(-0.5, len(var_names) + 0.5)
dot_ax.set_ylabel(groupby)
    # to be consistent with the heatmap plot, it is better to
# invert the order of the y-axis, such that the first group is on
# top
ymin, ymax = dot_ax.get_ylim()
dot_ax.set_ylim(ymax+0.5, ymin - 0.5)
dot_ax.set_xlim(-1, len(var_names))
# plot group legends on top of dot_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=dot_ax)
_plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions,
group_labels=var_group_labels,
rotation=var_group_rotation)
# plot colorbar
import matplotlib.colorbar
matplotlib.colorbar.ColorbarBase(color_legend, cmap=cmap, norm=normalize)
# for the dot size legend, use step between dot_max and dot_min
# based on how different they are.
diff = dot_max - dot_min
if 0.3 < diff <= 0.6:
step = 0.1
elif diff <= 0.3:
step = 0.05
else:
step = 0.2
# a descending range that is afterwards inverted is used
# to guarantee that dot_max is in the legend.
fracs_legends = np.arange(dot_max, dot_min, step * -1)[::-1]
if dot_min != 0 or dot_max != 1:
fracs_values = ((fracs_legends - dot_min) / old_range)
else:
fracs_values = fracs_legends
size = (fracs_values * 10) ** 2
size += smallest_dot
color = [cmap(normalize(value)) for value in np.repeat(max(mean_flat) * 0.7, len(size))]
# plot size bar
size_legend = fig.add_subplot(axs3[0])
size_legend.scatter(np.repeat(0, len(size)), range(len(size)), s=size, color=color)
size_legend.set_yticks(range(len(size)))
labels = ["{:.0%}".format(x) for x in fracs_legends]
if dot_max < 1:
labels[-1] = ">" + labels[-1]
size_legend.set_yticklabels(labels)
size_legend.set_yticklabels(["{:.0%}".format(x) for x in fracs_legends])
size_legend.tick_params(axis='y', left=False, labelleft=False, labelright=True)
# remove x ticks and labels
size_legend.tick_params(axis='x', bottom=False, labelbottom=False)
# remove surrounding lines
size_legend.spines['right'].set_visible(False)
size_legend.spines['top'].set_visible(False)
size_legend.spines['left'].set_visible(False)
size_legend.spines['bottom'].set_visible(False)
size_legend.grid(False)
ymin, ymax = size_legend.get_ylim()
size_legend.set_ylim(ymin, ymax+0.5)
_utils.savefig_or_show('dotplot', show=show, save=save)
return axs
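# Worked example of the dot-size mapping implemented above (follows directly from the
# code, no new behaviour assumed): with dot_min=0.1, dot_max=0.6 and an observed
# fraction of 0.35, the fraction is clipped to [0.1, 0.6], rescaled to
# (0.35 - 0.1) / (0.6 - 0.1) = 0.5, and the scatter size becomes
# (0.5 * 10) ** 2 + smallest_dot = 25 + smallest_dot.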
@_doc_params(show_save_ax=doc_show_save_ax, common_plot_args=doc_common_plot_args)
def matrixplot(
adata: AnnData,
var_names,
groupby=None,
use_raw=None,
log=False,
num_categories=7,
figsize=None,
dendrogram=False,
gene_symbols=None,
var_group_positions=None,
var_group_labels=None,
var_group_rotation=None,
layer=None,
standard_scale=None,
swap_axes=False,
show=None,
save=None,
**kwds,
):
"""\
    Creates a heatmap of the mean expression values per group of each of the `var_names`.
If groupby is not given, the matrixplot assumes that all data belongs to a single
category.
Parameters
----------
{common_plot_args}
standard_scale : {{`'var'`, `'group'`}}, optional (default: `None`)
Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group,
subtract the minimum and divide each by its maximum.
{show_save_ax}
**kwds
Are passed to :func:`matplotlib.pyplot.pcolor`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
--------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.matrixplot(adata, markers, groupby='bulk_labels', dendrogram=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.matrixplot(adata, markers, groupby='bulk_labels', dendrogram=True)
See also
--------
rank_genes_groups_matrixplot: to plot marker genes identified using the :func:`~scanpy.tl.rank_genes_groups` function.
"""
if use_raw is None and adata.raw is not None: use_raw = True
var_names, var_group_labels, var_group_positions = _check_var_names_type(var_names,
var_group_labels, var_group_positions)
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories,
gene_symbols=gene_symbols, layer=layer)
if groupby is None or len(categories) <= 1:
# dendrogram can only be computed between groupby categories
dendrogram = False
mean_obs = obs_tidy.groupby(level=0).mean()
if standard_scale == 'group':
mean_obs = mean_obs.sub(mean_obs.min(1), axis=0)
mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0)
elif standard_scale == 'var':
mean_obs -= mean_obs.min(0)
mean_obs = (mean_obs / mean_obs.max(0)).fillna(0)
elif standard_scale is None:
pass
else:
logg.warning('Unknown type for standard_scale, ignored')
if dendrogram:
dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
var_names=var_names,
var_group_labels=var_group_labels,
var_group_positions=var_group_positions)
var_group_labels = dendro_data['var_group_labels']
var_group_positions = dendro_data['var_group_positions']
# reorder matrix
if dendro_data['var_names_idx_ordered'] is not None:
# reorder columns (usually genes) if needed. This only happens when
# var_group_positions and var_group_labels is set
mean_obs = mean_obs.iloc[:,dendro_data['var_names_idx_ordered']]
# reorder rows (categories) to match the dendrogram order
mean_obs = mean_obs.iloc[dendro_data['categories_idx_ordered'], :]
colorbar_width = 0.2
if not swap_axes:
dendro_width = 0.8 if dendrogram else 0
if figsize is None:
height = len(categories) * 0.2 + 1 # +1 for labels
heatmap_width = len(var_names) * 0.32
            width = heatmap_width + dendro_width + colorbar_width
else:
width, height = figsize
heatmap_width = width - (dendro_width + colorbar_width)
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'brackets' want to be plotted on top of the image
height_ratios = [0.5, 10]
height += 0.5
else:
height_ratios = [0, 10.5]
# define a layout of 2 rows x 3 columns
# first row is for 'brackets' (if no brackets needed, the height of this row is zero)
# second row is for main content. This second row
# is divided into three axes:
# first ax is for the main matrix figure
# second ax is for the dendrogram
# third ax is for the color bar legend
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(nrows=2, ncols=3, wspace=0.02, hspace=0.04,
width_ratios=[heatmap_width, dendro_width, colorbar_width],
height_ratios=height_ratios)
matrix_ax = fig.add_subplot(axs[1, 0])
y_ticks = np.arange(mean_obs.shape[0]) + 0.5
matrix_ax.set_yticks(y_ticks)
matrix_ax.set_yticklabels([mean_obs.index[idx] for idx in range(mean_obs.shape[0])])
if dendrogram:
dendro_ax = fig.add_subplot(axs[1, 1], sharey=matrix_ax)
_plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=y_ticks)
pc = matrix_ax.pcolor(mean_obs, edgecolor='gray', **kwds)
# invert y axis to show categories ordered from top to bottom
matrix_ax.set_ylim(mean_obs.shape[0], 0)
x_ticks = np.arange(mean_obs.shape[1]) + 0.5
matrix_ax.set_xticks(x_ticks)
matrix_ax.set_xticklabels([mean_obs.columns[idx] for idx in range(mean_obs.shape[1])], rotation=90)
matrix_ax.tick_params(axis='both', labelsize='small')
matrix_ax.grid(False)
matrix_ax.set_xlim(-0.5, len(var_names) + 0.5)
matrix_ax.set_ylabel(groupby)
matrix_ax.set_xlim(0, mean_obs.shape[1])
# plot group legends on top of matrix_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=matrix_ax)
_plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions,
group_labels=var_group_labels, rotation=var_group_rotation,
left_adjustment=0.2, right_adjustment=0.8)
# plot colorbar
_plot_colorbar(pc, fig, axs[1, 2])
else:
dendro_height = 0.5 if dendrogram else 0
if var_group_positions is not None and len(var_group_positions) > 0:
# add some space in case 'color blocks' want to be plotted on the right of the image
vargroups_width = 0.4
else:
vargroups_width = 0
if figsize is None:
heatmap_height = len(var_names) * 0.2
height = dendro_height + heatmap_height + 1 # +1 for labels
heatmap_width = len(categories) * 0.3
width = heatmap_width + vargroups_width + colorbar_width
else:
width, height = figsize
heatmap_width = width - (vargroups_width + colorbar_width)
heatmap_height = height - dendro_height
# define a layout of 2 rows x 3 columns
# first row is for 'dendrogram' (if no dendrogram is plotted, the height of this row is zero)
# second row is for main content. This row
# is divided into three axes:
# first ax is for the main matrix figure
# second ax is for the groupby categories (eg. brackets)
# third ax is for the color bar legend
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(nrows=2, ncols=3, wspace=0.05, hspace=0.005,
width_ratios=[heatmap_width, vargroups_width, colorbar_width],
height_ratios=[dendro_height, heatmap_height])
mean_obs = mean_obs.T
matrix_ax = fig.add_subplot(axs[1, 0])
pc = matrix_ax.pcolor(mean_obs, edgecolor='gray', **kwds)
y_ticks = np.arange(mean_obs.shape[0]) + 0.5
matrix_ax.set_yticks(y_ticks)
matrix_ax.set_yticklabels([mean_obs.index[idx] for idx in range(mean_obs.shape[0])])
x_ticks = np.arange(mean_obs.shape[1]) + 0.5
matrix_ax.set_xticks(x_ticks)
matrix_ax.set_xticklabels([mean_obs.columns[idx] for idx in range(mean_obs.shape[1])], rotation=90)
matrix_ax.tick_params(axis='both', labelsize='small')
matrix_ax.grid(False)
matrix_ax.set_xlim(0, len(categories))
matrix_ax.set_xlabel(groupby)
# invert y axis to show var_names ordered from top to bottom
matrix_ax.set_ylim(mean_obs.shape[0], 0)
if dendrogram:
dendro_ax = fig.add_subplot(axs[0, 0], sharex=matrix_ax)
_plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=x_ticks, orientation='top')
# plot group legends on top of matrix_ax (if given)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[1, 1], sharey=matrix_ax)
_plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions,
group_labels=var_group_labels, rotation=var_group_rotation,
left_adjustment=0.2, right_adjustment=0.8, orientation='right')
# plot colorbar
_plot_colorbar(pc, fig, axs[1, 2])
_utils.savefig_or_show('matrixplot', show=show, save=save)
return axs
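# Worked example of standard_scale='var' as implemented above: a column of mean values
# [1.0, 2.0, 4.0] becomes [0.0, 1.0, 3.0] after subtracting the column minimum and
# [0.0, 0.33, 1.0] after dividing by the new column maximum, so every gene spans the
# full color range of the matrix plot.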
@_doc_params(show_save_ax=doc_show_save_ax, common_plot_args=doc_common_plot_args)
def tracksplot(
adata: AnnData,
var_names,
groupby,
use_raw=None,
log=False,
dendrogram=False,
gene_symbols=None,
var_group_positions=None,
var_group_labels=None,
layer=None,
show=None,
save=None,
figsize=None,
**kwds,
):
"""\
In this type of plot each var_name is plotted as a filled line plot where the
y values correspond to the var_name values and x is each of the cells. Best results
    are obtained when using raw counts that are not log-transformed.
`groupby` is required to sort and order the values using the respective group
and should be a categorical value.
Parameters
----------
{common_plot_args}
{show_save_ax}
**kwds
Are passed to :func:`~seaborn.heatmap`.
Returns
-------
A list of :class:`~matplotlib.axes.Axes`.
Examples
--------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']
>>> sc.pl.tracksplot(adata, markers, 'bulk_labels', dendrogram=True)
Using var_names as dict:
>>> markers = {{'T-cell': 'CD3D', 'B-cell': 'CD79A', 'myeloid': 'CST3'}}
>>> sc.pl.heatmap(adata, markers, groupby='bulk_labels', dendrogram=True)
.. currentmodule:: scanpy
See also
--------
pl.rank_genes_groups_tracksplot: to plot marker genes identified using the :func:`~scanpy.tl.rank_genes_groups` function.
"""
if groupby not in adata.obs_keys() or adata.obs[groupby].dtype.name != 'category':
raise ValueError('groupby has to be a valid categorical observation. Given value: {}, '
'valid categorical observations: {}'.
format(groupby, [x for x in adata.obs_keys() if adata.obs[x].dtype.name == 'category']))
var_names, var_group_labels, var_group_positions = _check_var_names_type(var_names,
var_group_labels, var_group_positions)
categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, None,
gene_symbols=gene_symbols, layer=layer)
# get categories colors:
if groupby + "_colors" not in adata.uns:
from ._tools.scatterplots import _set_default_colors_for_categorical_obs
_set_default_colors_for_categorical_obs(adata, groupby)
groupby_colors = adata.uns[groupby + "_colors"]
if dendrogram:
# compute dendrogram if needed and reorder
# rows and columns to match leaves order.
dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
var_names=var_names,
var_group_labels=var_group_labels,
var_group_positions=var_group_positions)
# reorder obs_tidy
if dendro_data['var_names_idx_ordered'] is not None:
obs_tidy = obs_tidy.iloc[:, dendro_data['var_names_idx_ordered']]
var_names = [var_names[x] for x in dendro_data['var_names_idx_ordered']]
obs_tidy.index = obs_tidy.index.reorder_categories(
[categories[x] for x in dendro_data['categories_idx_ordered']], ordered=True)
categories = [categories[x] for x in dendro_data['categories_idx_ordered']]
groupby_colors = [groupby_colors[x] for x in dendro_data['categories_idx_ordered']]
obs_tidy = obs_tidy.sort_index()
# obtain the start and end of each category and make
# a list of ranges that will be used to plot a different
# color
cumsum = [0] + list(np.cumsum(obs_tidy.index.value_counts(sort=False)))
x_values = [(x, y) for x, y in zip(cumsum[:-1], cumsum[1:])]
dendro_height = 1 if dendrogram else 0
groupby_height = 0.24
    num_rows = len(var_names) + 2  # +2: one row for the dendrogram on top and one for the categories at the bottom
if figsize is None:
width = 12
track_height = 0.25
else:
width, height = figsize
track_height = (height - (dendro_height + groupby_height)) / len(var_names)
height_ratios = [dendro_height] + [track_height] * len(var_names) + [groupby_height]
height = sum(height_ratios)
obs_tidy = obs_tidy.T
fig = pl.figure(figsize=(width, height))
axs = gridspec.GridSpec(ncols=2, nrows=num_rows, wspace=1.0 / width,
hspace=0, height_ratios=height_ratios,
width_ratios=[width, 0.14])
axs_list = []
first_ax = None
for idx, var in enumerate(var_names):
ax_idx = idx + 1 # this is because of the dendrogram
if first_ax is None:
ax = fig.add_subplot(axs[ax_idx, 0])
first_ax = ax
else:
ax = fig.add_subplot(axs[ax_idx,0], sharex=first_ax)
axs_list.append(ax)
for cat_idx, category in enumerate(categories):
x_start, x_end = x_values[cat_idx]
ax.fill_between(range(x_start, x_end), 0, obs_tidy.iloc[idx, x_start:x_end], lw=0.1,
color=groupby_colors[cat_idx])
# remove the xticks labels except for the last processed plot.
# Because the plots share the x axis it is redundant and less compact to plot the
# axis for each plot
if idx < len(var_names) - 1:
ax.tick_params(labelbottom=False, labeltop=False, bottom=False, top=False)
ax.set_xlabel('')
if log:
ax.set_yscale('log')
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.grid(False)
ymin, ymax = ax.get_ylim()
ymax = int(ymax)
ax.set_yticks([ymax])
tt = ax.set_yticklabels([str(ymax)], ha='left', va='top')
ax.spines['right'].set_position(('axes', 1.01))
ax.tick_params(axis='y', labelsize='x-small', right=True, left=False, length=2,
which='both', labelright=True, labelleft=False, direction='in')
ax.set_ylabel(var, rotation=0, fontsize='small', ha='right', va='bottom')
ax.yaxis.set_label_coords(-0.005, 0.1)
ax.set_xlim(0, x_end)
ax.tick_params(axis='x', bottom=False, labelbottom=False)
# the ax to plot the groupby categories is split to add a small space
# between the rest of the plot and the categories
axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[num_rows - 1, 0],
height_ratios=[1, 1])
groupby_ax = fig.add_subplot(axs2[1])
ticks, labels, groupby_cmap, norm = _plot_categories_as_colorblocks(groupby_ax, obs_tidy.T, colors=groupby_colors,
orientation='bottom')
# add lines to plot
overlay_ax = fig.add_subplot(axs[1:-1, 0], sharex=first_ax)
line_positions = np.cumsum(obs_tidy.T.index.value_counts(sort=False))[:-1]
overlay_ax.vlines(line_positions, 0, 1, lw=0.5, linestyle="--")
overlay_ax.axis('off')
overlay_ax.set_ylim(0, 1)
if dendrogram:
dendro_ax = fig.add_subplot(axs[0], sharex=first_ax)
_plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, orientation='top', ticks=ticks)
axs_list.append(dendro_ax)
if var_group_positions is not None and len(var_group_positions) > 0:
gene_groups_ax = fig.add_subplot(axs[1:-1, 1])
arr = []
for idx, pos in enumerate(var_group_positions):
arr += [idx] * (pos[1]+1 - pos[0])
gene_groups_ax.imshow(np.matrix(arr).T, aspect='auto', cmap=groupby_cmap, norm=norm)
gene_groups_ax.axis('off')
axs_list.append(gene_groups_ax)
_utils.savefig_or_show('tracksplot', show=show, save=save)
return axs_list
@_doc_params(show_save_ax=doc_show_save_ax)
def dendrogram(
adata: AnnData,
groupby: str,
dendrogram_key: Optional[str] = None,
orientation: str = 'top',
remove_labels: bool = False,
show: Optional[bool] = None,
save: Optional[bool] = None,
):
"""Plots a dendrogram of the categories defined in `groupby`.
See :func:`~scanpy.tl.dendrogram`.
Parameters
----------
adata
groupby
Categorical data column used to create the dendrogram
dendrogram_key
Key under with the dendrogram information was stored.
By default the dendrogram information is stored under .uns['dendrogram_' + groupby].
orientation
Options are `top` (default), `bottom`, `left`, and `right`.
remove_labels
{show_save_ax}
Returns
-------
:class:`matplotlib.axes.Axes`
Examples
--------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.dendrogram(adata, 'bulk_labels')
>>> sc.pl.dendrogram(adata, 'bulk_labels')
"""
fig, ax = pl.subplots()
_plot_dendrogram(ax, adata, groupby, dendrogram_key=dendrogram_key,
remove_labels=remove_labels, orientation=orientation)
_utils.savefig_or_show('dendrogram', show=show, save=save)
return ax
@_doc_params(show_save_ax=doc_show_save_ax)
def correlation_matrix(
adata: AnnData,
groupby: str,
show_correlation_numbers: bool = False,
dendrogram: Union[bool, str] = True,
figsize: Optional[Tuple[float, float]] = None,
show: Optional[bool] = None,
save: Optional[Union[bool, str]] = None,
**kwds,
):
"""Plots the correlation matrix computed as part of `sc.tl.dendrogram`.
Parameters
----------
adata
groupby
Categorical data column used to create the dendrogram
show_correlation_numbers
        If `True`, plot the correlation value on top of each cell.
dendrogram
If True or a valid dendrogram key, a dendrogram based on the hierarchical clustering
between the `groupby` categories is added. The dendrogram information is computed
using :func:`scanpy.tl.dendrogram`. If `tl.dendrogram` has not been called previously
the function is called with default parameters.
figsize
By default a figure size that aims to produce a squared correlation matrix plot is used.
Format is (width, height)
{show_save_ax}
**kwds
        Are passed to :func:`matplotlib.pyplot.pcolormesh` when plotting the
        correlation heatmap. Useful values to pass are `vmax`, `vmin` and `cmap`.
Returns
    -------
    List of :class:`~matplotlib.axes.Axes`
Examples
--------
>>> import scanpy as sc
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.dendrogram(adata, 'bulk_labels')
    >>> sc.pl.correlation_matrix(adata, 'bulk_labels')
"""
dendrogram_key = _get_dendrogram_key(adata, dendrogram, groupby)
index = adata.uns[dendrogram_key]['categories_idx_ordered']
corr_matrix = adata.uns[dendrogram_key]['correlation_matrix']
# reorder matrix columns according to the dendrogram
if dendrogram:
        assert len(index) == corr_matrix.shape[0]
corr_matrix = corr_matrix[index, :]
corr_matrix = corr_matrix[:, index]
labels = list(adata.obs[groupby].cat.categories)
labels = np.array(labels).astype('str')[index]
else:
labels = adata.obs[groupby].cat.categories
num_rows = corr_matrix.shape[0]
colorbar_height = 0.2
if dendrogram:
dendrogram_width = 1.8
else:
dendrogram_width = 0
if figsize is None:
corr_matrix_height = num_rows * 0.6
height = corr_matrix_height + colorbar_height
width = corr_matrix_height + dendrogram_width
else:
width, height = figsize
corr_matrix_height = height - colorbar_height
fig = pl.figure(figsize=(width, height))
# layout with 2 rows and 2 columns:
# row 1: dendrogram + correlation matrix
# row 2: nothing + colormap bar (horizontal)
gs = gridspec.GridSpec(nrows=2, ncols=2, width_ratios=[dendrogram_width, corr_matrix_height],
height_ratios=[corr_matrix_height, colorbar_height], wspace=0.01, hspace=0.05)
axs = []
corr_matrix_ax = fig.add_subplot(gs[1])
if dendrogram:
dendro_ax = fig.add_subplot(gs[0], sharey=corr_matrix_ax)
_plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram_key,
remove_labels=True, orientation='left',
                         ticks=np.arange(corr_matrix.shape[0]) + 0.5)
axs.append(dendro_ax)
# define some default pcolormesh parameters
    if 'edgecolors' not in kwds:
if corr_matrix.shape[0] > 30:
# when there are too many rows it is better to remove
# the black lines surrounding the boxes in the heatmap
kwds['edgecolors'] = 'none'
else:
kwds['edgecolors'] = 'black'
kwds['linewidth'] = 0.01
if 'vmax' not in kwds and 'vmin' not in kwds:
kwds['vmax'] = 1
kwds['vmin'] = -1
if 'cmap' not in kwds:
# by default use a divergent color map
kwds['cmap'] = 'bwr'
img_mat = corr_matrix_ax.pcolormesh(corr_matrix, **kwds)
corr_matrix_ax.set_xlim(0, num_rows)
corr_matrix_ax.set_ylim(0, num_rows)
corr_matrix_ax.yaxis.tick_right()
    corr_matrix_ax.set_yticks(np.arange(corr_matrix.shape[0]) + 0.5)
corr_matrix_ax.set_yticklabels(labels)
corr_matrix_ax.xaxis.set_tick_params(labeltop=True)
corr_matrix_ax.xaxis.set_tick_params(labelbottom=False)
    corr_matrix_ax.set_xticks(np.arange(corr_matrix.shape[0]) + 0.5)
corr_matrix_ax.set_xticklabels(labels, rotation=45, ha='left')
corr_matrix_ax.tick_params(
axis='x',
which='both',
bottom=False,
top=False)
corr_matrix_ax.tick_params(
axis='y',
which='both',
left=False,
right=False)
if show_correlation_numbers:
for row in range(num_rows):
for col in range(num_rows):
corr_matrix_ax.text(row + 0.5, col + 0.5,
"{:.2f}".format(corr_matrix[row, col]),
ha='center', va='center')
axs.append(corr_matrix_ax)
# Plot colorbar
colormap_ax = fig.add_subplot(gs[3])
cobar = pl.colorbar(img_mat, cax=colormap_ax, orientation='horizontal')
cobar.solids.set_edgecolor("face")
axs.append(colormap_ax)
return axs
def _prepare_dataframe(
adata: AnnData,
var_names,
groupby=None,
use_raw=None,
log=False,
num_categories=7,
layer=None,
gene_symbols=None,
):
"""
    Given the anndata object, prepares a data frame in which the row index is given by the
    categories defined by `groupby` and the columns correspond to `var_names`.
Parameters
----------
adata
Annotated data matrix.
var_names : `str` or list of `str`
`var_names` should be a valid subset of `adata.var_names`.
groupby : `str` or `None`, optional (default: `None`)
The key of the observation grouping to consider. It is expected that
groupby is a categorical. If groupby is not a categorical observation,
it would be subdivided into `num_categories`.
log : `bool`, optional (default: `False`)
Use the log of the values
use_raw : `bool`, optional (default: `None`)
Use `raw` attribute of `adata` if present.
num_categories : `int`, optional (default: `7`)
Only used if groupby observation is not categorical. This value
determines the number of groups into which the groupby observation
should be subdivided.
gene_symbols : string, optional (default: `None`)
Key for field in .var that stores gene symbols.
Returns
-------
    Tuple of the list of categories and the `pandas.DataFrame` of values.
"""
from scipy.sparse import issparse
sanitize_anndata(adata)
if use_raw is None and adata.raw is not None: use_raw = True
if isinstance(var_names, str):
var_names = [var_names]
if groupby is not None:
if groupby not in adata.obs_keys():
raise ValueError('groupby has to be a valid observation. Given value: {}, '
'valid observations: {}'.format(groupby, adata.obs_keys()))
if gene_symbols is not None and gene_symbols in adata.var.columns:
# translate gene_symbols to var_names
        # slow method but gives a meaningful error in case no gene symbol is found:
translated_var_names = []
for symbol in var_names:
if symbol not in adata.var[gene_symbols].values:
logg.error(f"Gene symbol {symbol!r} not found in given gene_symbols column: {gene_symbols!r}")
return
translated_var_names.append(adata.var[adata.var[gene_symbols] == symbol].index[0])
symbols = var_names
var_names = translated_var_names
if layer is not None:
if layer not in adata.layers.keys():
raise KeyError('Selected layer: {} is not in the layers list. The list of '
'valid layers is: {}'.format(layer, adata.layers.keys()))
matrix = adata[:, var_names].layers[layer]
elif use_raw:
matrix = adata.raw[:, var_names].X
else:
matrix = adata[:, var_names].X
if issparse(matrix):
matrix = matrix.toarray()
if log:
matrix = np.log1p(matrix)
obs_tidy = pd.DataFrame(matrix, columns=var_names)
if groupby is None:
groupby = ''
categorical = pd.Series(np.repeat('', len(obs_tidy))).astype('category')
else:
        if not pd.api.types.is_categorical_dtype(adata.obs[groupby]):
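            # Hedged sketch -- the original snippet is truncated at this point. The
            # docstring above states that a non-categorical `groupby` column is
            # subdivided into `num_categories` groups, which `pd.cut` provides:
            categorical = pd.cut(adata.obs[groupby], num_categories)
        else:
            categorical = adata.obs[groupby]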
import os
n_threads = 1
os.environ["NUMBA_NUM_THREADS"] = f"{n_threads}"
os.environ["MKL_NUM_THREADS"] = f"{n_threads}"
os.environ["OMP_NUM_THREADS"] = f"{n_threads}"
os.environ["NUMEXPR_NUM_THREADS"] = f"{n_threads}"
import respy as rp
from estimagic.differentiation.differentiation import jacobian
from estimagic.inference.likelihood_covs import cov_jacobian
from pathlib import Path
import pandas as pd
import numpy as np
from morris import elementary_effects
from time import time
from joblib import wrap_non_picklable_objects
from joblib.externals.loky import set_loky_pickler
start_params, options, data = rp.get_example_model("kw_94_one", with_data=True)
start_params = pd.read_csv("params.csv").set_index(["category", "name"])
options["simulation_agents"] = 4000
to_drop = [
('lagged_choice_1_edu', 'probability'),
('initial_exp_edu_10', 'probability'),
('maximum_exp', 'edu')
]
cov_path = Path("bld/cov.pickle")
if cov_path.exists():
cov = pd.read_pickle(cov_path)
else:
loglikeobs = rp.get_crit_func(
start_params, options, data, return_scalar=False)
jac = jacobian(loglikeobs, start_params, extrapolation=False)
reduced_jac = jac.drop(columns=to_drop)
cov = cov_jacobian(reduced_jac)
    pd.to_pickle(cov, cov_path)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# turn off pink warning boxes
import warnings
warnings.filterwarnings("ignore")
#-----------------------------------------------------------------------------
def clean_flood(flood):
'''Drops unneeded columns from the med center flooding df
Makes sure DateTime is in DateTime format'''
# drop the columns
flood = flood.drop(columns=['LAT', 'LONG', 'Zone',
'SensorStatus', 'AlertTriggered',
'Temp_C', 'Temp_F', 'Vendor'])
# Set to date time format
flood.DateTime = pd.to_datetime(flood.DateTime)
flood = flood.rename(columns={"DateTime": "datetime",
"DistToWL_ft": "sensor_to_water_feet",
"DistToWL_m": "sensor_to_water_meters",
"DistToDF_ft": "sensor_to_ground_feet",
"DistToDF_m": "sensor_to_ground_meters"})
# replae -999 with 0
flood["sensor_to_ground_feet"].replace({-999:13.5006561680}, inplace=True)
flood["sensor_to_ground_meters"].replace({-999:4.115}, inplace=True)
#flood = flood.replace(to_replace=-999, value=0)
# create new features for flood depth
flood['flood_depth_feet'] = flood.sensor_to_ground_feet - flood.sensor_to_water_feet
flood['flood_depth_meters'] = flood.sensor_to_ground_meters - flood.sensor_to_water_meters
# Create new alert
def flood_alert(c):
if 0 < c['flood_depth_feet'] < 0.66667:
return 'No Risk'
elif 0.66667 < c['flood_depth_feet'] < 1.08333:
return 'Minor Risk'
elif 1.08333 < c['flood_depth_feet'] < 2.16667:
return 'Moderate Risk'
elif 2.16667 < c['flood_depth_feet']:
return 'Major Risk !'
else:
return 'No Alert'
flood['flood_alert'] = flood.apply(flood_alert, axis=1)
flood = flood[(flood.sensor_to_water_feet != -999)]
# return new df
return flood
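# Worked example of the flood-depth logic above: with the default sensor height of
# 13.5 ft (sensor_to_ground_feet), a sensor_to_water_feet reading of 12.0 gives a
# flood depth of 13.5 - 12.0 = 1.5 ft, which falls in the 1.08333-2.16667 ft bin and
# is therefore labelled 'Moderate Risk'.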
#-----------------------------------------------------------------------------
def clean_air(air):
'''Drops unneeded columns from the air quality df
then handles the nulls in alert triggered column
set to date time format'''
# drop the colums
air = air.drop(columns=['LAT', 'LONG', 'Zone',
'Sensor_id', 'SensorModel',
'SensorStatus', 'Vendor'])
    # replace nulls in AlertTriggered with the string "None"
air.fillna("None", inplace = True)
# set to date time format
air.DateTime = pd.to_datetime(air.DateTime)
# rename features
air = air.rename(columns={"DateTime": "datetime",
"AlertTriggered":"alert_triggered"})
air = air.replace(to_replace=-999, value=0)
# create time series features
air['dates'] = pd.to_datetime(air['datetime']).dt.date
air['time'] = pd.to_datetime(air['datetime']).dt.time
air['hour'] = pd.to_datetime(air['datetime']).dt.hour
air['weekday'] = pd.to_datetime(air['datetime']).dt.weekday
# make all CO bins
air['AQI_CO'] = pd.cut(air.CO,
bins = [-1,4.5,9.5,12.5,15.5,30.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
CO_24hr = air.groupby('dates', as_index=False)['CO'].mean()
CO_24hr = CO_24hr.rename(columns={'CO':'CO_24hr'})
air = air.merge(CO_24hr, on = 'dates', how ='left')
air['AQI_CO_24hr'] = pd.cut(air.CO_24hr,
bins = [-1,4.5,9.5,12.5,15.5,30.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
air['AQI_pm2_5'] = pd.cut(air.Pm2_5,
bins = [-1,12.1,35.5,55.5,150.5,250.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
pm_25_24hr = air.groupby('dates', as_index=False)['Pm2_5'].mean()
pm_25_24hr = pm_25_24hr.rename(columns={'Pm2_5':'Pm_25_24hr'})
air = air.merge(pm_25_24hr, on = 'dates', how ='left')
air['AQI_pm_25_24hr'] = pd.cut(air.Pm_25_24hr,
bins = [-1,12.1,35.5,55.5,150.5,250.5,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
air['AQI_pm10'] = pd.cut(air.Pm10,
bins = [-1,55,154,255,355,425,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
pm_10_24hr = air.groupby('dates', as_index=False)['Pm10'].mean()
pm_10_24hr = pm_10_24hr.rename(columns={'Pm10':'Pm_10_24hr'})
air = air.merge(pm_10_24hr, on = 'dates', how ='left')
air['AQI_pm10_24hr'] = pd.cut(air.Pm_10_24hr,
bins = [-1,55,154,255,355,425,4000],
labels = ['Good', 'Moderate',
'Unhealthy for Sensitive Groups', "Unhealthy",
"Very Unhealthy", 'Hazardous'])
return air
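# Worked example of the AQI binning above: an hourly CO reading of 10.0 falls in the
# (9.5, 12.5] bin and is labelled 'Unhealthy for Sensitive Groups'; a 24-hour mean
# Pm2_5 of 40.0 falls in the (35.5, 55.5] bin and gets the same label.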
#-----------------------------------------------------------------------------
def wrangle_weather(weather):
'''
This function will drop unneccessary columns,
change datetime to a pandas datetime datatype,
and rename columns to be more readable to return
a clean dataframe.
'''
#read csv and turn into pandas dataframe
sa_weather = pd.read_csv('SA_weather.csv')
# concat sa date and time
sa_weather['Date_Time'] = sa_weather['Date'] + ' ' + sa_weather['Time']
# put into date time format
sa_weather.Date_Time = pd.to_datetime(sa_weather.Date_Time)
# round to nearest hour
sa_weather['DateTime'] = sa_weather['Date_Time'].dt.round('60min')
# set sa weather index
sa_weather = sa_weather.set_index('DateTime')
# drop old datetime
sa_weather = sa_weather.drop(columns=['Date_Time', 'Temp', 'Humidity', 'Barometer'])
# rename
sa_weather = sa_weather.rename(columns={"Time": "time",
"Date": "date",
"Weather": "weather",
"Wind": "wind",
"Visibility": "visibility"})
#drop columns we will not be using
weather.drop(columns=[
'Sensor_id',
'Vendor',
'SensorModel',
'LAT',
'LONG',
'Zone',
'AlertTriggered',
'SensorStatus'], inplace=True)
#rename columns to be more readable
weather = weather.rename(columns={"DateTime": "datetime",
"Temp_C": "celsius",
"Temp_F": "farenheit",
"Humidity": "humidity",
"DewPoint_C": "dewpoint_celsius",
"DewPoint_F": "dewpoint_farenheit",
"Pressure_Pa": "pressure"})
#change datetime to pandas datetime object
weather.datetime = pd.to_datetime(weather.datetime)
# round to hour
weather['DateTime'] = weather['datetime'].dt.round('60min')
# set index
weather = weather.set_index('DateTime')
# join the 2 df's
weather = weather.join(sa_weather, how='right')
# repalce -999
weather = weather.replace(to_replace=-999, value=0)
# drop nulls
weather.dropna(inplace = True)
#return clean weather df
return weather
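# Worked example of the datetime rounding above: dt.round('60min') maps a reading taken
# at 10:42 to 11:00 and one taken at 10:25 to 10:00, which is what lets the sensor rows
# join cleanly against the hourly SA_weather rows on the shared DateTime index.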
#-----------------------------------------------------------------------------
def wrangle_sound(df):
'''
This function drops unnecessary columns and
converts the 'DateTime' column to a datetime
object
'''
# Drops unnecessary columns
df = df.drop(columns = ['SensorStatus', 'AlertTriggered', 'Zone', 'LONG',
'LAT', 'SensorModel', 'Vendor', 'Sensor_id'])
# Converts to datetime
df['DateTime'] = pd.to_datetime(df.DateTime)
# make noise level feature
df['how_loud'] = pd.cut(df.NoiseLevel_db,
bins = [-1,46,66,81,101,4000],
labels = ['Normal', 'Moderate',
'Loud', "Very Loud",
"Extremely Loud"])
    def sound_alert(c):
        # check the louder threshold first, otherwise 'Major Risk' is unreachable
        if c['NoiseLevel_db'] > 120:
            return 'Major Risk'
        elif c['NoiseLevel_db'] > 80:
            return 'Minor Risk'
        else:
            return 'No Alert'
df['sound_alert'] = df.apply(sound_alert, axis=1)
return df
#-----------------------------------------------------------------------------
def full_daily_downtown_COSA_dataframe():
'''
This function takes in all COSA dataframes,
averages them by day, then joins them all together
using the day as a primary key
'''
# Pulls sound CSV and sets datetime as index, then orders it
df = pd.read_csv('downtown_sound.csv')
sound_df = wrangle_sound(df)
sound_df = sound_df.set_index('DateTime')
sound_df = sound_df.sort_index()
# Pulls flood CSV and sets datetime as index
flood = pd.read_csv('downtown_flood.csv')
flood_df = clean_flood(flood)
flood_df = flood_df.set_index('datetime')
# Pulls weather CSV
    weather = pd.read_csv('downtown_weather.csv')
## import packages
import pandas as pd
import numpy as np
from glob import glob
import warnings
pd.options.mode.chained_assignment = None # default='warn'
def read_all_csvs(csv_locations):
'''
    Read CSVs from all locations and return them as a dict whose keys are the parent
    folder names and whose values are dataframes.
    Concatenates the dataframes when the keys are the same.
Input:
csv_locations(list): contains list of all csv locations
Output:
base_data(dict): Dictionary of all dataframes
'''
## find the parent directory names
parent_dicts = [[i.split('/')[-2] for i in j] for j in csv_locations]
base_data = dict()
for i,j in zip(parent_dicts, csv_locations):
for k, l in zip(i, j):
## excluding Washington Post as it has some issues in reading that data
if k in ['Washington Post']:
continue
# print(k, l)
if not (k in base_data):
# print(l)
                base_data[k] = pd.read_csv(l)
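            else:
                    # Hedged sketch -- the original is truncated here. The docstring
                    # says frames that share a key are concatenated, which would be:
                    base_data[k] = pd.concat([base_data[k], pd.read_csv(l)],
                                             ignore_index=True)
    return base_data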
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: zohaib
This script merges Pangolin report (assigned lineages) with the
metadata file which allows data extraction and filtering based on
lineage information in nf-ncov-voc workflow.
"""
import argparse
import pandas as pd
import csv
def parse_args():
parser = argparse.ArgumentParser(
description='Merges pangolin output report and metadata file '
'using isolate as key')
parser.add_argument('--metadata', type=str, default=None,
help='Metadata file (.tsv) format')
parser.add_argument('--pangolin', type=str, default=None,
help='Pangolin report (.csv) format')
parser.add_argument('--output', type=str, default=None,
                        help='Output metadata file (.tsv) format')
return parser.parse_args()
def write_metadata(dataframe):
dataframe.to_csv(args.output,
sep="\t",
quoting=csv.QUOTE_NONE,
index=False, header=True)
if __name__ == '__main__':
args = parse_args()
metadata_df = pd.read_csv(args.metadata, sep="\t")
    pangolin_df = pd.read_csv(args.pangolin)
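    # Hedged sketch (the key column names 'isolate' and 'taxon' are assumptions, not
    # taken from the original script): the merge on the isolate key and the final write
    # would look roughly like
    # merged_df = pd.merge(metadata_df, pangolin_df, how='left',
    #                      left_on='isolate', right_on='taxon')
    # write_metadata(merged_df)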
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from copy import deepcopy
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import unittest
import nose
from numpy.testing import assert_almost_equal, assert_allclose
from numpy.testing.decorators import slow
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal)
import trackpy as tp
from trackpy.try_numba import NUMBA_AVAILABLE
from trackpy.linking import PointND, link, Hash_table
# Catch attempts to set values on an inadvertent copy of a Pandas object.
tp.utils.make_pandas_strict()
path, _ = os.path.split(os.path.abspath(__file__))
path = os.path.join(path, 'data')
# Call lambda function for a fresh copy each time.
unit_steps = lambda: [[PointND(t, (x, 0))] for t, x in enumerate(range(5))]
np.random.seed(0)
random_x = np.random.randn(5).cumsum()
random_x -= random_x.min() # All x > 0
max_disp = np.diff(random_x).max()
random_walk_legacy = lambda: [[PointND(t, (x, 5))]
for t, x in enumerate(random_x)]
def hash_generator(dims, box_size):
return lambda: Hash_table(dims, box_size)
def _skip_if_no_numba():
if not NUMBA_AVAILABLE:
raise nose.SkipTest('numba not installed. Skipping.')
def random_walk(N):
return np.cumsum(np.random.randn(N))
def contracting_grid():
"""Two frames with a grid of 441 points.
In the second frame, the points contract, so that the outermost set
coincides with the second-outermost set in the previous frame.
This is a way to challenge (and/or stump) a subnet solver.
"""
pts0x, pts0y = np.mgrid[-10:11,-10:11]
pts0 = pd.DataFrame(dict(x=pts0x.flatten(), y=pts0y.flatten(),
frame=0))
pts1 = pts0.copy()
pts1.frame = 1
pts1.x = pts1.x * 0.9
pts1.y = pts1.y * 0.9
allpts = pd.concat([pts0, pts1], ignore_index=True)
allpts.x += 100 # Because BTree doesn't allow negative coordinates
allpts.y += 100
return allpts
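
# Hedged illustration (added for this writeup, not part of the original test suite).
def _example_contracting_grid_size():
    """The grid above is 21 x 21 = 441 points per frame, so the two frames give
    882 rows in total; this helper just makes that count explicit."""
    pts = contracting_grid()
    assert len(pts) == 2 * 21 * 21
    return pts.groupby('frame').size()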
class CommonTrackingTests(object):
do_diagnostics = False # Don't ask for diagnostic info from linker
def test_one_trivial_stepper(self):
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(10, 2))
assert_frame_equal(actual_iter, expected)
if self.do_diagnostics:
assert 'diag_search_range' in self.diag.columns
# Except for first frame, all particles should have been labeled
# with a search_range
assert not any(self.diag['diag_search_range'][
actual_iter.frame > 0].isnull())
def test_two_isolated_steppers(self):
N = 5
Y = 25
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
def test_two_isolated_steppers_one_gapped(self):
N = 5
Y = 25
# Begin second feature one frame later than the first,
# so the particle labeling (0, 1) is established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': np.arange(N)})
a = a.drop(3).reset_index(drop=True)
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1),
'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy()
expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
expected.reset_index(drop=True, inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# link_df_iter() tests not performed, because hash_size is
# not knowable from the first frame alone.
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
def test_isolated_continuous_random_walks(self):
# Two 2D random walks
np.random.seed(0)
N = 30
Y = 250
M = 20 # margin, because negative values raise OutOfHash
a = DataFrame({'x': M + random_walk(N), 'y': M + random_walk(N), 'frame': np.arange(N)})
b = DataFrame({'x': M + random_walk(N - 1), 'y': M + Y + random_walk(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(2*M, Y + 2*M))
assert_frame_equal(actual_iter, expected)
# Many 2D random walks
np.random.seed(0)
initial_positions = [(100, 100), (200, 100), (100, 200), (200, 200)]
import itertools
c = itertools.count()
def walk(x, y):
i = next(c)
return DataFrame({'x': x + random_walk(N - i),
'y': y + random_walk(N - i),
'frame': np.arange(i, N)})
f = pd.concat([walk(*pos) for pos in initial_positions])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([i*np.ones(N - i) for i in range(len(initial_positions))])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(200 + M, 200 + M))
assert_frame_equal(actual_iter, expected)
def test_start_at_frame_other_than_zero(self):
# One 1D stepper
N = 5
FIRST_FRAME = 3
f = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': FIRST_FRAME + np.arange(N)})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual = self.link_df_iter(f, 5, hash_size=(6, 2))
assert_frame_equal(actual, expected)
def test_blank_frame_no_memory(self):
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N),
'frame': [0, 1, 2, 4, 5]})
expected = f.copy()
expected['particle'] = np.zeros(N)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual = self.link_df_iter(f, 5, hash_size=(10, 10))
assert_frame_equal(actual, expected)
# This doesn't error, but we might wish it would
# give the particle a new ID after the gap. It just
# ignores the missing frame.
def test_real_data_that_causes_duplicate_bug(self):
filename = 'reproduce_duplicate_track_assignment.df'
f = pd.read_pickle(os.path.join(path, filename))
# Not all parameters reproduce it, but these do
self.link_df(f, 8, 2, verify_integrity=True)
def test_search_range(self):
t = self.link(unit_steps(), 1.1, hash_generator((10, 10), 1))
assert len(t) == 1 # One track
t_short = self.link(unit_steps(), 0.9, hash_generator((10, 10), 1))
assert len(t_short) == len(unit_steps()) # Each step is a separate track.
t = self.link(random_walk_legacy(), max_disp + 0.1,
hash_generator((10, 10), 1))
assert len(t) == 1 # One track
t_short = self.link(random_walk_legacy(), max_disp - 0.1,
hash_generator((10, 10), 1))
assert len(t_short) > 1 # Multiple tracks
def test_box_size(self):
"""No matter what the box size, there should be one track, and it should
contain all the points."""
for box_size in [0.1, 1, 10]:
t1 = self.link(unit_steps(), 1.1, hash_generator((10, 10), box_size))
t2 = self.link(random_walk_legacy(), max_disp + 1,
hash_generator((10, 10), box_size))
assert len(t1) == 1
assert len(t2) == 1
assert len(t1[0].points) == len(unit_steps())
assert len(t2[0].points) == len(random_walk_legacy())
def test_easy_tracking(self):
level_count = 5
p_count = 16
levels = []
for j in range(level_count):
level = []
for k in np.arange(p_count) * 2:
level.append(PointND(j, (j, k)))
levels.append(level)
hash_generator = lambda: Hash_table((level_count + 1,
p_count * 2 + 1), .5)
tracks = self.link(levels, 1.5, hash_generator)
assert len(tracks) == p_count
for t in tracks:
x, y = zip(*[p.pos for p in t])
dx = np.diff(x)
dy = np.diff(y)
assert np.sum(dx) == level_count - 1
assert np.sum(dy) == 0
def test_copy(self):
"""Check inplace/copy behavior of link_df, link_df_iter"""
# One 1D stepper
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
f_inplace = f.copy()
expected = f.copy()
expected['particle'] = np.zeros(N)
# Should add particle column in-place
# UNLESS diagnostics are enabled
actual = self.link_df(f_inplace, 5)
assert_frame_equal(actual, expected)
if self.do_diagnostics:
assert 'particle' not in f_inplace.columns
else:
assert_frame_equal(actual, f_inplace)
# Should copy
actual = self.link_df(f, 5, copy_features=True)
assert_frame_equal(actual, expected)
assert 'particle' not in f.columns
# Should copy
actual_iter = self.link_df_iter(f, 5, hash_size=(10, 2))
assert_frame_equal(actual_iter, expected)
assert 'particle' not in f.columns
@nose.tools.raises(tp.SubnetOversizeException)
def test_oversize_fail(self):
self.link_df(contracting_grid(), 1)
@nose.tools.raises(tp.SubnetOversizeException)
def test_adaptive_fail(self):
"""Check recursion limit"""
self.link_df(contracting_grid(), 1, adaptive_stop=0.92)
def link(self, *args, **kwargs):
kwargs.update(self.linker_opts)
return tp.link(*args, **kwargs)
def link_df(self, *args, **kwargs):
kwargs.update(self.linker_opts)
kwargs['diagnostics'] = self.do_diagnostics
return tp.link_df(*args, **kwargs)
def link_df_iter(self, *args, **kwargs):
kwargs.update(self.linker_opts)
kwargs['diagnostics'] = self.do_diagnostics
args = list(args)
features = args.pop(0)
res = pd.concat(tp.link_df_iter(
(df for fr, df in features.groupby('frame')), *args, **kwargs))
return res.sort(['particle', 'frame']).reset_index(drop=True)
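
# Hedged usage sketch (added for this writeup, not part of the original suite): concrete
# test classes are expected to mix in CommonTrackingTests and supply linker_opts, e.g.
# class TestKDTreeLinking(CommonTrackingTests, unittest.TestCase):
#     def setUp(self):
#         self.linker_opts = dict(link_strategy='auto', neighbor_strategy='KDTree')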
class TestOnce(unittest.TestCase):
# simple API tests that need only run on one engine
def setUp(self):
N = 5
f = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
self.features = f
def test_t_column(self):
f = self.features.copy()
cols = list(f.columns)
name = 'arbitrary name'
cols[cols.index('frame')] = name
f.columns = cols
# smoke tests
tp.link_df(f, 5, t_column=name, verify_integrity=True)
f_iter = (frame for fnum, frame in f.groupby('arbitrary name'))
list(tp.link_df_iter(f_iter, 5, t_column=name, verify_integrity=True))
@nose.tools.raises(ValueError)
def test_check_iter(self):
"""Check that link_df_iter() makes a useful error message if we
try to pass a single DataFrame."""
list(tp.link_df_iter(self.features.copy(), 5))
class SubnetNeededTests(CommonTrackingTests):
"""Tests that assume a best-effort subnet linker (i.e. not "drop")."""
def test_two_nearby_steppers(self):
N = 5
Y = 2
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
if self.do_diagnostics:
assert 'diag_subnet' in self.diag.columns
assert 'diag_subnet_size' in self.diag.columns
# Except for frame in which they appear, all particles should have
# been labeled with a search_range
assert not any(self.diag['diag_search_range'][
actual_iter.frame > 1].isnull())
# The number of loop iterations is reported by the numba linker only
if self.linker_opts['link_strategy'] == 'numba':
assert 'diag_subnet_iterations' in self.diag.columns
def test_two_nearby_steppers_one_gapped(self):
N = 5
Y = 2
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
a = a.drop(3).reset_index(drop=True)
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.array([0, 0, 0, 2]), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
expected.reset_index(drop=True, inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f.sort('frame'), 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual_iter = self.link_df_iter(f1, 5, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
def test_nearby_continuous_random_walks(self):
# Two 2D random walks
np.random.seed(0)
N = 30
Y = 250
M = 20 # margin, because negative values raise OutOfHash
a = DataFrame({'x': M + random_walk(N),
'y': M + random_walk(N),
'frame': np.arange(N)})
b = DataFrame({'x': M + random_walk(N - 1),
'y': M + Y + random_walk(N - 1),
'frame': np.arange(1, N)})
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.zeros(N), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual = self.link_df_iter(f, 5, hash_size=(2*M, 2*M + Y))
assert_frame_equal(actual, expected)
# Several 2D random walks
np.random.seed(0)
initial_positions = [(10, 11), (10, 18), (14, 15), (20, 21), (13, 13),
(10, 10), (17, 19)]
import itertools
c = itertools.count()
def walk(x, y):
i = next(c)
return DataFrame({'x': x + random_walk(N - i),
'y': y + random_walk(N - i),
'frame': np.arange(i, N)})
f = pd.concat([walk(*pos) for pos in initial_positions])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([i*np.ones(N - i) for i in range(len(initial_positions))])
expected.sort(['particle', 'frame'], inplace=True)
actual = self.link_df(f, 5)
assert_frame_equal(actual, expected)
actual = self.link_df_iter(f, 5, hash_size=(2*M, 2*M))
assert_frame_equal(actual, expected)
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5)
assert_frame_equal(actual, expected)
actual = self.link_df_iter(f1, 5, hash_size=(2*M, 2*M))
assert_frame_equal(actual, expected)
def test_quadrature_distances(self):
"""A simple test to check whether the subnet linker adds
distances in quadrature (as in Crocker-Grier)."""
def subnet_test(epsilon):
"""Returns 2 features in 2 frames, which represent a special
case when the subnet linker adds distances in quadrature. With
epsilon=0, subnet linking is degenerate. Therefore
linking should differ for positive and negative epsilon."""
return pd.DataFrame([(0, 10, 11), (0, 10, 8),
(1, 9, 10), (1, 12, 10 + epsilon)],
columns=['frame', 'x', 'y'])
trneg = self.link_df(subnet_test(0.01), 5, retain_index=True)
trpos = self.link_df(subnet_test(-0.01), 5, retain_index=True)
assert not np.allclose(trneg.particle.values, trpos.particle.values)
def test_memory(self):
"""A unit-stepping trajectory and a random walk are observed
simultaneously. The random walk is missing from one observation."""
a = [p[0] for p in unit_steps()]
b = [p[0] for p in random_walk_legacy()]
# b[2] is intentionally omitted below.
gapped = lambda: deepcopy([[a[0], b[0]], [a[1], b[1]], [a[2]],
[a[3], b[3]], [a[4], b[4]]])
safe_disp = 1 + random_x.max() - random_x.min() # Definitely large enough
t0 = self.link(gapped(), safe_disp, hash_generator((10, 10), 1), memory=0)
assert len(t0) == 3, len(t0)
t2 = self.link(gapped(), safe_disp, hash_generator((10, 10), 1), memory=2)
assert len(t2) == 2, len(t2)
def test_memory_removal(self):
"""BUG: A particle remains in memory after its Track is resumed, leaving two
copies that can independently pick up desinations, leaving two Points in the
same Track in a single level."""
levels = []
levels.append([PointND(0, [1, 1]), PointND(0, [4, 1])]) # two points
levels.append([PointND(1, [1, 1])]) # one vanishes, but is remembered
levels.append([PointND(2, [1, 1]), PointND(2, [2, 1])]) # resume Track
levels.append([PointND(3, [1, 1]), PointND(3, [2, 1]), PointND(3, [4, 1])])
t = self.link(levels, 5, hash_generator((10, 10), 1), memory=2)
assert len(t) == 3, len(t)
def test_memory_with_late_appearance(self):
a = [p[0] for p in unit_steps()]
b = [p[0] for p in random_walk_legacy()]
gapped = lambda: deepcopy([[a[0]], [a[1], b[1]], [a[2]],
[a[3]], [a[4], b[4]]])
safe_disp = 1 + random_x.max() - random_x.min() # large enough
t0 = self.link(gapped(), safe_disp, hash_generator((10, 10), 1), memory=1)
assert len(t0) == 3, len(t0)
t2 = self.link(gapped(), safe_disp, hash_generator((10, 10), 1), memory=4)
assert len(t2) == 2, len(t2)
def test_memory_on_one_gap(self):
N = 5
Y = 2
# Begin second feature one frame later than the first, so the particle labeling (0, 1) is
# established and not arbitrary.
a = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.ones(N - 1), 'frame': np.arange(1, N)})
a = a.drop(3).reset_index(drop=True)
f = pd.concat([a, b])
expected = f.copy().reset_index(drop=True)
expected['particle'] = np.concatenate([np.array([0, 0, 0, 0]), np.ones(N - 1)])
expected.sort(['particle', 'frame'], inplace=True)
expected.reset_index(drop=True, inplace=True)
actual = self.link_df(f, 5, memory=1)
assert_frame_equal(actual, expected)
if self.do_diagnostics:
assert 'diag_remembered' in self.diag.columns
actual_iter = self.link_df_iter(f, 5, hash_size=(50, 50), memory=1)
assert_frame_equal(actual_iter, expected)
if self.do_diagnostics:
assert 'diag_remembered' in self.diag.columns
# Sort rows by frame (normal use)
actual = self.link_df(f.sort('frame'), 5, memory=1)
assert_frame_equal(actual, expected)
if self.do_diagnostics:
assert 'diag_remembered' in self.diag.columns
actual_iter = self.link_df_iter(f.sort('frame'), 5,
memory=1, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
if self.do_diagnostics:
assert 'diag_remembered' in self.diag.columns
# Shuffle rows (crazy!)
np.random.seed(0)
f1 = f.reset_index(drop=True)
f1.reindex(np.random.permutation(f1.index))
actual = self.link_df(f1, 5, memory=1)
assert_frame_equal(actual, expected)
if self.do_diagnostics:
assert 'diag_remembered' in self.diag.columns
actual_iter = self.link_df_iter(f1, 5, memory=1, hash_size=(50, 50))
assert_frame_equal(actual_iter, expected)
if self.do_diagnostics:
assert 'diag_remembered' in self.diag.columns
def test_pathological_tracking(self):
level_count = 5
p_count = 16
levels = []
shift = 1
for j in range(level_count):
level = []
for k in np.arange(p_count) * 2:
level.append(PointND(k // 2, (j, k + j * shift)))
levels.append(level)
hash_generator = lambda: Hash_table((level_count + 1,
p_count*2 + level_count*shift + 1),
.5)
tracks = self.link(levels, 8, hash_generator)
assert len(tracks) == p_count, len(tracks)
class DiagnosticsTests(CommonTrackingTests):
"""Mixin to obtain diagnostic info from the linker.
Makes examining that info optional, so that most tests can focus on
correctness of tracking.
"""
do_diagnostics = True
def _strip_diag(self, df):
"""Move diagnostic columns from the returned DataFrame into a buffer.
"""
diag_cols = [cn for cn in df.columns if cn.startswith('diag_')]
self.diag = df.reindex(columns=diag_cols)
return tp.strip_diagnostics(df)
def link_df(self, *args, **kwargs):
return self._strip_diag(
super(DiagnosticsTests, self).link_df(*args, **kwargs))
def link_df_iter(self, *args, **kwargs):
df = self._strip_diag(
super(DiagnosticsTests, self).link_df_iter(*args, **kwargs))
# pd.concat() can mess with the column order if not all columns
# are present in all DataFrames. So we enforce it here.
return df.reindex(columns=['frame', 'x', 'y', 'particle'])
class NumbaOnlyTests(SubnetNeededTests):
"""Tests that are unbearably slow without a fast subnet linker."""
def test_adaptive_range(self):
cg = contracting_grid()
# Allow 5 applications of the step
tracks = self.link_df(cg, 1, adaptive_step=0.8, adaptive_stop=0.32)
# Transform back to origin
tracks.x -= 100
tracks.y -= 100
assert len(cg) == len(tracks)
tr0 = tracks[tracks.frame == 0].set_index('particle')
tr1 = tracks[tracks.frame == 1].set_index('particle')
only0 = list(set(tr0.index) - set(tr1.index))
only1 = list(set(tr1.index) - set(tr0.index))
# From the first frame, the outermost particles should have been lost.
assert all((tr0.x.ix[only0].abs() > 9.5) | (tr0.y.ix[only0].abs() > 9.5))
# There should be new tracks in the second frame, corresponding to the
# middle radii.
assert all((tr1.x.ix[only1].abs() == 4.5) | (tr1.y.ix[only1].abs() == 4.5))
if self.do_diagnostics:
# We use this opportunity to check for diagnostic data
# made by the numba linker only.
assert 'diag_subnet_iterations' in self.diag.columns
class TestKDTreeWithDropLink(CommonTrackingTests, unittest.TestCase):
def setUp(self):
self.linker_opts = dict(link_strategy='drop',
neighbor_strategy='KDTree')
def test_drop_link(self):
# One 1D stepper. A new particle appears in frame 2.
# The resulting subnet causes the trajectory to be broken
# when link_strategy is 'drop' and search_range is large enough.
N = 2
f_1particle = DataFrame({'x': np.arange(N), 'y': np.ones(N), 'frame': np.arange(N)})
f = f_1particle.append(DataFrame(
{'x': [3], 'y': [1], 'frame': [1]}), ignore_index=True)
f_expected_without_subnet = f.copy()
f_expected_without_subnet['particle'] = [0, 0, 1]
# The linker assigns new particle IDs in arbitrary order. So
# comparing with expected values is tricky.
# We just check for the creation of 2 new trajectories.
without_subnet = self.link_df(f, 1.5, retain_index=True)
        assert_frame_equal(without_subnet, f_expected_without_subnet, check_dtype=False)
"""
module for testing plot_corr(df, x, y) function.
"""
import random
from time import time
import dask.array as da
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pytest
from ...eda.correlation import compute_correlation, plot_correlation
from ...eda.correlation.compute import (
kendall_tau_1xn,
kendall_tau_nxn,
pearson_1xn,
pearson_nxn,
spearman_1xn,
spearman_nxn,
)
from ...eda.utils import to_dask
@pytest.fixture(scope="module") # type: ignore
def simpledf() -> dd.DataFrame:
df = pd.DataFrame(np.random.rand(100, 3), columns=["a", "b", "c"])
df = pd.concat([df, pd.Series(["a"] * 100)], axis=1)
df.columns = ["a", "b", "c", "d"]
df = to_dask(df)
return df
def test_sanity_compute_1(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf)
plot_correlation(simpledf)
def test_sanity_compute_2(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, k=1)
plot_correlation(simpledf, k=1)
def test_sanity_compute_3(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, x="a")
plot_correlation(simpledf, x="a")
def test_sanity_compute_4(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, x="a", value_range=(0.5, 0.8))
plot_correlation(simpledf, x="a", value_range=(0.5, 0.8))
def test_sanity_compute_5(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, x="a", k=1)
plot_correlation(simpledf, x="a", k=1)
def test_sanity_compute_6(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, x="a", k=0)
plot_correlation(simpledf, x="a", k=0)
def test_sanity_compute_7(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, x="b", y="a")
plot_correlation(simpledf, x="b", y="a")
def test_sanity_compute_8(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, x="b", y="a", k=1)
plot_correlation(simpledf, x="b", y="a", k=1)
def test_sanity_compute_9(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, value_range=(0.3, 0.7))
plot_correlation(simpledf, value_range=(0.3, 0.7))
@pytest.mark.xfail # type: ignore
def test_sanity_compute_fail_2(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, k=3, value_range=(0.3, 0.7))
plot_correlation(simpledf, k=3, value_range=(0.3, 0.7))
@pytest.mark.xfail # type: ignore
def test_sanity_compute_fail_3(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, x="a", value_range=(0.5, 0.8), k=3)
plot_correlation(simpledf, x="a", value_range=(0.5, 0.8), k=3)
@pytest.mark.xfail # type: ignore
def test_sanity_compute_fail_4(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, y="a")
plot_correlation(simpledf, y="a")
@pytest.mark.xfail # type: ignore
def test_sanity_compute_fail_5(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, x="d")
plot_correlation(simpledf, x="d")
@pytest.mark.xfail # type: ignore
def test_sanity_compute_fail_6(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, x="b", y="a", value_range=(0.5, 0.8))
plot_correlation(simpledf, x="b", y="a", value_range=(0.5, 0.8))
@pytest.mark.xfail # type: ignore
def test_sanity_compute_fail_7(simpledf: dd.DataFrame) -> None:
compute_correlation(simpledf, x="b", y="a", value_range=(0.5, 0.8), k=3)
plot_correlation(simpledf, x="b", y="a", value_range=(0.5, 0.8), k=3)
def test_compute_pearson() -> None:
array = np.random.rand(100, 10)
darray = da.from_array(array)
a = pearson_nxn(darray).compute()
b = pd.DataFrame(data=array).corr("pearson").values
assert np.isclose(a, b).all()
for i in range(array.shape[1]):
_, a = pearson_1xn(darray[:, i], darray)
assert np.isclose(a, np.sort(b[:, i])).all()
def test_compute_spearman() -> None:
array = np.random.rand(100, 10)
darray = da.from_array(array)
a = spearman_nxn(darray).compute()
b = pd.DataFrame(data=array).corr("spearman").values
assert np.isclose(a, b).all()
for i in range(array.shape[1]):
_, a = spearman_1xn(darray[:, i], darray)
assert np.isclose(a, np.sort(b[:, i])).all()
def test_compute_kendall() -> None:
array = np.random.rand(100, 10)
darray = da.from_array(array)
a = kendall_tau_nxn(darray).compute()
    b = pd.DataFrame(data=array).corr("kendall").values
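    # Hedged sketch (assumption): the test is truncated here; the remaining checks
    # presumably mirror the pearson/spearman tests above, e.g.
    # assert np.isclose(a, b).all()
    # for i in range(array.shape[1]):
    #     _, a = kendall_tau_1xn(darray[:, i], darray)
    #     assert np.isclose(a, np.sort(b[:, i])).all()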
"""
Utilities for public_data.
"""
import gzip
try:
import ujson as json
except ImportError:
import json
import numpy as np
import pandas as pd
import warnings
def read_json(filename):
"""
Read a JSON file.
Parameters
----------
filename : str
Filename. Must be of type .json or .json.gz.
"""
if filename.endswith('json.gz'):
with gzip.open(filename) as f:
tree = json.load(f)
elif filename.endswith('.json'):
with open(filename) as f:
tree = json.load(f)
else:
raise ValueError('Filename must be of type .json or .json.gz.')
return tree
def read_sid_cid_map(filename):
"""
Read SID->CID map.
Parameters
----------
filename : str
SID->CID map.
"""
if filename.endswith('.gz'):
f = gzip.open(filename)
else:
f = open(filename)
try:
sid_cid = {}
for line in f:
sid, cid = line.split()
assert int(sid) not in sid_cid
sid_cid[int(sid)] = int(cid)
return sid_cid
finally:
f.close()
class PcbaJsonParser(object):
"""
  Parser for PubChem BioAssay JSON.
Parameters
----------
filename : str
Filename.
"""
def __init__(self, filename):
self.tree = read_json(filename)
self.data = None
# move in to the assay description
try:
# FTP format
self.root = self.tree['PC_AssaySubmit']['assay']['descr']
except KeyError:
# REST format
# should just be one record per file
assert len(self.tree['PC_AssayContainer']) == 1
self.root = self.tree['PC_AssayContainer'][0]['assay']['descr']
def get_name(self):
"""
Get assay name.
"""
return self.root['name']
def get_aid(self):
"""
Get assay AID.
"""
return self.root["aid"]["id"]
def get_activity_outcome_method(self):
"""
Get activity outcome method.
"""
#
if "activity_outcome_method" in self.root:
method = self.root["activity_outcome_method"]
if "counter" in self.get_name().lower():
method = "counterscreen"
return method
else:
return None
def get_description(self):
"""
Get assay description.
"""
if isinstance(self.root['description'], list):
return '\n'.join(
[line.strip() for line in self.root['description']])
else:
return self.root['description']
def get_protocol(self):
"""
Get assay protocol.
"""
if isinstance(self.root['protocol'], list):
return '\n'.join([line.strip() for line in self.root['protocol']])
else:
return self.root['protocol']
def get_target(self):
"""
Get assay target.
TODO: Decide which fields are important. We may be able to match
targets by mol-id.
Returns
-------
target : dict
A dictionary containing keys for target information types, such
as 'name', 'mol-id', and 'molecule-type'.
"""
if 'target' in self.root:
return self.root['target']
else:
return None
def get_comment(self):
"""
Get assay comment.
"""
if "comment" in self.root:
if isinstance(self.root["comment"], list):
return "\n".join([line.strip() for line in self.root["comment"]])
else:
return self.root["comment"]
else:
return None
def get_results(self):
"""
Get Assay result fields.
"""
if "results" in self.root:
return self.root["results"]
else:
return None
def get_revision(self):
"""
Get assay revision.
"""
if "revision" in self.root:
return self.root["revision"]
else:
return None
def get_result_names(self, from_tid=False):
"""
Get column names for assay-specific result fields.
Parameters
----------
from_tid : bool, optional (default False)
Return a dict mapping TIDs to field names. If False, returns a list of
field names.
"""
tids = {}
names = []
for field in self.get_results():
name = field['name'].strip() # clean up extra whitespace
if name in names:
warnings.warn(
'Duplicated field "{}" in AID {}'.format(name, self.get_aid()))
tids[field['tid']] = name
names.append(name)
if from_tid:
return tids
else:
return names
def get_data(self):
"""
Get assay data in a Pandas dataframe.
"""
if self.data is not None:
return self.data
try:
data = self.tree['PC_AssaySubmit']['data']
except KeyError:
return None
# populate dataframe
tids = self.get_result_names(from_tid=True)
series = []
for dp in data:
point = {}
for key, value in dp.iteritems():
if key == 'data': # assay-specific fields
for col in value:
col_name = tids[col['tid']]
assert len(col['value']) == 1
for col_value in col['value'].itervalues():
point[col_name] = col_value
else: # generic fields
point[key] = value
series.append(point)
df = pd.DataFrame(series)
# add missing columns filled with null values
for col in tids.itervalues():
if col not in df.columns:
df.loc[:, col] = None
assert len(df) == len(data)
self.data = df
return df
def get_selected_data(self, column_mapping, with_aid=False, phenotype=None):
"""
Get a subset of the assay data.
Parameters
----------
column_mapping : dict
Mapping between output dataframe column names and assay data column names
from which to extract data (e.g. 'potency': 'EC50'). Values that are not
found in the assay data columns will be treated as constants.
with_aid : bool, optional (default False)
Annotate each data point with the AID for this assay.
phenotype : str, optional
Default phenotype for non-inactive data points (e.g., 'inhibitor').
Returns
-------
A pandas dataframe containing the selected assay data.
"""
# make a copy of the column mapping
column_mapping = column_mapping.copy()
# add standard PCBA columns
column_mapping['sid'] = 'sid'
column_mapping['outcome'] = 'outcome'
# add optional columns
if with_aid:
column_mapping['aid'] = self.get_aid()
elif 'aid' in column_mapping:
warnings.warn('column_mapping contains "aid"')
# get selected columns
data = self.get_data()
if data is None:
return None
old_cols, new_cols = [], []
constants = {}
for new_col, old_col in column_mapping.iteritems():
if old_col not in data.columns:
constants[new_col] = old_col
else:
new_cols.append(new_col)
old_cols.append(old_col)
df = data[old_cols] # get selected columns from assay data
df.columns = new_cols # rename columns to match column_mapping
for new_col, value in constants.iteritems():
df.insert(0, new_col, value) # add constant-valued columns
# process phenotypes
if phenotype is not None and 'phenotype' not in column_mapping:
df.insert(len(df.columns), 'phenotype', phenotype)
df.loc[df['outcome'] == 'inactive', 'phenotype'] = 'inactive'
return df
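
# Hedged usage sketch (added for this writeup; the filename, column mapping and phenotype
# are assumptions, not from the original module):
# parser = PcbaJsonParser('aid1030.json.gz')
# data = parser.get_selected_data({'potency': 'EC50'}, with_aid=True,
#                                 phenotype='inhibitor')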
class PcbaPandasHandler(object):
"""
Writes data from PCBA into pandas dataframes.
Parameters
----------
"""
def __init__(self):
self.index = 0
self.df = pd.DataFrame(
columns=["name", "aid", "activity_outcome_method",
"description", "comment", "results", "revision"])
self.df['aid'] = self.df['aid'].astype(int) # force AID to int
def add_dataset(self, filename):
"""
Adds dataset to internal dataframe.
"""
parser = PcbaJsonParser(filename)
row = {}
row["name"] = parser.get_name()
row["aid"] = parser.get_aid()
row["activity_outcome_method"] = parser.get_activity_outcome_method()
row["description"] = parser.get_description()
row["comment"] = parser.get_comment()
row["results"] = parser.get_results()
row["revision"] = parser.get_revision()
self.df.loc[self.index] = pd.Series(row)
self.index += 1 # increment index
def get_dataset(self, index):
"""
Fetches information for a particular dataset by index.
"""
return self.df.loc[index]
def to_csv(self, out):
"""
Writes internal dataframe to provided location as csv.
"""
self.df.to_csv(out)
class PcbaDataExtractor(object):
"""
Extract selected data from PCBA assay data.
Parameters
----------
filename : str
PCBA JSON assay data file.
config : dict or pd.Series
Mapping between output dataframe column names and assay data column names
from which to extract data (e.g. 'potency': 'EC50'). Values that are not
found in the assay data columns will be treated as constants.
with_aid : bool, optional (default False)
Annotate each data point with the AID for this assay.
"""
def __init__(self, filename, config, with_aid=True):
self.filename = filename
self.parser = PcbaJsonParser(filename)
self.with_aid = with_aid
self.config = config
self.phenotype = None # default phenotype for this assay
self._check_config() # check configuration
def get_data(self, lower=True, sid_cid=None):
"""
Get selected data from the assay.
Parameters
----------
lower : bool, optional (default True)
Lowercase string fields for consistency.
sid_cid : dict, optional
SID->CID mapping. If provided, adds a 'cid' column to the dataframe.
"""
data = self.parser.get_selected_data(self.config, with_aid=self.with_aid,
phenotype=self.phenotype)
# map SIDs to CIDs
if sid_cid is not None:
cids = [sid_cid.get(sid) for sid in data['sid'].values]
data.loc[:, 'cid'] = pd.Series(cids, index=data.index)
# lowercase string fields for consistency
if lower:
for col, dtype in data.dtypes.iteritems():
if dtype == np.dtype('object'):
data.loc[:, col] = data[col].str.lower()
return data
def _check_config(self):
"""
Check the column mapping and other configuration parameters.
"""
# make a copy of the config
config = self.config.copy()
# remove null columns
for key, value in config.iteritems():
      if pd.isnull(value):
__author__ = 'saeedamen'
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import numpy
from finmarketpy.util.marketconstants import MarketConstants
from findatapy.util import SwimPool
from findatapy.util import LoggerManager
import pickle
#import zlib
#import lz4framed # conda install -c conda-forge py-lz4framed
import blosc
import pickle
market_constants = MarketConstants()
class Backtest(object):
"""Conducts backtest for strategies trading assets. Assumes we have an input of total returns. Reports historical return statistics
and returns time series.
"""
def __init__(self):
self._pnl = None
self._portfolio = None
return
def calculate_diagnostic_trading_PnL(self, asset_a_df, signal_df, further_df=[], further_df_labels=[]):
"""Calculates P&L table which can be used for debugging purposes,
The table is populated with asset, signal and further dataframes provided by the user, can be used to check signalling methodology.
It does not apply parameters such as transaction costs, vol adjusment and so on.
Parameters
----------
asset_a_df : DataFrame
Asset prices
signal_df : DataFrame
Trade signals (typically +1, -1, 0 etc)
        further_df : list of DataFrame
            Further dataframes the user wishes to output in the diagnostic output (typically inputs for the signals)
        further_df_labels : list of str
            Labels to append to the further dataframes
Returns
-------
DataFrame with asset, trading signals and returns of the trading strategy for diagnostic purposes
"""
calculations = Calculations()
asset_rets_df = calculations.calculate_returns(asset_a_df)
strategy_rets = calculations.calculate_signal_returns(signal_df, asset_rets_df)
reset_points = ((signal_df - signal_df.shift(1)).abs())
asset_a_df_entry = asset_a_df.copy(deep=True)
asset_a_df_entry[reset_points == 0] = numpy.nan
asset_a_df_entry = asset_a_df_entry.ffill()
asset_a_df_entry.columns = [x + '_entry' for x in asset_a_df_entry.columns]
asset_rets_df.columns = [x + '_asset_rets' for x in asset_rets_df.columns]
strategy_rets.columns = [x + '_strat_rets' for x in strategy_rets.columns]
signal_df.columns = [x + '_final_signal' for x in signal_df.columns]
for i in range(0, len(further_df)):
further_df[i].columns = [x + '_' + further_df_labels[i] for x in further_df[i].columns]
flatten_df = [asset_a_df, asset_a_df_entry, asset_rets_df, strategy_rets, signal_df]
for f in further_df:
flatten_df.append(f)
return calculations.pandas_outer_join(flatten_df)
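    # Hedged usage sketch (added for this writeup; the variable names are assumptions,
    # not from finmarketpy):
    # backtest = Backtest()
    # diag = backtest.calculate_diagnostic_trading_PnL(spot_df, signal_df,
    #                                                  further_df=[sma_df],
    #                                                  further_df_labels=['sma'])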
def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, run_in_parallel):
"""Calculates P&L of a trading strategy and statistics to be retrieved later
        Calculates the P&L for each asset/signal combination and also for the final strategy, applying appropriate
weighting in the portfolio, depending on predefined parameters, for example:
static weighting for each asset
static weighting for each asset + vol weighting for each asset
static weighting for each asset + vol weighting for each asset + vol weighting for the portfolio
Parameters
----------
br : BacktestRequest
Parameters for the backtest specifying start date, finish data, transaction costs etc.
asset_a_df : pandas.DataFrame
Asset prices to be traded
signal_df : pandas.DataFrame
Signals for the trading strategy
contract_value_df : pandas.DataFrame
Daily size of contracts
"""
calculations = Calculations()
risk_engine = RiskEngine()
# # do an outer join first, so can fill out signal and fill it down
# # this captures the case where the signal changes on an asset holiday
# # it will just get delayed till the next tradable day when we do this
# asset_df_2, signal_df_2 = asset_a_df.align(signal_df, join='outer', axis='index')
# signal_df = signal_df_2.fillna(method='ffill')
#
# # now make sure the dates of both traded asset and signal are aligned properly
# # and use as reference only those days where we have asset information
# asset_df, signal_df = asset_a_df.align(signal_df, join='left', axis = 'index')
logger = LoggerManager().getLogger(__name__)
logger.info("Calculating trading P&L...")
signal_df = signal_df.shift(br.signal_delay)
asset_df, signal_df = calculations.join_left_fill_right(asset_a_df, signal_df)
if (contract_value_df is not None):
asset_df, contract_value_df = asset_df.align(contract_value_df, join='left', axis='index')
contract_value_df = contract_value_df.fillna(
method='ffill') # fill down asset holidays (we won't trade on these days)
# non-trading days of the assets (this may of course vary between the assets we are trading
# if they are from different asset classes)
non_trading_days = numpy.isnan(asset_df.values)
# only allow signals to change on the days when we can trade assets
signal_df = signal_df.mask(non_trading_days) # fill asset holidays with NaN signals
signal_df = signal_df.fillna(method='ffill') # fill these down
tc = br.spot_tc_bp
signal_cols = signal_df.columns.values
asset_df_cols = asset_df.columns.values
pnl_cols = []
for i in range(0, len(asset_df_cols)):
pnl_cols.append(asset_df_cols[i] + " / " + signal_cols[i])
asset_df = asset_df.fillna(method='ffill') # fill down asset holidays (we won't trade on these days)
returns_df = calculations.calculate_returns(asset_df)
# apply a stop loss/take profit to every trade if this has been specified
# do this before we start to do vol weighting etc.
if br.take_profit is not None and br.stop_loss is not None:
returns_df = calculations.calculate_returns(asset_df)
# makes assumption that signal column order matches that of returns
temp_strategy_rets_df = calculations.calculate_signal_returns_as_matrix(signal_df, returns_df)
trade_rets_df = calculations.calculate_cum_rets_trades(signal_df, temp_strategy_rets_df)
# pre_signal_df = signal_df.copy()
signal_df = calculations.calculate_risk_stop_signals(signal_df, trade_rets_df, br.stop_loss, br.take_profit)
# make sure we can't trade where asset price is undefined and carry over signal
signal_df = signal_df.mask(non_trading_days) # fill asset holidays with NaN signals
signal_df = signal_df.fillna(method='ffill') # fill these down (when asset is not trading
# for debugging purposes
# if True:
# signal_df_copy = signal_df.copy()
# trade_rets_df_copy = trade_rets_df.copy()
#
# asset_df_copy.columns = [x + '_asset' for x in temp_strategy_rets_df.columns]
# temp_strategy_rets_df.columns = [x + '_strategy_rets' for x in temp_strategy_rets_df.columns]
# signal_df_copy.columns = [x + '_final_signal' for x in signal_df_copy.columns]
# trade_rets_df_copy.columns = [x + '_cum_trade' for x in trade_rets_df_copy.columns]
#
# to_plot = calculations.pandas_outer_join([asset_df_copy, pre_signal_df, signal_df_copy, trade_rets_df_copy, temp_strategy_rets_df])
# to_plot.to_csv('test.csv')
# do we have a vol target for individual signals?
if br.signal_vol_adjust is True:
leverage_df = risk_engine.calculate_leverage_factor(returns_df, br.signal_vol_target,
br.signal_vol_max_leverage,
br.signal_vol_periods, br.signal_vol_obs_in_year,
br.signal_vol_rebalance_freq,
br.signal_vol_resample_freq,
br.signal_vol_resample_type,
period_shift=br.signal_vol_period_shift)
signal_df = pandas.DataFrame(
signal_df.values * leverage_df.values, index=signal_df.index, columns=signal_df.columns)
self._individual_leverage = leverage_df # contains leverage of individual signal (before portfolio vol target)
_pnl = calculations.calculate_signal_returns_with_tc_matrix(signal_df, returns_df, tc=tc)
_pnl.columns = pnl_cols
adjusted_weights_matrix = None
# portfolio is average of the underlying signals: should we sum them or average them or use another
# weighting scheme?
if br.portfolio_combination is not None:
if br.portfolio_combination == 'sum' and br.portfolio_combination_weights is None:
portfolio = pandas.DataFrame(data=_pnl.sum(axis=1), index=_pnl.index, columns=['Portfolio'])
elif br.portfolio_combination == 'mean' and br.portfolio_combination_weights is None:
portfolio = pandas.DataFrame(data=_pnl.mean(axis=1), index=_pnl.index, columns=['Portfolio'])
adjusted_weights_matrix = self.create_portfolio_weights(br, _pnl, method='mean')
elif 'weighted' in br.portfolio_combination and isinstance(br.portfolio_combination_weights, dict):
# get the weights for each asset
adjusted_weights_matrix = self.create_portfolio_weights(br, _pnl, method=br.portfolio_combination)
portfolio = pandas.DataFrame(data=(_pnl.values * adjusted_weights_matrix), index=_pnl.index)
is_all_na = pandas.isnull(portfolio).all(axis=1)
portfolio = pandas.DataFrame(portfolio.sum(axis=1), columns=['Portfolio'])
# overwrite days when every asset PnL was null is NaN with nan
portfolio[is_all_na] = numpy.nan
else:
portfolio = pandas.DataFrame(data=_pnl.mean(axis=1), index=_pnl.index, columns=['Portfolio'])
adjusted_weights_matrix = self.create_portfolio_weights(br, _pnl, method='mean')
portfolio_leverage_df = pandas.DataFrame(data=numpy.ones(len(_pnl.index)), index=_pnl.index,
columns=['Portfolio'])
# should we apply vol target on a portfolio level basis?
if br.portfolio_vol_adjust is True:
# calculate portfolio leverage
portfolio_leverage_df = risk_engine.calculate_leverage_factor(portfolio,
br.portfolio_vol_target,
br.portfolio_vol_max_leverage,
br.portfolio_vol_periods,
br.portfolio_vol_obs_in_year,
br.portfolio_vol_rebalance_freq,
br.portfolio_vol_resample_freq,
br.portfolio_vol_resample_type,
period_shift=br.portfolio_vol_period_shift)
# portfolio, portfolio_leverage_df = risk_engine.calculate_vol_adjusted_returns(portfolio, br = br)
# multiply portfolio leverage * individual signals to get final position signals
length_cols = len(signal_df.columns)
leverage_matrix = numpy.transpose(
numpy.repeat(portfolio_leverage_df.values.flatten()[numpy.newaxis, :], length_cols, 0))
# final portfolio signals (including signal & portfolio leverage)
portfolio_signal = pandas.DataFrame(
data=numpy.multiply(leverage_matrix, signal_df.values),
index=signal_df.index, columns=signal_df.columns)
# later when we plot the portfolio components, we do that without weighting the individual components
portfolio_signal_before_weighting = portfolio_signal.copy()
if br.portfolio_combination is not None:
if 'sum' in br.portfolio_combination:
pass
elif br.portfolio_combination == 'mean' \
or (br.portfolio_combination == 'weighted' and isinstance(br.portfolio_combination_weights, dict)):
portfolio_signal = pandas.DataFrame(data=(portfolio_signal.values * adjusted_weights_matrix),
index=portfolio_signal.index,
columns=portfolio_signal.columns)
else:
portfolio_signal = pandas.DataFrame(data=(portfolio_signal.values * adjusted_weights_matrix),
index=portfolio_signal.index,
columns=portfolio_signal.columns)
portfolio_total_longs, portfolio_total_shorts, portfolio_net_exposure, portfolio_total_exposure \
= self.calculate_exposures(portfolio_signal)
# apply position limits?
position_clip_adjustment = risk_engine.calculate_position_clip_adjustment \
(portfolio_net_exposure, portfolio_total_exposure, br)
# if we have any position clip adjustment, for example related to max position sizes
if position_clip_adjustment is not None:
position_clip_adjustment_matrix = numpy.transpose(
numpy.repeat(position_clip_adjustment.values.flatten()[numpy.newaxis, :], length_cols, 0))
# recalculate portfolio signals after adjustment (for individual components - without
# weighting each signal separately)
portfolio_signal_before_weighting = pandas.DataFrame(
data=(portfolio_signal_before_weighting.values * position_clip_adjustment_matrix),
index=portfolio_signal_before_weighting.index,
columns=portfolio_signal_before_weighting.columns)
# recalculate portfolio signal after adjustment (for portfolio level positions)
portfolio_signal = pandas.DataFrame(
data=(portfolio_signal.values * position_clip_adjustment_matrix),
index=portfolio_signal.index,
columns=portfolio_signal.columns)
# recalculate portfolio leverage with position constraint (multiply vectors elementwise)
portfolio_leverage_df = pandas.DataFrame(
data=(portfolio_leverage_df.values * position_clip_adjustment.values),
index=portfolio_leverage_df.index,
columns=portfolio_leverage_df.columns)
# recalculate total long, short, net and absolute exposures of the whole portfolio after the position
# clip adjustment
portfolio_total_longs, portfolio_total_shorts, portfolio_net_exposure, portfolio_total_exposure \
= self.calculate_exposures(portfolio_signal)
# calculate final portfolio returns with the amended portfolio leverage (by default just 1s)
portfolio = calculations.calculate_signal_returns_with_tc_matrix(portfolio_leverage_df, portfolio, tc=tc)
# assign all the property variables
self._signal = signal_df # individual signals (before portfolio leverage)
self._portfolio_signal = portfolio_signal # individual signals (AFTER portfolio leverage/constraints)
self._portfolio_leverage = portfolio_leverage_df # leverage on portfolio
self._portfolio = portfolio
# calculate each period of trades
self._portfolio_trade = self._portfolio_signal - self._portfolio_signal.shift(1)
# expressing trades/positions in terms of notionals
self._portfolio_signal_notional = None
self._portfolio_signal_trade_notional = None
# expressing trades/positions in terms of contracts (useful for futures)
self._portfolio_signal_contracts = None
self._portfolio_signal_trade_contracts = None
self._portfolio_total_longs = portfolio_total_longs
self._portfolio_total_shorts = portfolio_total_shorts
self._portfolio_net_exposure = portfolio_net_exposure
self._portfolio_total_exposure = portfolio_total_exposure
self._portfolio_total_longs_notional = None
self._portfolio_total_shorts_notional = None
self._portfolio_net_exposure_notional = None
self._portfolio_total_exposure_notional = None
self._portfolio_signal_trade_notional_sizes = None
# also create other measures of portfolio
# portfolio & trades in terms of a predefined notional (in USD)
# portfolio & trades in terms of contract sizes (particularly useful for futures)
if br.portfolio_notional_size is not None:
# express positions in terms of the notional size specified
self._portfolio_signal_notional = self._portfolio_signal * br.portfolio_notional_size
self._portfolio_signal_trade_notional = self._portfolio_signal_notional - self._portfolio_signal_notional.shift(
1)
            df_trades_sizes = pandas.DataFrame()
"""
The data_cleaner module is used to clean missing or NaN values from pandas dataframes (e.g. removing NaN, imputation, etc.)
"""
import pandas as pd
import numpy as np
import logging
from sklearn.preprocessing import Imputer
import os
from scipy.linalg import orth
log = logging.getLogger('mastml')
def flag_outliers(df, conf_not_input_features, savepath, n_stdevs=3):
"""
    Method that scans each X feature matrix column and flags values that lie more than n_stdevs (default 3) standard
    deviations above or below the column average. The index and column values of potentially problematic points are
    listed and written to an output file.
    Args:
        df: (dataframe), pandas dataframe containing data
        conf_not_input_features: (list), names of columns that are not input features and should be skipped when scanning
        savepath: (str), path where the outlier report is written
        n_stdevs: (int), number of standard deviations used as the flagging threshold
    Returns:
        None, just writes results to file
"""
n_rows = df.shape[0]
outlier_dict = dict()
for col in df.columns:
outlier_rows = list()
outlier_vals = list()
if col not in conf_not_input_features:
avg = np.average(df[col])
stdev = np.std(df[col])
for row in range(n_rows):
if df[col].iloc[row] > avg + n_stdevs*stdev:
outlier_rows.append(row)
outlier_vals.append(df[col].iloc[row])
elif df[col].iloc[row] < avg - n_stdevs*stdev:
outlier_rows.append(row)
outlier_vals.append(df[col].iloc[row])
else:
pass
outlier_dict[col] = (outlier_rows, outlier_vals)
    pd.DataFrame()
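    # Hedged sketch (assumption): the statement above is truncated here; presumably the
    # flagged rows are assembled into a frame and written under savepath, e.g.
    # outlier_df = pd.DataFrame.from_dict(outlier_dict, orient='index',
    #                                     columns=['rows', 'values'])
    # outlier_df.to_csv(os.path.join(savepath, 'data_potential_outliers.csv'))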
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
# In[2]:
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# In[3]:
df = pd.read_csv('data.csv')
# ## Grouping Players Dataset by Teams
# In[4]:
df_teams = df
# ### Dropping NaNs in `Club` and `Position` Features
# In[5]:
df_teams = df_teams.drop('Unnamed: 0', axis=1)
# In[6]:
df_teams = df_teams.dropna(subset=['Club', 'Position'], axis=0)
# ### Goal Keeper rows: Replacing NaNs with 0s in the Position-Rating Columns
# In[7]:
# Replacing NaNs with 0s for Goal Keeper rows
df_teams.iloc[:,27:53] = df_teams.iloc[:,27:53].fillna(value=0)
# ### Dropping `Joined` and Replacing NaNs in `Release Clause` and `Loaned From`
# In[8]:
# Dropping 'Joined' column
df_teams = df_teams.drop('Joined', axis=1)
# In[9]:
# Replacing NaNs in 'Release Clause' and 'Loaned From' features
df_teams['Release Clause'] = df_teams['Release Clause'].fillna(0)
df_teams['Loaned From'] = df_teams['Loaned From'].fillna('Not Loaned')
# ### Adding `Field Position` Feature
# In[10]:
defense = ['CB', 'RB', 'LB', 'RWB', 'LWB', 'RCB', 'LCB']
midfield = ['RW', 'LW', 'RM', 'LM', 'CM', 'CDM', 'CAM', 'RCM', 'LCM', 'LAM', 'RAM', 'RDM', 'LDM']
attack = ['ST', 'CF', 'RF', 'LF', 'RS', 'LS']
goalkeeper = ['GK']
# In[11]:
# function to create Field Position for each player
def field(row):
if row['Position'] in defense:
val = 'Defense'
elif row['Position'] in midfield:
val = 'Midfield'
elif row['Position'] in attack:
val = 'Attack'
else:
val = 'GK'
return val
# In[12]:
df_teams['Field Position'] = df_teams.apply(field, axis=1)
# ### Editing values in `Value` and `Wage` columns
# In[13]:
def change_value(row):
if (row['Value'][-1]=='K'):
return int(pd.to_numeric(row['Value'][1:-1])*1000)
elif (row['Value'][-1]=='M'):
        return int(pd.to_numeric(row['Value'][1:-1])*1000000)
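
# Hedged usage sketch (added for this writeup, assumption): the parser is presumably
# applied column-wise in a later cell, e.g.
# df_teams['Value'] = df_teams.apply(change_value, axis=1)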
##########################################################################
## Summary
##########################################################################
'''
Creates flat table of decisions from our Postgres database and runs the prediction pipeline.
Starting point for running our models.
'''
##########################################################################
## Imports & Configuration
##########################################################################
import logging
import numpy, pandas
from sklearn import metrics
from sklearn.metrics import classification_report
#Configure logging. See /logs/example-logging.py for examples of how to use this.
logging_filename = "../logs/pipeline.log"
logging.basicConfig(filename=logging_filename, level=logging.DEBUG)
#Pushes everything from the logger to the command line output as well.
logging.getLogger().addHandler(logging.StreamHandler())
#Allow modules to import each other at parallel file structure (TODO clean up this configuration in a refactor, it's messy...)
from inspect import getsourcefile
import os, sys, json
current_path = os.path.abspath(getsourcefile(lambda:0))
current_dir = os.path.dirname(current_path)
parent_dir = current_dir[:current_dir.rfind(os.path.sep)]
repo_dir = parent_dir[:parent_dir.rfind(os.path.sep)]
sys.path.insert(0, parent_dir)
import database_management
##########################################################################
## Classes
##########################################################################
class ManyModels:
'''
A wrapper class for training multiple sklearn models on a single dataset
The wrapper contains:
-The models themselves (fitted or not), passed as a dictionary from the calling function
-X and y arrays of training data.
-an X_test set of testing data
-The predicted answers of all models, stored as a dataframe with rows matching the X_test dataset
Not optimized for memory use - instead it is designed for as much flexibility and access to source data,
models, and prediction performance as possible for use in a learning context.
Example Use:
#set it up:
modeler = ManyModels()
modeler.models = {} #change this to a dictionary of model instances
modeler.X = X_train
modeler.y = y_train
modeler.y_names = ['A','B']
#Fit:
modeler.fit("RandomForestClassifier") #fit just one model
modeler.fit(model_list=['KNeighborsClassifier_12', 'RandomForestClassifier']) #fit a list of models
modeler.fit() #fits all models
#Attach testing data
modeler.X_test = X_test
modeler.y_test = y_test
#Predict:
predicted_df = modeler.predict() #returns a dataframe of the predicted answers for each model, but also stores the fitted models on the modeler object
'''
def __init__(self):
self.models = {} #dict of 'modelname':sklearn.model_instance
self.X = numpy.array([[],[]]) #blank 2-d array, contains training data
self.y = numpy.array([]) #blank 1-d array, contains training answers
self.pipe = None #a pipeline for transforming this data. Should not contain a final model to predict.
        self.answers = pandas.DataFrame() #Pandas dataframe where each row is a row of the test dataset and each column holds a different model's predictions
self.scores = {} #Nested dictionary of shape {'modelname': {'precision': #, 'recall': #, 'accuracy': #, 'f1': # }}
self.X_test = None
self.y_test = None
self.y_names = []
self.version = ""
self.notes = ""
#@property lets us add additional logic to the getters and setters for the X_test property (e.g., resetting the answers and scores)
@property
def X_test(self):
return self.__X_test
@X_test.setter
def X_test(self, X_test=None):
self.__X_test = X_test
#reset since rows will no longer match
self.answers = pandas.DataFrame()
self.scores = {}
@property
def y_test(self):
return self.__y_test
@y_test.setter
def y_test(self, y_test=None):
self.__y_test = y_test
#reset since rows will no longer match
self.answers = pandas.DataFrame()
self.scores = {}
def fit(self, model_list=None):
model_list = self.clean_model_list(model_list)
for key in model_list:
self.models[key].fit(self.X, self.y)
print(" fitted model: " + key)
return self
def predict(self, model_list=None):
model_list = self.clean_model_list(model_list)
for key in model_list:
self.answers[key] = self.models[key].predict(self.X_test)
self.scores[key] = { }
if self.y_test is not None:
self.scores[key]['precision'] = metrics.precision_score(y_true = self.y_test, y_pred = self.answers[key].as_matrix(), average=None)
self.scores[key]['recall'] = metrics.recall_score(y_true = self.y_test, y_pred=self.answers[key], average=None)
self.scores[key]['accuracy'] = metrics.accuracy_score(y_true = self.y_test, y_pred=self.answers[key])
self.scores[key]['f1'] = metrics.f1_score(y_true = self.y_test, y_pred=self.answers[key], average=None)
self.scores[key]['classification_report'] = classification_report(y_true = self.y_test, y_pred = self.answers[key].as_matrix(), target_names=self.y_names)
return self.answers
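    # Illustrative (hypothetical) inspection of the metrics stored by predict(); the model key
    # 'RandomForestClassifier' is only an example and must exist in self.models:
    #   modeler.scores['RandomForestClassifier']['accuracy']       # single float
    #   modeler.scores['RandomForestClassifier']['precision']      # per-class array (average=None)
    #   print(modeler.scores['RandomForestClassifier']['classification_report'])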
def clean_model_list(self, model_list):
#Resolve defaults and turn a single string into a list
if model_list is None:
model_list = list(self.models.keys())
if isinstance(model_list, str):
model_list = [model_list]
if isinstance(model_list, list):
return model_list
else:
raise ValueError('A provided model_list must be a list or a string.')
##########################################################################
## Functions
##########################################################################
def test_import():
print("I import correctly!")
def run_simple_query():
#Connect to the database
database_connection = database_management.get_database_connection('database')
query_result = database_connection.execute("select snapshot_id, table_name from manifest where snapshot_id='c2005-07'")
for query_row in query_result:
print(query_row['snapshot_id'] + " | " + query_row['table_name'])
def get_meta_data(filepath=None):
    #default path is meta.json relative to the current working directory
    if filepath is None:
        filepath = 'meta.json'
if os.path.exists(filepath):
with open(filepath, 'r') as f:
meta = json.load(f)
return meta
else:
raise FileNotFoundError("Couldn't find the file {}!".format(filepath))
def list_to_dict(list):
    '''
    Makes a dictionary that maps each value in the list to its index in that list.
    For use with meta.json so that we can convert category labels to the integer codes that the pandas .map() function expects
    '''
    mapping = {x: i for i, x in enumerate(list)}
    return mapping
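# Illustrative sketch of how list_to_dict feeds pandas' .map(); the labels below are
# hypothetical examples, not taken from the real meta.json:
#   decision_categories = ['in', 'out', 'unknown']
#   decision_codes = list_to_dict(decision_categories)   # {'in': 0, 'out': 1, 'unknown': 2}
#   some_df['decision'].map(decision_codes)              # encode string labels as integer codes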
def get_decisions_table(equal_split = False):
'''
Queries the database to get our full decisions table
equal_split not implemented
'''
logging.info("Getting the decisions data from the database...")
# Open and read the SQL command file as a single buffer
database_connection = database_management.get_database_connection('database')
query_path = "select_decisions_data.sql"
file = open(query_path, 'r')
query_text = file.read()
file.close()
query_dataframe = pandas.read_sql(query_text, database_connection)
return query_dataframe
def get_sample_decisions_table(equal_split = False):
'''
Deprecated - Superseded by get_decisions_table now that it is working.
Queries the database to get a small version of our decisions table for training/testing purposes
'''
logging.info("Getting the sample data from the database...")
# Open and read the SQL command file as a single buffer
database_connection = database_management.get_database_connection('database')
    query_path = os.path.join(parent_dir, "wrangling", "decisions_partial_churn_filter.sql")
file = open(query_path, 'r')
query_file_text = file.read()
file.close()
#This query will be built on and/or replaced once we get Kashif's SQL query working
query_text = "select" + """
temp.decision
, rent.hd01_vd01 as median_rent
, c.contract_term_months_qty
, c.assisted_units_count
, c.is_hud_administered_ind
, TRIM(c.program_type_group_name) as program_type_group_name
, c.rent_to_FMR_ratio
, c."0br_count" br0_count
, c."1br_count" br1_count
, c."2br_count" br2_count
, c."3br_count" br3_count
, c."4br_count" br4_count
, c."5plusbr_count" br5_count
""" + "from (" + query_file_text + """
) as temp
inner join contracts as c
on c.contract_number = temp.contract_number and c.snapshot_id = temp.snapshot_id
inner join geocode as g
on c.property_id = g.property_id
inner join acs_rent_median as rent
on g.geoid::text = rent.geo_id2::text
where churn_flag<>'churn'
--need to match to closest rent TODO
and rent.snapshot_id = 'ACS_14_5YR_B25058_with_ann.csv'
"""
both_in_out = " and decision in ('in', 'out')"
just_in = " and decision in ('in')"
just_out = " and decision in ('out')"
if equal_split == False:
query1 = query_text + both_in_out
query_dataframe = | pandas.read_sql(query1, database_connection) | pandas.read_sql |
#Scrape Trustee details
#<NAME>, <NAME>
#11/10/18
#This file scrapes trustee information from the Charity Commission for Northern Ireland website.
################################# Import packages #################################
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
from lxml.html import fromstring
from itertools import cycle
from datetime import datetime
from downloaddate_function import downloaddate
from time import sleep
#from dropbox.files import WriteMode
import requests
import glob
#import dropbox
import pandas as pd
import csv
import os
import io
import numpy as np
# Define a function for generating proxies
def get_proxies():
url = 'https://free-proxy-list.net/'
response = requests.get(url)
parser = fromstring(response.text)
proxies = set()
for i in parser.xpath('//tbody/tr')[:10]:
if i.xpath('.//td[7][contains(text(),"no")]') or i.xpath('.//td[7][contains(text(),"yes")]'): # None on the list have yes at current time
#Grabbing IP and corresponding PORT
proxy = ":".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])
proxies.add(proxy)
print('New proxies grabbed.\n')
return proxies
"""
# Fetch Dropbox authentication
dbtokenpath = 'C:/Users/mcdonndz-local/Desktop/admin/db_token.txt'
#dbtokenpath_pi = '/home/pi/admin/dp_token.txt'
dbtokenfile = open(dbtokenpath, "r")
dbapitoken = dbtokenfile.read()
print(dbapitoken)
dbx = dropbox.Dropbox(dbapitoken) # Create an object for accessing Dropbox API
"""
# Get the current date
ddate = downloaddate()
# Define paths
localdatapath = './'
remotedatapath = './trustee_data/data_raw/' # Dropbox path
remotelogpath = './trustee_data/logs/' # Dropbox path
projpath = './' # Location of syntax
inputfile = remotedatapath + 'ni_charityregister_' + ddate + '.csv'
outputfile = remotedatapath + 'ni_trustee_data_' + ddate + '.csv'
logoutputfile = remotedatapath + 'ni_trustee_log_' + ddate + '.csv'
# Delete output file if already exists
try:
os.remove(outputfile)
except OSError:
pass
# Download latest copy of NI charity register from the Commission's data portal
if not os.path.isfile(inputfile):
import ni_download
print('Finished executing ni_download.py')
print(' ')
print('---------------------------------------------')
print(' ')
sleep(10)
print(' ') # Whitespace used to make the output window more readable
print('>>> Run started') # Header of the output, with the start time.
print('\r')
# Create a pandas dataframe from the input CSV #
pd.set_option('precision', 0)
with open(inputfile, 'rb') as f:
df = pd.read_csv(f)
print(df.dtypes)
df.reset_index(inplace=True)
df.set_index(['Reg charity number'], inplace=True)
regno_list = df.index.values.tolist()
fye_list = df['Date for financial year ending'].values.tolist()
print(len(fye_list)==len(regno_list))
#print(regno_list)
# Define variable names for the output files
varnames = ['Row ID', 'Registered', 'Trustee Name', 'Charity Number', 'Charity Name']
lvarnames = ['timestamp', 'regno', 'url', 'status code', 'execution time']
with open(outputfile, 'a', newline='') as f:
    writer = csv.writer(f)
writer.writerow(varnames)
with open(logoutputfile, 'a') as f:
    writer = csv.writer(f)
writer.writerow(lvarnames)
# Define a counter to track how many rows of the input file the script processes
counter = 1
# Scrape proxies; if the request fails, sleep and try again, up to 7 times
sleeptime = 60 # seconds to wait between attempts
proxytry = 1
while True:
    try:
        proxies = get_proxies()
        break
    except Exception:
        print('Trying to get proxies:',proxytry)
        proxytry = proxytry + 1
        if proxytry > 6:
            print('>>> Failed to get proxies. Confirm site is up and relaunch script. <<<')
            quit()
        sleep(sleeptime)
print('Proxies:',proxies)
proxy_pool = cycle(proxies)
# Loop through list of charity numbers and scrape info from webpages
for ccnum in regno_list:
starttime = datetime.now() # Track how long it takes to scrape data for each charity
# Define counter to track number of times the script tries to use a valid proxy
proxytry = 1
while proxytry < 1000:
#try:
proxy = next(proxy_pool) # Grab a proxy from the pool
proxytry = 2000
webadd = 'https://www.charitycommissionni.org.uk/charity-details/?regid=' + str(ccnum) +'&subid=0'
headers = {'http': proxy, 'https': proxy, 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'} # Spoof the user-agent of the request to return the content seen by Chrome.
# Define counter to track number of webpage request attempts; try three times
attempt = 1
try:
while attempt < 4:
rorg = requests.get(webadd, headers=headers) # Grab the page using the URL and headers.
print('\n',proxy) # Checks if script is cycling through proxies
print(webadd)
if rorg.status_code==200: # Only proceed if the webpage can be requested successfully
attempt = 5
html_org = rorg.text # Get the text elements of the page.
soup_org = soup(html_org, 'html.parser') # Parse the text as a BS object.
################################# Charity and Trustee info #################################
if not soup_org.find('p', {"class": "pcg-charity-details__amount pcg-charity-details__amount--removed pcg-contrast__color-alt"}): # If the charity isn't removed from the Register then proceed with scraping trustee info
#try: # This try captures instances where the webpage request was successful but the result is blank page i.e. no charity information
# Capture charity name
charityname = soup_org.find('h1', {"class": "pcg-charity-details__title"}).text
print(charityname)
charname = charityname
boardinfo = soup_org.find("div", class_="table-responsive-sm") # Scrape the whole panel
boardinfo = boardinfo.find_all('tr') # Find all the rows and store them as a list
del boardinfo[0] # Delete the top line which is the headers
trustee = list(map(lambda x : x.text.strip(), boardinfo)) # The name is in it's own tag so easy to find, map/lambda applies something to every item in a list
# Data management #
# Create variables capturing the number of trustees, number of other trusteeships per trustee, and adjust Row ID to begin at 1
# Write to JSON and CSV
dicto = {'ccnum':ccnum, 'charname': charname, 'Trustee':trustee, 'Registered': '1'} # Store the new variables as a dictionary
#df_json = pd.DataFrame(dicto)
#df_json.set_index(['ccnum'], inplace=True)
#df_json.to_json(path_or_buf='Trustee_test_data_loop.json', orient='index')
df_csv = | pd.DataFrame(dicto) | pandas.DataFrame |
import datetime
import logging
import pathlib
import typing
import xml.parsers.expat
from dataclasses import dataclass
from multiprocessing.dummy import Pool as ThreadPool
import pandas as pd
import pyetrade
import pytz
import requests.exceptions
from tenacity import (
retry,
stop_after_attempt,
wait_exponential,
retry_if_exception_type,
)
log = logging.getLogger(__name__)
VALID_INCREMENTS = [1, 2.5, 5, 10, 50, 100]
PUT_INFO_TO_INCLUDE = [
"bid",
"ask",
"lastPrice",
"volume",
"openInterest",
"OptionGreeks",
"strikePrice",
"symbol",
"optionType",
"netChange",
]
@dataclass
class MarketData:
ticker: str
company_name: str
market_price: float
high_52: float
low_52: float
percentile_52: float
beta: float
next_earnings_date: str
class OptionsManager:
def __init__(
self,
consumer_key: str,
consumer_secret: str,
oauth_token: str,
oauth_secret: str,
):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.oauth_token = oauth_token
self.oauth_secret = oauth_secret
self.market = pyetrade.ETradeMarket(
self.consumer_key,
self.consumer_secret,
self.oauth_token,
self.oauth_secret,
dev=False,
)
self.accounts = pyetrade.ETradeAccounts(
self.consumer_key,
self.consumer_secret,
self.oauth_token,
self.oauth_secret,
dev=False,
)
def get_csv_df(self):
sector_path = pathlib.Path(__file__).parent / "data" / "sectors.csv"
csv_df = pd.read_csv(sector_path)
csv_df.fillna("", inplace=True)
return csv_df
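    # Assumed layout of data/sectors.csv, inferred from the columns referenced elsewhere in this
    # class ('Ticker', 'Sector', 'Sub-Sector', 'Blue Chip'); the example row is hypothetical:
    #   Ticker,Sector,Sub-Sector,Blue Chip
    #   DIS,Communication Services,Comm - Media & Ent,Yes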
def get_all_options_info(
self,
sector="Communication Services",
sub_sector="Comm - Media & Ent",
percentile_of_52_range: int = 25,
min_strike: float = 30,
max_strike: float = 20,
month_look_ahead: int = 3,
min_volume: int = 1,
min_open_interest: int = 1,
min_annualized_return: float = 0.0,
include_next_earnings_date: bool = True,
blue_chip_only: bool = False,
):
csv_df = self.get_csv_df()
# filter csv_df down by sector (if None, don't filter)
if sector:
csv_df = csv_df.loc[csv_df["Sector"] == sector]
# filter csv_df down by sub-sector (if None, don't filter)
if sub_sector:
csv_df = csv_df.loc[csv_df["Sub-Sector"] == sub_sector]
if blue_chip_only:
csv_df = csv_df.loc[csv_df["Blue Chip"] == "Yes"]
# skip some buggy tickers
skip = ["NVR", "KSU"]
tickers = [ticker for ticker in csv_df["Ticker"].unique() if ticker not in skip]
def helper(ticker):
try:
market_data = self.get_market_data(ticker)
except Exception as ex:
log.error(f"Skipping ticker '{ticker}' due to error: {ex}")
return pd.DataFrame()
if market_data.percentile_52 * 100 > percentile_of_52_range:
return pd.DataFrame()
try:
options_info = self.get_options_info(
ticker=ticker,
min_strike=min_strike,
max_strike=max_strike,
increment=1,
month_look_ahead=month_look_ahead,
min_volume=min_volume,
min_open_interest=min_open_interest,
min_annualized_return=min_annualized_return,
include_next_earnings_date=include_next_earnings_date,
)
except xml.parsers.expat.ExpatError as ex:
log.error(f"Skipping {ticker} due to error: {ex}")
return | pd.DataFrame() | pandas.DataFrame |
from functools import reduce
from datetime import datetime as dt
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
matplotlib.use("agg")
COLOR_DEATHS = "#dd6600"
COLOR_RECOVERED = "#dbcd00"
COLOR_ACTIVE = "#2792cb"
COLOR_CONFIRMED_NEW = "#2792cb" # a pattern is added below
HATCH_COLOR = 'white' # currently unused, see line 213
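# Assumed shape of the time-series CSVs read in load() below, inferred from the code
# (one 'Kommune' column plus one column per reporting date); the values are hypothetical:
#   Kommune,2020-03-01,2020-03-02,...
#   Bonn,12,15,...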
def load(kommune):
df_confirmed_raw = pd.read_csv(
"data/time_series/time_series_covid-19_nrw_confirmed.csv"
)
df_confirmed = (
df_confirmed_raw[df_confirmed_raw.Kommune == kommune]
.transpose()
.reset_index()
.drop([0])
)
df_confirmed.columns = ["date", "confirmed"]
df_confirmed["confirmed_data_available"] = ~df_confirmed["confirmed"].isna()
df_confirmed.fillna(method="ffill", inplace=True)
df_confirmed["date"] = pd.to_datetime(df_confirmed["date"])
df_confirmed["confirmed_yesterday"] = (
df_confirmed["confirmed"] - df_confirmed["confirmed"].diff()
)
df_confirmed["confirmed_new"] = df_confirmed["confirmed"].diff()
df_confirmed.loc[df_confirmed['confirmed_new'] < 0, ['confirmed_new']] = 0
df_confirmed["confirmed_change_rate"] = df_confirmed["confirmed"].pct_change()
df_recovered_raw = pd.read_csv(
"data/time_series/time_series_covid-19_nrw_recovered.csv"
)
df_recovered = (
df_recovered_raw[df_recovered_raw.Kommune == kommune]
.transpose()
.reset_index()
.drop([0])
)
df_recovered.columns = ["date", "recovered"]
df_recovered["recovered_data_available"] = ~df_recovered["recovered"].isna()
df_recovered.fillna(method="ffill", inplace=True)
df_recovered["date"] = pd.to_datetime(df_recovered["date"])
df_recovered["recovered_delta"] = df_recovered["recovered"].diff()
df_recovered["recovered_change_rate"] = df_recovered["recovered"].pct_change()
df_deaths_raw = pd.read_csv("data/time_series/time_series_covid-19_nrw_deaths.csv")
df_deaths = (
df_deaths_raw[df_deaths_raw.Kommune == kommune]
.transpose()
.reset_index()
.drop([0])
)
df_deaths.columns = ["date", "deaths"]
df_deaths["deaths_data_available"] = ~df_deaths["deaths"].isna()
df_deaths.fillna(method="ffill", inplace=True)
df_deaths["date"] = pd.to_datetime(df_deaths["date"])
df_deaths["deaths_delta"] = df_deaths["deaths"].diff()
df_deaths["deaths_change_rate"] = df_deaths["deaths"].pct_change()
dfs = [df_confirmed, df_recovered, df_deaths]
df = reduce(lambda left, right: | pd.merge(left, right, on="date") | pandas.merge |
#!/usr/bin/env python3
import sys
import pandas as pd
import numpy as np
def clean_data(filename):
    # Load the date identifiers for each class from the six class*_out.txt files
    class_sets = []
    for class_idx in range(1, 7):
        with open("class{}_out.txt".format(class_idx), "r") as fin:
            class_sets.append(set(fin.read().split(',')))
    class1, class2, class3, class4, class5, class6 = class_sets
df = pd.read_csv(filename, header=None)
fa = filename.split('.')
bad_index = []
for idx, i in enumerate(df.values):
if '?' in i:
bad_index.append(idx)
df.drop(df.index[bad_index], inplace=True)
feature_matrix = np.matrix(df[df.columns[1:]].values)
date = df[df.columns[0]]
target_values = []
for i in date:
if i in class1:
target_values.append('class1')
elif i in class2:
target_values.append('class2')
elif i in class3:
target_values.append('class3')
elif i in class4:
target_values.append('class4')
elif i in class5:
target_values.append('class5')
elif i in class6:
target_values.append('class6')
# print(target_values)
target_values = | pd.Series(target_values) | pandas.Series |
"""
This pipeline first saves individual image maps to the database
- this is an issue because of storage space
1. Select images
2. Apply pre-processing corrections
a. Limb-Brightening
b. Inter-Instrument Transformation
3. Coronal Hole Detection
4. Convert to Map
5. Combine Maps
6. Save to DB
"""
import sys
sys.path.append("CHD")
import os
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from chmap.settings.app import App
import chmap.database.db_classes as db_class
import chmap.database.db_funs as db_funcs
import chmap.data.corrections.lbcc.LBCC_theoretic_funcs as lbcc_funcs
import chmap.data.corrections.iit.IIT_pipeline_funcs as iit_funcs
from chmap.maps.util.map_manip import combine_maps
import chmap.utilities.plotting.psi_plotting as EasyPlot
#### ------------ QUERYING PARAMETERS TO UPDATE ------------- #####
# TIME RANGE FOR QUERYING
query_time_min = datetime.datetime(2011, 4, 1, 0, 0, 0)
query_time_max = datetime.datetime(2011, 4, 10, 0, 0, 0)
map_freq = 2  # number of hours between maps (TODO: rename this variable)
# INITIALIZE DATABASE CONNECTION
# DATABASE PATHS
map_data_dir = App.MAP_FILE_HOME
raw_data_dir = App.RAW_DATA_HOME
hdf_data_dir = App.PROCESSED_DATA_HOME
database_dir = App.DATABASE_HOME
sqlite_filename = App.DATABASE_FNAME
# initialize database connection
use_db = "sqlite"
sqlite_path = os.path.join(database_dir, sqlite_filename)
db_session = db_funcs.init_db_conn_old(db_name=use_db, chd_base=db_class.Base, sqlite_path=sqlite_path)
# INSTRUMENTS
inst_list = ["AIA", "EUVI-A", "EUVI-B"]
# CORRECTION PARAMETERS
n_intensity_bins = 200
R0 = 1.01
del_mu = 0.2
# MAP PARAMETERS
x_range = [0, 2 * np.pi]
y_range = [-1, 1]
map_nycoord = 1600
del_y = (y_range[1] - y_range[0]) / (map_nycoord - 1)
map_nxcoord = (np.floor((x_range[1] - x_range[0]) / del_y) + 1).astype(int)
# generate map x,y grids. y grid centered on equator, x referenced from lon=0
map_y = np.linspace(y_range[0], y_range[1], map_nycoord, dtype='<f4')
map_x = np.linspace(x_range[0], x_range[1], map_nxcoord, dtype='<f4')
# --- 1. Select Images -----------------------------------------------------
# query some images
query_pd = db_funcs.query_euv_images(db_session=db_session, time_min=query_time_min, time_max=query_time_max)
# generate a dataframe to record methods
# maybe this needs to be made a function?
# methods_template is a combination of Meth_Defs and Var_Defs columns
meth_columns = []
for column in db_class.Meth_Defs.__table__.columns:
meth_columns.append(column.key)
defs_columns = []
for column in db_class.Var_Defs.__table__.columns:
defs_columns.append(column.key)
df_cols = set().union(meth_columns, defs_columns, ("var_val", ))
methods_template = | pd.DataFrame(data=None, columns=df_cols) | pandas.DataFrame |
import numpy as np
import pandas as pd
from unittest import TestCase
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.datasets import load_iris
from utilities.preprocessing import standard_scale, min_max_scale
from pytest import raises
class RollingStatsTests:
def test_scaled_values(self):
predictors = load_iris().data
scaler = self._scaler()
expected = scaler.fit_transform(predictors)
output = self._func(predictors)
assert np.allclose(expected, output)
class StandardScaleTests(TestCase, RollingStatsTests):
def setUp(self):
super().setUp()
self._func = standard_scale
self._scaler = StandardScaler
def test_standard_scale_ddof_one(self):
predictors = load_iris().data
df = | pd.DataFrame(predictors) | pandas.DataFrame |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
        # Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
            # Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is unused, but it is created anyway so that we don't need confusing
        # list-comprehension logic in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
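# apply() with a dict of functions is not supported and should raise TypeError (checked for Modin only).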
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
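# Compare every matrix element with the source frame, treating NaN specially since NaN != NaN.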
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
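# Every internal partition should yield the same ndarray whether converted via pandas or directly with to_numpy().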
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
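# ind_len is the number of labels along the clipping axis, so each list bound below has one value per label.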
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case.
# Remove this special case once we can pull the error messages from the backend.
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
# We have to do this because we may pick the most frequent value differently than
# pandas when counts tie. There is no guarantee which value comes first, so if the
# full results don't match, at least make sure `count`, `unique`, and `freq` agree.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
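# Dropping by label at a specific MultiIndex level is only checked for the expected UserWarning
# here (presumably the defaulting-to-pandas warning, as in the other tests above).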
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns APIs
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
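# The index intentionally has gaps (e.g. 15 -> 20 and 27 -> 32): sort_values + drop_duplicates
# must keep working with non-contiguous index labels.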
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is intentionally unused; keeping the standard setup avoids confusing
# list-comprehension filtering in the pytest.mark.parametrize above.
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test non-inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
# We are not testing when axis is over rows until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
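# Column A now has NaN runs at both ends, so both the value fill and the pad fill below have something to fill.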
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
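# pandas has been filled in place but the Modin frame has not yet, so the two frames must differ at this point.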
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
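# Keeping only the first two rows and reindexing back to the full index leaves trailing NaN rows,
# so a pad fill with limit=2 should fill at most two of them.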
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
# Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
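# Force a duplicated index (each label repeated up to three times) and check that positional
# and label lookups still match pandas.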
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
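# insert() works in place and returns None, so these df_equals calls mainly check that inserting
# a column into an empty frame raises no error and that Modin mirrors pandas.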
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
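        # The deprecated .ix indexer is not implemented in Modin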
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
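        # Join a four-row frame against a one-row frame under every join type and compare with pandas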
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
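        # Also verify joining a list of DataFrames at once ("right" is not exercised for list input)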
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
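        # last("3D") keeps only the rows that fall within the final three days covered by the index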
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
            # Scalar
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
            indices = [i % 3 == 0 for i in range(len(modin_df.index))]
            columns = [i % 5 == 0 for i in range(len(modin_df.columns))]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
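        # Build a two-level row MultiIndex over random data and check label, tuple, and partial-tuple lookups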
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
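        # A UserWarning is expected; a ValueError raised by mask() is tolerated rather than treated as a failure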
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
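        # Parity check: if pandas raises for this parameter combination, Modin should raise too;
        # otherwise the results must match (repeated below on the transposed frame)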
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
        pandas_df = pandas.DataFrame(data)
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
        # An unnamed Series cannot be merged; once given a name it is promoted to a one-column DataFrame
s = pd.Series(frame_data2.get("col1"))
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
        pandas_df = pandas.DataFrame(data)