| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
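Each row below pairs a long code "prompt" (cut off at a pandas call site) with the "completion" that fills in the call and the fully qualified pandas "api" it exercises. As a minimal sketch of how such a table could be inspected (the file name all_rows.csv is an assumption, not part of the dataset description):
import pandas as pd
rows = pd.read_csv("all_rows.csv")  # hypothetical CSV export of the prompt/completion/api table
print(rows["api"].value_counts().head())  # which pandas APIs appear most often
print(rows.loc[0, "completion"])  # the call that completes the first prompt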
#!/usr/bin/env python3.6
# Copyright 2017 <NAME> <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Create charts from the benchmark results
# ----------------------------------------------------------------------------
import glob
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
import numpy as np
import pandas as pd
import seaborn as sns
# This script should be run inside the results directory.
sns.set_style("darkgrid")
# Selected from https://matplotlib.org/users/colormaps.html#qualitative
sns.set_palette(sns.color_palette("tab20", n_colors=11))
# Name for Pool Size Parameter in results
param_pool_size = "Object Pool Size"
# Adjust left for single plot
left_adjust_single = 0.2
# Adjust left for multiple plots
left_adjust_multiple = 0.12
def print_dataframe(df):
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df)
def save_plot(df, title, filename, x="Threads", hue="Benchmark", col=param_pool_size, col_wrap=2, print_data=False,
formatter=tkr.FuncFormatter(lambda y, p: "{:,}".format(y)), left=left_adjust_multiple):
unit = df['Unit'].unique()[0]
print("Creating chart: " + title + ", filename: " + filename + ".")
if print_data:
print_dataframe(df)
fig, ax = plt.subplots()
g = sns.factorplot(x=x, y="Score", hue=hue, col=col, data=df, kind='bar',
size=5, aspect=1, col_wrap=col_wrap, legend=False)
for ax in g.axes.flatten():
ax.yaxis.set_major_formatter(formatter)
g.set_axis_labels(y_var="Score (" + unit + ")")
plt.subplots_adjust(top=0.9, left=left)
g.fig.suptitle(title)
plt.legend(loc='best', title=hue, frameon=True)
plt.savefig(filename)
plt.clf()
plt.close(fig)
# Plot bar charts with error bars
# Some links helped:
# https://stackoverflow.com/a/42033734/1955702
# https://stackoverflow.com/a/30428808/1955702
# https://matplotlib.org/devdocs/gallery/api/barchart.html#sphx-glr-gallery-api-barchart-py
def barplot_with_errorbars(x, y, yerr, x_values, hue_values, label, **kwargs):
# x_values and hue_values must be sorted (hue_values are typically the benchmark names)
data = kwargs.pop("data")
x_values_length = len(x_values)
n = np.arange(x_values_length)
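# Compute symmetric offsets around each x position so that bars for different
# hue values (benchmarks) sit side by side within each group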
offsets = (np.arange(len(hue_values)) - np.arange(len(hue_values)).mean()) / (len(hue_values) + 1.)
width = np.diff(offsets).mean()
# Make sure x axis data is sorted
data = data.sort_values(x)
data_length = len(data)
if data_length < x_values_length:
print('WARN: Not enough data points for %s. Expected %d, Found %d' % (label, x_values_length, data_length))
for i, benchmark in enumerate(hue_values):
if label == benchmark:
plt.bar(n[:data_length] + offsets[i], data[y], width=width, label=label, yerr=data[yerr], capsize=2)
plt.xticks(n, x_values)
def save_plot_with_error_bars(df, title, filename, x="Threads", hue="Benchmark", col=param_pool_size, col_wrap=2,
print_data=False,
formatter=tkr.FuncFormatter(lambda y, p: "{:,}".format(y)), left=left_adjust_multiple):
unit = df['Unit'].unique()[0]
print("Creating chart: " + title + ", filename: " + filename + ".")
if print_data:
print_dataframe(df)
fig, ax = plt.subplots()
x_values = sorted(df[x].unique())
hue_values = sorted(df[hue].unique())
g = sns.FacetGrid(df, hue=hue, col=col, size=5, aspect=1, col_wrap=col_wrap)
g = g.map_dataframe(barplot_with_errorbars, x, "Score", "Score Error (99.9%)", x_values, hue_values)
for ax in g.axes.flatten():
ax.yaxis.set_major_formatter(formatter)
g.set_axis_labels(y_var="Score (" + unit + ")")
plt.subplots_adjust(top=0.9, left=left)
g.fig.suptitle(title)
plt.legend(loc='best', title=hue, frameon=True)
plt.savefig(filename)
plt.clf()
plt.close(fig)
def save_plots(df, title, filename_prefix, x="Threads", hue="Benchmark", col=param_pool_size, col_wrap=2,
print_data=False, formatter=tkr.FuncFormatter(lambda y, p: "{:,}".format(y)), left=left_adjust_multiple):
# Save two plots with and without error bars
# Plotting error bars from dataframe data is not directly supported in factorplot.
# The first plot (without error bars) is the reference and should be used to verify
# the accuracy of the plot with error bars.
save_plot(df, title, filename_prefix + '.png', x=x, hue=hue, col=col, col_wrap=col_wrap, print_data=print_data,
formatter=formatter, left=left)
save_plot_with_error_bars(df, title, filename_prefix + '-with-error-bars.png', x=x, hue=hue, col=col,
col_wrap=col_wrap, print_data=print_data, formatter=formatter, left=left)
def save_lmplot(df, x, title, filename, print_data=False, formatter=tkr.FuncFormatter(lambda y, p: "{:,}".format(y)),
left=left_adjust_single):
unit = df['Unit'].unique()[0]
print("Creating chart: " + title + ", filename: " + filename + ".")
if print_data:
print_dataframe(df)
fig, ax = plt.subplots()
markers_length = len(df["Benchmark"].unique())
g = sns.lmplot(data=df, x=x, y="Score", hue="Benchmark", size=6, legend=False, x_jitter=0.2, y_jitter=0.5,
markers=['o', 'v', '^', '<', '>', '+', 's', 'p', '*', 'x', 'D'][:markers_length])
for ax in g.axes.flatten():
ax.yaxis.set_major_formatter(formatter)
plt.subplots_adjust(top=0.9, left=left)
g.set_axis_labels(y_var="Score (" + unit + ")")
plt.legend(loc='upper left', frameon=True)
g.fig.suptitle(title)
plt.savefig(filename)
plt.clf()
plt.cla()
plt.close(fig)
def replace_benchmark_names(df):
df = df.replace(r'^com\.github\.chrishantha\.microbenchmark\.objectpool\.(.*)Benchmark.useObject$', r'\1',
regex=True)
df = df.replace([r'^com\.github\.chrishantha\.microbenchmark\.objectpool\.(.*)Benchmark.useObject:useObject(.*)$'],
[r'\1\2'], regex=True)
# Profiler Details
df = df.replace([r'^com\.github\.chrishantha\.microbenchmark\.objectpool\.(.*)Benchmark.useObject:(.*)$'],
[r'\1\2'], regex=True)
df = df.replace('com.github.chrishantha.microbenchmark.objectpool.TestObjectBenchmark.expensiveObjectCreate',
'OnDemandExpensiveObject', regex=False)
df = df.replace(
r'^com\.github\.chrishantha\.microbenchmark\.objectpool\.TestObjectBenchmark\.expensiveObjectCreate' +
r':expensiveObjectCreate(.*)$', r'OnDemandExpensiveObject\1', regex=True)
# Profiler Details
df = df.replace(
r'^com\.github\.chrishantha\.microbenchmark\.objectpool\.TestObjectBenchmark\.expensiveObjectCreate' +
r':(.*)$', r'OnDemandExpensiveObject\1', regex=True)
return df
def save_percentile_plot(df, title_percentile, percentile):
df_sample_percentile = df.loc[df['Benchmark'].str.endswith(percentile)]
save_plot(df_sample_percentile, "Sample Time " + title_percentile + "th Percentile Comparison",
"sample-time-" + percentile + "th-percentile.png", formatter=tkr.FormatStrFormatter('%.2e'))
def main():
all_results = glob.glob("results-*-threads.csv")
print("Creating charts using data in following files:")
for file in all_results:
print(file)
print("\nCreating charts...\n")
df = pd.concat(map(pd.read_csv, all_results), ignore_index=True)
df = replace_benchmark_names(df)
df.rename(columns={"Param: poolSize": param_pool_size}, inplace=True)
df.to_csv('all_results.csv')
# df = df[df['Benchmark'].isin(['FastObjectPool', 'StackObjectPool', 'StormpotBlazePool'])]
thrpt_unit = 'ops/ms'
sample_unit = 'ms/op'
alloc_rate_unit = 'MB/sec'
df_thrpt = df.loc[(df['Mode'] == "thrpt") & (df['Unit'] == thrpt_unit)]
thrpt_mask = df_thrpt['Benchmark'].isin(['OnDemandExpensiveObject'])
save_plots(df_thrpt[~thrpt_mask], "Throughput vs Threads Comparison", "thrpt-vs-threads")
save_plots(df_thrpt[~thrpt_mask], "Throughput vs Pool Sizes Comparison", "thrpt-vs-pool-sizes", col="Threads",
x=param_pool_size)
save_lmplot(df_thrpt, "Threads", "Throughput vs Threads", "lmplot-thrpt-vs-threads.png")
save_lmplot(df_thrpt[~pd.isnull(df_thrpt[param_pool_size])], param_pool_size, "Throughput vs Pool Sizes",
"lmplot-thrpt-vs-pool-sizes.png")
for benchmark in df_thrpt[~thrpt_mask]['Benchmark'].unique():
df_benchmark_thrpt = df_thrpt[df_thrpt['Benchmark'] == benchmark]
save_plots(df_benchmark_thrpt, "Throughput vs Threads", "thrpt-" + benchmark, col="Benchmark",
hue=param_pool_size, col_wrap=1, left=left_adjust_single)
df_sample = df.loc[(df['Mode'] == "sample") & (df['Unit'] == sample_unit)]
# Score Error (99.9%) is NaN for percentiles
df_sample_without_percentiles = df_sample[~pd.isnull(df_sample['Score Error (99.9%)'])]
df_sample_pools_without_percentiles = df_sample_without_percentiles[
~pd.isnull(df_sample_without_percentiles[param_pool_size])]
time_formatter = tkr.FuncFormatter(lambda y, p: "{:.2e}".format(y))
sample_mask = df_sample_without_percentiles['Benchmark'].isin(['OnDemandExpensiveObject'])
save_plots(df_sample_without_percentiles[~sample_mask], "Sample Time vs Threads Comparison",
"sample-time-vs-threads", formatter=time_formatter)
save_plots(df_sample_pools_without_percentiles, "Sample Time vs Pool Sizes Comparison",
"sample-time-vs-pool-sizes", col="Threads", x=param_pool_size, formatter=time_formatter)
save_lmplot(df_sample_without_percentiles, "Threads", "Sample Time vs Threads", "lmplot-sample-vs-threads.png",
formatter=time_formatter, left=left_adjust_single)
save_lmplot(df_sample_pools_without_percentiles, param_pool_size, "Sample Time vs Pool Sizes",
"lmplot-sample-vs-pool-sizes.png", formatter=time_formatter, left=left_adjust_single)
for benchmark in df_sample_pools_without_percentiles['Benchmark'].unique():
df_benchmark_sample = df_sample_pools_without_percentiles[
df_sample_pools_without_percentiles['Benchmark'] == benchmark]
save_plots(df_benchmark_sample, "Sample Time vs Threads", "sample-time-" + benchmark, col="Benchmark",
hue=param_pool_size, col_wrap=1, formatter=time_formatter, left=left_adjust_single)
# Filter OnDemandExpensiveObject
df_sample_pools = df_sample[~df_sample['Benchmark'].str.contains('OnDemandExpensiveObject.*')]
save_percentile_plot(df_sample_pools, '50', 'p0.50')
save_percentile_plot(df_sample_pools, '90', 'p0.90')
save_percentile_plot(df_sample_pools, '95', 'p0.95')
save_percentile_plot(df_sample_pools, '99', 'p0.99')
save_percentile_plot(df_sample_pools, '99.9', 'p0.999')
save_percentile_plot(df_sample_pools, '99.99', 'p0.9999')
save_percentile_plot(df_sample_pools, '100', 'p1.00')
df_sample_percentiles = df_sample_pools.copy()
df_sample_percentiles = df_sample_percentiles.loc[ | pd.isnull(df_sample_percentiles['Score Error (99.9%)']) | pandas.isnull |
import pytest
import os
from mapping import util
from pandas.util.testing import assert_frame_equal, assert_series_equal
import pandas as pd
from pandas import Timestamp as TS
import numpy as np
@pytest.fixture
def price_files():
cdir = os.path.dirname(__file__)
path = os.path.join(cdir, 'data/')
files = ["CME-FVU2014.csv", "CME-FVZ2014.csv"]
return [os.path.join(path, f) for f in files]
def assert_dict_of_frames(dict1, dict2):
assert dict1.keys() == dict2.keys()
for key in dict1:
assert_frame_equal(dict1[key], dict2[key])
def test_read_price_data(price_files):
# using default name_func in read_price_data()
df = util.read_price_data(price_files)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "CME-FVU2014"),
(dt1, "CME-FVZ2014"),
(dt2, "CME-FVZ2014")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def name_func(fstr):
file_name = os.path.split(fstr)[-1]
name = file_name.split('-')[1].split('.')[0]
return name[-4:] + name[:3]
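# e.g., "CME-FVZ2014.csv" -> "FVZ2014" -> "2014FVZ"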
df = util.read_price_data(price_files, name_func)
dt1 = TS("2014-09-30")
dt2 = TS("2014-10-01")
idx = pd.MultiIndex.from_tuples([(dt1, "2014FVU"), (dt1, "2014FVZ"),
(dt2, "2014FVZ")],
names=["date", "contract"])
df_exp = pd.DataFrame([119.27344, 118.35938, 118.35938],
index=idx, columns=["Open"])
assert_frame_equal(df, df_exp)
def test_calc_rets_one_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')])
rets = pd.Series([0.1, 0.05, 0.1, 0.8], index=idx)
vals = [1, 0.5, 0.5, 1]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-05'), 'CLG5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([0.1, 0.075, 0.8],
index=weights.index.levels[0],
columns=['CL1'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, 0.15], [0.075, 0.45], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_nans_in_second_generic():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, np.NaN, 0.05, 0.1, np.NaN, -0.5, 0.2],
index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = pd.DataFrame(vals, index=widx, columns=['CL1', 'CL2'])
wrets = util.calc_rets(rets, weights)
wrets_exp = pd.DataFrame([[0.1, np.NaN], [0.075, np.NaN], [-0.5, 0.2]],
index=weights.index.levels[0],
columns=['CL1', 'CL2'])
assert_frame_equal(wrets, wrets_exp)
def test_calc_rets_two_generics_non_unique_columns():
idx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')])
rets = pd.Series([0.1, 0.15, 0.05, 0.1, 0.8, -0.5, 0.2], index=idx)
vals = [[1, 0], [0, 1],
[0.5, 0], [0.5, 0.5], [0, 0.5],
[1, 0], [0, 1]]
widx = pd.MultiIndex.from_tuples([(TS('2015-01-03'), 'CLF5'),
(TS('2015-01-03'), 'CLG5'),
(TS('2015-01-04'), 'CLF5'),
(TS('2015-01-04'), 'CLG5'),
(TS('2015-01-04'), 'CLH5'),
(TS('2015-01-05'), 'CLG5'),
(TS('2015-01-05'), 'CLH5')
])
weights = | pd.DataFrame(vals, index=widx, columns=['CL1', 'CL1']) | pandas.DataFrame |
# <NAME>
# for the Broad Institute
# in 2019
from __future__ import print_function
import pandas as pd
import numpy as np
from genepy.utils import helper as h
import gzip
import seaborn as sns
def vcf_to_df(path, hasfilter=False, samples=['sample'], additional_cols=[], **kwargs):
"""
transforms a VCF file into a dataframe as best as it can
Args:
-----
path: str filepath to the vcf file
hasfilter: bool whether or not the vcf has a filter column
samples: list[str] colnames of the sample names.
additional_cols: list[str] of additional flag column names to extract from the VCF; the function already looks for 'DB', 'SOMATIC', 'GERMLINE', 'OVERLAP', 'IN_PON', 'STR', 'ReverseComplementedAlleles'
Returns:
--------
a dataframe of the VCF
a dict associating each column with its description (gathered from the vcf header)
"""
uniqueargs = ['DB', 'SOMATIC', 'GERMLINE', "OVERLAP", "IN_PON",
"STR", "ReverseComplementedAlleles"] + additional_cols
def read_comments(f):
fields = {}
description = {}
for l in f:
l = l.decode("utf-8") if type(l) is not str else l
if l.startswith('##'):
if 'FORMAT' in l[:20]:
res = l.split('ID=')[1].split(',')[0]
desc = l.split('Description=')[1][:-2]
description.update({res: desc})
if 'INFO' in l[:20]:
res = l.split('ID=')[1].split(',')[0]
desc = l.split('Description=')[1][:-2]
description.update({res: desc})
fields.update({res: []})
else:
break
return fields, description
if path.endswith('.gz'):
with gzip.open(path, 'r') as f:
fields, description = read_comments(f)
else:
with open(path, 'r') as f:
fields, description = read_comments(f)
names = ['chr', 'pos', 'id', 'ref', 'alt', 'qual']
names += ['filter'] if hasfilter else ['strand']
names += ['data', 'format'] + samples
csvkwargs = {'sep': '\t',
'index_col': False,
'header': None,
#'names': names,
'comment': "#"}
a = | pd.read_csv(path, **csvkwargs) | pandas.read_csv |
import time
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import linkage, leaves_list
# amat := alignment matrix with shape (N,L,2); last dimension is
# part of an unused feature, so ignore it
# namat := list of alignment iterations; each entry is a dictionary
# with the iteration's amat, score, method, and time;
# pima_init() initializes the list from the traces, and
# pima_iter() automatically iterates from last namat entry
# traces := list of log's traces; each trace is a numpy array of
# integers; the integers map to activity types
# csv to log, returns list of traces and case and activity index mappings
def csv2log(filename, caseids_col, acts_col,
starts_col, isplg=False):
# csv to df
df = | pd.read_csv(filename) | pandas.read_csv |
#!/usr/bin/env python3
# Author: <NAME> <<EMAIL>>
"""Describe raw data used and share of useable papers."""
from configparser import ConfigParser
from glob import glob
import pandas as pd
from _910_analyze_multiaff_shares import make_stacked_lineplot
JOURNAL_FOLDER = "./002_journal_samples/"
SOURCE_FOLDER = "./100_source_articles/"
COUNTS_FOLDER = "./100_meta_counts/"
OUTPUT_FOLDER = "./990_output/"
pd.plotting.register_matplotlib_converters()
pd.options.display.float_format = '{:,}'.format
config = ConfigParser()
config.optionxform = str
config.read("./definitions.cfg")
asjc_map = dict(config["field names"])
def format_shares(df, val_name):
"""Melt wide DataFrame and replace field codes with field names."""
df.columns = [asjc_map.get(c, c) for c in df.columns]
df = df[asjc_map.values()]
df.index.name = "year"
return (df.reset_index()
.melt(id_vars=["year"], var_name="field", value_name=val_name)
.sort_values("field"))
def read_from_statistics(fname):
"""Read number from statistics text files."""
fname = f"{OUTPUT_FOLDER}Statistics/{fname}.txt"
with open(fname) as inf:
return int(inf.read().strip().replace(",", ""))
def main():
# Compute number of authors by field
author_counts = pd.Series(dtype="uint64")
for field in asjc_map.keys():
authors = set()
for f in glob(f"{SOURCE_FOLDER}articles_{field}-*.csv"):
df = pd.read_csv(f, encoding="utf8", usecols=["author"])
authors.update(df["author"].unique())
author_counts[field] = len(authors)
# LaTeX table with authors and papers by field
fname = JOURNAL_FOLDER + "journal-counts.csv"
journals = pd.read_csv(fname, index_col=0, encoding="utf8").T
journals = journals[["Total", "Coverage > 5 years", "Used"]]
journals = journals.rename(columns={"Used": "Sampled"})
overall = journals.copy()
cols = [("Journals", c) for c in overall.columns]
overall.columns = pd.MultiIndex.from_tuples(cols)
# Add columns
fname = COUNTS_FOLDER + "num_publications.csv"
publications = pd.read_csv(fname, index_col=0, encoding="utf8")
overall[("Articles", "Sampled")] = publications.sum(axis=0)
fname = COUNTS_FOLDER + "num_articles.csv"
articles = pd.read_csv(fname, index_col=0, encoding="utf8")
overall[("Articles", "research-type")] = articles.sum(axis=0)
fname = COUNTS_FOLDER + "num_useful.csv"
useful = | pd.read_csv(fname, index_col=0, encoding="utf8") | pandas.read_csv |
#-*- coding: utf-8 -*-
import os
import re
import pandas as pd
from modules.venders.vender import Vender
class PostCleaning(Vender):
def __init__(self, code, vender=None):
Vender.__init__(self, None, vender)
self.set_dividend_payout_ratio()
self.set_bps_multiple(0.5)
self.set_bps_multiple(2)
self.set_bps_multiple(3)
self.set_get_price()
self.set_net_income_ratio()
self.set_sales_fcff()
self.set_ev1_fcff()
def set_ev1_fcff(self):
column_name = 'EV_FCFF'
df = pd.DataFrame(columns=[column_name], index=self.data.index.values)
data = self.get_data()
if 'FCFF' in data.columns and 'EV1' in data.columns:
for month in data.index.values:
value = (data['FCFF'][month] / data['EV1'][month]) * 100
df[column_name][month] = round(value if not pd.isnull(value) else 0, 2)
self.concat_data(df)
def set_sales_fcff(self):
column_name = 'SALES_FCFF'
df = pd.DataFrame(columns=[column_name], index=self.data.index.values)
data = self.get_data()
if 'FCFF' in data.columns and 'SALES' in data.columns:
for month in data.index.values:
value = (data['FCFF'][month] / data['SALES'][month]) * 100
df[column_name][month] = round(value if not pd.isnull(value) else 0, 2)
self.concat_data(df)
def set_net_income_ratio(self):
column_name = 'NET_INCOME_RATIO'
df = pd.DataFrame(columns=[column_name], index=self.data.index.values)
data = self.get_data()
if 'EPS_IFRS' in data.columns and 'SALES' in data.columns:
for month in data.index.values:
value = (data['EPS_IFRS'][month] /
((data['SALES'][month] * 100000000) /
(self.data['STOCK_COUNT'][month] * 1000))) * 100
df[column_name][month] = round(value if not pd.isnull(value) else 0, 2)
self.concat_data(df)
# Stock price
def set_get_price(self):
column_name = 'PRICE'
df = pd.DataFrame(columns=[column_name], index=self.data.index.values)
data = self.get_data()
if 'BPS' in data.columns and 'PBR' in data.columns:
for month in data.index.values:
value = data['BPS'][month] * self.data['PBR'][month]
df[column_name][month] = int(value if not pd.isnull(value) else 0)
self.concat_data(df)
def set_bps_multiple(self, multiple):
column_name = 'BPS_TIMES_' + str(multiple)
df = pd.DataFrame(columns=[column_name], index=self.data.index.values)
data = self.get_data()
if 'BPS' in data.columns:
for month in data.index.values:
value = data['BPS'][month] * multiple
df[column_name][month] = int(value if not pd.isnull(value) else 0)
self.concat_data(df)
# Dividend payout ratio (consolidated)
def set_dividend_payout_ratio(self):
column_name = 'DIVIDEND_PAYOUT_RATIO'
df = | pd.DataFrame(columns=[column_name], index=self.data.index.values) | pandas.DataFrame |
import json
import re
import sys
import os
import pandas as pd
import io
import config
from functools import reduce
import operator
from pathlib import Path
from rsonlite import simpleparse
import runcmd
def count_lspaces(l):
# print(">>", repr(l))
return re.search(r'\S', l).start()
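# e.g., count_lspaces("  Uid 1000:") == 2 (the number of leading spaces)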
def get_d_at_level(d, lvl):
for l in lvl:
if l not in d:
d[l] = {}
d = d[l]
return d
def clean_json(d):
if not any(d.values()):
return list(d.keys())
else:
for k, v in d.items():
d[k] = clean_json(v)
def match_keys(d, keys, only_last=False):
ret = []
# print(keys)
# print(keys)
for sk in keys.split('//'):
sk = re.compile(sk)
if isinstance(d, list):
d = d[0]
for k, v in d.items():
if sk.match(k):
ret.append(k)
d = d[k]
break
if only_last:
return 'key=NOTFOUND' if not ret else ret[-1]
else:
return ret
def extract(d, lkeys):
for k in lkeys:
if isinstance(d, list):
d = d[0]
d = d.get(k, {})
return d
def split_equalto_delim(k):
return k.split('=', 1)
def retrieve(dict_, nest):
'''
Navigates dictionaries like dict_[nest0][nest1][nest2]...
gracefully.
'''
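# e.g., retrieve(row, ["a", "b"]) behaves like row["a"]["b"], returning "" if a key is missing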
dict_ = dict_.to_dict() # for pandas
try:
return reduce(operator.getitem, nest, dict_)
except KeyError as e:
return ""
except TypeError as e:
return ""
class PhoneDump(object):
def __init__(self, dev_type, fname):
self.device_type = dev_type
self.fname = fname
# df must be a dictionary
self.df = self.load_file()
def load_file(self):
raise Exception("Not Implemented")
def info(self, appid):
raise Exception("Not Implemented")
class AndroidDump(PhoneDump):
def __init__(self, fname):
self.dumpf = fname
super(AndroidDump, self).__init__('android', fname)
self.df = self.load_file()
def _extract_lines(self, service):
"""Extract lines for te DUMP OF SERVICE <service> """
cmd = "sed -n -e '/DUMP OF SERVICE {}/,/DUMP OF SERVICE/p' {fname} "\
"| head -n -1"
s = "DUMP OF SERVICE {}".format(service)
started = False
with open(self.dumpf) as f:
for l in f:
if started:
if "DUMP OF SERVICE" in l:
break
else:
yield l
elif s in l:
started = True
@staticmethod
def custom_parse(service, lines):
if service == 'appops':
return lines
@staticmethod
def new_parse_dump_file(fname):
"""Not used working using simple parse to parse the files. """
if not Path(fname).exists():
print("File: {!r} does not exists".format(fname))
data = open(fname)
d = {}
service = ''
join_lines = []
custom_parse_services = {"appops"}
def _parse(lines):
try:
if service in custom_parse_services:
return AndroidDump.custom_parse(service, lines)
else:
return simpleparse('\n'.join(join_lines))
except Exception as ex:
print("Could not parse for {} service={}. Exception={}"\
.format(fname, service, ex))
return lines
for i, l in enumerate(data):
if l.startswith('----'): continue
if l.startswith('DUMP OF SERVICE'):
if service:
d[service] = _parse(join_lines)
service = l.strip().rsplit(' ', 1)[1]
join_lines = []
else:
join_lines.append(l)
if len(join_lines) > 0 and len(d.get(service, [])) == 0:
d[service] = _parse(join_lines)
return d
@staticmethod
def parse_dump_file(fname):
if not Path(fname).exists():
print("File: {!r} does not exists".format(fname))
data = open(fname)
d = {}
service = ''
lvls = ['' for _ in range(20)] # Max 20 nesting levels allowed
curr_spcnt, curr_lvl = 0, 0
for i, l in enumerate(data):
if l.startswith('----'): continue
if l.startswith('DUMP OF SERVICE'):
service = l.strip().rsplit(' ', 1)[1]
d[service] = res = {}
curr_spcnt = [0]
curr_lvl = 0
else:
if not l.strip(): # subsection ends
continue
l = l.replace('\t', ' ')
t_spcnt = count_lspaces(l)
# print(t_spcnt, curr_spcnt, curr_lvl)
# if t_spcnt == 1:
# print(repr(l))
if t_spcnt > 0 and t_spcnt >= curr_spcnt[-1]*2:
curr_lvl += 1
curr_spcnt.append(t_spcnt)
while curr_spcnt and curr_spcnt[-1] > 0 and t_spcnt <= curr_spcnt[-1]/2:
curr_lvl -= 1
curr_spcnt.pop()
if curr_spcnt[-1]>0:
curr_spcnt[-1] = t_spcnt
assert (t_spcnt != 0) or (curr_lvl == 0), \
"t_spc: {} <--> curr_lvl: {}\n{}".format(t_spcnt, curr_lvl, l)
# print(lvls[:curr_lvl], curr_lvl, curr_spcnt)
curr = get_d_at_level(res, lvls[:curr_lvl])
k = l.strip().rstrip(':')
lvls[curr_lvl] = k # '{} --> {}'.format(curr_lvl, k)
curr[lvls[curr_lvl]] = {}
return d
def load_file(self):
fname = self.fname.rsplit('.', 1)[0] + '.txt'
json_fname = fname.rsplit('.', 1)[0] + '.json'
if os.path.exists(json_fname):
with open(json_fname, 'r') as f:
try:
d = json.load(f)
except Exception as ex:
print(ex)
return {}
else:
with open(json_fname, 'w') as f:
try:
d = self.parse_dump_file(fname)
json.dump(d, f, indent=2)
except Exception as ex:
print("File ({!r}) could not be opened or parsed.".format(fname))
print("Exception: {}".format(ex))
return | pd.DataFrame([]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from statsmodels.compat.pandas import Appender, Substitution, to_numpy
from collections.abc import Iterable
import datetime as dt
from types import SimpleNamespace
import warnings
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde, norm
from statsmodels.tsa.base.prediction import PredictionResults
import statsmodels.base.wrapper as wrap
from statsmodels.iolib.summary import Summary
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.decorators import cache_readonly, cache_writable
from statsmodels.tools.docstring import Docstring, remove_parameters
from statsmodels.tools.validation import (
array_like,
bool_like,
int_like,
string_like,
)
from statsmodels.tsa.arima_process import arma2ma
from statsmodels.tsa.base import tsa_model
from statsmodels.tsa.deterministic import (
DeterministicProcess,
Seasonality,
TimeTrend,
)
from statsmodels.tsa.tsatools import (
freq_to_period,
lagmat,
)
__all__ = ["AR", "AutoReg"]
AR_DEPRECATION_WARN = """
statsmodels.tsa.AR has been deprecated in favor of statsmodels.tsa.AutoReg and
statsmodels.tsa.SARIMAX.
AutoReg adds the ability to specify exogenous variables, include time trends,
and add seasonal dummies. The AutoReg API differs from AR since the model is
treated as immutable, and so the entire specification including the lag
length must be specified when creating the model. This change is too
substantial to incorporate into the existing AR api. The function
ar_select_order performs lag length selection for AutoReg models.
AutoReg only estimates parameters using conditional MLE (OLS). Use SARIMAX to
estimate ARX and related models using full MLE via the Kalman Filter.
To silence this warning and continue using AR until it is removed, use:
import warnings
warnings.filterwarnings('ignore', 'statsmodels.tsa.ar_model.AR', FutureWarning)
"""
REPEATED_FIT_ERROR = """
Model has been fit using maxlag={0}, method={1}, ic={2}, trend={3}. These
cannot be changed in subsequent calls to `fit`. Instead, use a new instance of
AR.
"""
def sumofsq(x, axis=0):
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x ** 2, axis=axis)
def _ar_predict_out_of_sample(y, params, k_ar, k_trend, steps, start=0):
mu = params[:k_trend] if k_trend else 0 # only have to worry constant
arparams = params[k_trend:][::-1] # reverse for dot
# dynamic endogenous variable
endog = np.zeros(k_ar + steps) # this is one too big but does not matter
if start:
endog[:k_ar] = y[start - k_ar : start]
else:
endog[:k_ar] = y[-k_ar:]
forecast = np.zeros(steps)
for i in range(steps):
fcast = mu + np.dot(arparams, endog[i : i + k_ar])
forecast[i] = fcast
endog[i + k_ar] = fcast
return forecast
class AutoReg(tsa_model.TimeSeriesModel):
"""
Autoregressive AR-X(p) model.
Estimate an AR-X model using Conditional Maximum Likelihood (OLS).
Parameters
----------
endog : array_like
A 1-d endogenous response variable. The dependent variable.
lags : {int, list[int]}
The number of lags to include in the model if an integer or the
list of lag indices to include. For example, [1, 4] will only
include lags 1 and 4 while lags=4 will include lags 1, 2, 3, and 4.
trend : {'n', 'c', 't', 'ct'}
The trend to include in the model:
* 'n' - No trend.
* 'c' - Constant only.
* 't' - Time trend only.
* 'ct' - Constant and time trend.
seasonal : bool
Flag indicating whether to include seasonal dummies in the model. If
seasonal is True and trend includes 'c', then the first period
is excluded from the seasonal terms.
exog : array_like, optional
Exogenous variables to include in the model. Must have the same number
of observations as endog and should be aligned so that endog[i] is
regressed on exog[i].
hold_back : {None, int}
Initial observations to exclude from the estimation sample. If None,
then hold_back is equal to the maximum lag in the model. Set to a
non-zero value to produce comparable models with different lag
length. For example, to compare the fit of a model with lags=3 and
lags=1, set hold_back=3 which ensures that both models are estimated
using observations 3,...,nobs. hold_back must be >= the maximum lag in
the model.
period : {None, int}
The period of the data. Only used if seasonal is True. This parameter
can be omitted if using a pandas object for endog that contains a
recognized frequency.
missing : str
Available options are 'none', 'drop', and 'raise'. If 'none', no nan
checking is done. If 'drop', any observations with nans are dropped.
If 'raise', an error is raised. Default is 'none'.
deterministic : DeterministicProcess
A deterministic process. If provided, trend and seasonal are ignored.
A warning is raised if trend is not "n" and seasonal is not False.
old_names : bool
Flag indicating whether to use the v0.11 names or the v0.12+ names.
.. deprecated:: 0.13
old_names is deprecated and will be removed after 0.14 is
released. You must update any code reliant on the old variable
names to use the new names.
See Also
--------
statsmodels.tsa.statespace.sarimax.SARIMAX
Estimation of SARIMAX models using exact likelihood and the
Kalman Filter.
Examples
--------
>>> import statsmodels.api as sm
>>> from statsmodels.tsa.ar_model import AutoReg
>>> data = sm.datasets.sunspots.load_pandas().data['SUNACTIVITY']
>>> out = 'AIC: {0:0.3f}, HQIC: {1:0.3f}, BIC: {2:0.3f}'
Start by fitting an unrestricted Seasonal AR model
>>> res = AutoReg(data, lags = [1, 11, 12]).fit()
>>> print(out.format(res.aic, res.hqic, res.bic))
AIC: 5.945, HQIC: 5.970, BIC: 6.007
An alternative used seasonal dummies
>>> res = AutoReg(data, lags=1, seasonal=True, period=11).fit()
>>> print(out.format(res.aic, res.hqic, res.bic))
AIC: 6.017, HQIC: 6.080, BIC: 6.175
Finally, both the seasonal AR structure and dummies can be included
>>> res = AutoReg(data, lags=[1, 11, 12], seasonal=True, period=11).fit()
>>> print(out.format(res.aic, res.hqic, res.bic))
AIC: 5.884, HQIC: 5.959, BIC: 6.071
"""
def __init__(
self,
endog,
lags,
trend="c",
seasonal=False,
exog=None,
hold_back=None,
period=None,
missing="none",
*,
deterministic=None,
old_names=False
):
super(AutoReg, self).__init__(endog, exog, None, None, missing=missing)
self._trend = string_like(
trend, "trend", options=("n", "c", "t", "ct")
)
self._seasonal = bool_like(seasonal, "seasonal")
self._period = int_like(period, "period", optional=True)
if self._period is None and self._seasonal:
if self.data.freq:
self._period = freq_to_period(self._index_freq)
else:
err = (
"freq cannot be inferred from endog and model includes"
" seasonal terms. The number of periods must be "
"explicitly set when the endog's index does not "
"contain a frequency."
)
raise ValueError(err)
terms = [TimeTrend.from_string(self._trend)]
if seasonal:
terms.append(Seasonality(self._period))
if hasattr(self.data.orig_endog, "index"):
index = self.data.orig_endog.index
else:
index = np.arange(self.data.endog.shape[0])
self._user_deterministic = False
if deterministic is not None:
if not isinstance(deterministic, DeterministicProcess):
raise TypeError("deterministic must be a DeterministicProcess")
self._deterministics = deterministic
self._user_deterministic = True
else:
self._deterministics = DeterministicProcess(
index, additional_terms=terms
)
self._lags = lags
self._exog_names = []
self._k_ar = 0
self._hold_back = int_like(hold_back, "hold_back", optional=True)
self._old_names = bool_like(old_names, "old_names", optional=False)
if deterministic is not None and (
self._trend != "n" or self._seasonal
):
warnings.warn(
'When using deterministic, trend must be "n" and '
"seasonal must be False.",
RuntimeWarning,
)
if self._old_names:
warnings.warn(
"old_names will be removed after the 0.14 release. You should "
"stop setting this parameter and use the new names.",
FutureWarning,
)
self._check_lags()
self._setup_regressors()
self.nobs = self._y.shape[0]
self.data.xnames = self.exog_names
@property
def ar_lags(self):
"""The autoregressive lags included in the model"""
return self._lags
@property
def hold_back(self):
"""The number of initial obs. excluded from the estimation sample."""
return self._hold_back
@property
def seasonal(self):
"""Flag indicating that the model contains a seasonal component."""
return self._seasonal
@property
def df_model(self):
"""The model degrees of freedom."""
return self._x.shape[1]
@property
def exog_names(self):
"""Names of exogenous variables included in model"""
return self._exog_names
def initialize(self):
"""Initialize the model (no-op)."""
pass
def _check_lags(self):
lags = self._lags
if isinstance(lags, Iterable):
lags = np.array(sorted([int_like(lag, "lags") for lag in lags]))
self._lags = lags
if np.any(lags < 1) or np.unique(lags).shape[0] != lags.shape[0]:
raise ValueError(
"All values in lags must be positive and " "distinct."
)
self._maxlag = np.max(lags)
else:
self._maxlag = int_like(lags, "lags")
if self._maxlag < 0:
raise ValueError("lags must be a positive scalar.")
self._lags = np.arange(1, self._maxlag + 1)
if self._hold_back is None:
self._hold_back = self._maxlag
if self._hold_back < self._maxlag:
raise ValueError(
"hold_back must be >= lags if lags is an int or"
"max(lags) if lags is array_like."
)
def _setup_regressors(self):
maxlag = self._maxlag
hold_back = self._hold_back
exog_names = []
endog_names = self.endog_names
x, y = lagmat(self.endog, maxlag, original="sep")
exog_names.extend(
[endog_names + ".L{0}".format(lag) for lag in self._lags]
)
if len(self._lags) < maxlag:
x = x[:, self._lags - 1]
self._k_ar = x.shape[1]
deterministic = self._deterministics.in_sample()
if deterministic.shape[1]:
x = np.c_[to_numpy(deterministic), x]
if self._old_names:
deterministic_names = []
if "c" in self._trend:
deterministic_names.append("intercept")
if "t" in self._trend:
deterministic_names.append("trend")
if self._seasonal:
period = self._period
names = ["seasonal.{0}".format(i) for i in range(period)]
if "c" in self._trend:
names = names[1:]
deterministic_names.extend(names)
else:
deterministic_names = list(deterministic.columns)
exog_names = deterministic_names + exog_names
if self.exog is not None:
x = np.c_[x, self.exog]
exog_names.extend(self.data.param_names)
y = y[hold_back:]
x = x[hold_back:]
if y.shape[0] < x.shape[1]:
reg = x.shape[1]
period = self._period
trend = 0 if self._trend == "n" else len(self._trend)
seas = 0 if not self._seasonal else period - ("c" in self._trend)
lags = self._lags.shape[0]
nobs = y.shape[0]
raise ValueError(
"The model specification cannot be estimated. "
"The model contains {0} regressors ({1} trend, "
"{2} seasonal, {3} lags) but after adjustment "
"for hold_back and creation of the lags, there "
"are only {4} data points available to estimate "
"parameters.".format(reg, trend, seas, lags, nobs)
)
self._y, self._x = y, x
self._exog_names = exog_names
def fit(self, cov_type="nonrobust", cov_kwds=None, use_t=False):
"""
Estimate the model parameters.
Parameters
----------
cov_type : str
The covariance estimator to use. The most common choices are listed
below. Supports all covariance estimators that are available
in ``OLS.fit``.
* 'nonrobust' - The class OLS covariance estimator that assumes
homoskedasticity.
* 'HC0', 'HC1', 'HC2', 'HC3' - Variants of White's
(or Eiker-Huber-White) covariance estimator. `HC0` is the
standard implementation. The other make corrections to improve
the finite sample performance of the heteroskedasticity robust
covariance estimator.
* 'HAC' - Heteroskedasticity-autocorrelation robust covariance
estimation. Supports cov_kwds.
- `maxlags` integer (required) : number of lags to use.
- `kernel` callable or str (optional) : kernel
currently available kernels are ['bartlett', 'uniform'],
default is Bartlett.
- `use_correction` bool (optional) : If true, use small sample
correction.
cov_kwds : dict, optional
A dictionary of keyword arguments to pass to the covariance
estimator. `nonrobust` and `HC#` do not support cov_kwds.
use_t : bool, optional
A flag indicating that inference should use the Student's t
distribution that accounts for model degree of freedom. If False,
uses the normal distribution. If None, defers the choice to
the cov_type. It also removes degree of freedom corrections from
the covariance estimator when cov_type is 'nonrobust'.
Returns
-------
AutoRegResults
Estimation results.
See Also
--------
statsmodels.regression.linear_model.OLS
Ordinary Least Squares estimation.
statsmodels.regression.linear_model.RegressionResults
See ``get_robustcov_results`` for a detailed list of available
covariance estimators and options.
Notes
-----
Use ``OLS`` to estimate model parameters and to estimate parameter
covariance.
"""
# TODO: Determine correction for degree-of-freedom
# Special case parameterless model
if self._x.shape[1] == 0:
return AutoRegResultsWrapper(
AutoRegResults(self, np.empty(0), np.empty((0, 0)))
)
ols_mod = OLS(self._y, self._x)
ols_res = ols_mod.fit(
cov_type=cov_type, cov_kwds=cov_kwds, use_t=use_t
)
cov_params = ols_res.cov_params()
use_t = ols_res.use_t
if cov_type == "nonrobust" and not use_t:
nobs = self._y.shape[0]
k = self._x.shape[1]
scale = nobs / (nobs - k)
cov_params /= scale
res = AutoRegResults(
self, ols_res.params, cov_params, ols_res.normalized_cov_params
)
return AutoRegResultsWrapper(res)
def _resid(self, params):
params = array_like(params, "params", ndim=2)
resid = self._y - self._x @ params
return resid.squeeze()
def loglike(self, params):
"""
Log-likelihood of model.
Parameters
----------
params : ndarray
The model parameters used to compute the log-likelihood.
Returns
-------
float
The log-likelihood value.
"""
nobs = self.nobs
resid = self._resid(params)
ssr = resid @ resid
llf = -(nobs / 2) * (np.log(2 * np.pi) + np.log(ssr / nobs) + 1)
return llf
def score(self, params):
"""
Score vector of model.
The gradient of logL with respect to each parameter.
Parameters
----------
params : ndarray
The parameters to use when evaluating the Hessian.
Returns
-------
ndarray
The score vector evaluated at the parameters.
"""
resid = self._resid(params)
return self._x.T @ resid
def information(self, params):
"""
Fisher information matrix of model.
Returns -1 * Hessian of the log-likelihood evaluated at params.
Parameters
----------
params : ndarray
The model parameters.
Returns
-------
ndarray
The information matrix.
"""
resid = self._resid(params)
sigma2 = resid @ resid / self.nobs
return sigma2 * (self._x.T @ self._x)
def hessian(self, params):
"""
The Hessian matrix of the model.
Parameters
----------
params : ndarray
The parameters to use when evaluating the Hessian.
Returns
-------
ndarray
The hessian evaluated at the parameters.
"""
return -self.information(params)
def _setup_oos_forecast(self, add_forecasts, exog_oos):
x = np.zeros((add_forecasts, self._x.shape[1]))
oos_exog = self._deterministics.out_of_sample(steps=add_forecasts)
n_deterministic = oos_exog.shape[1]
x[:, :n_deterministic] = to_numpy(oos_exog)
# skip the AR columns
loc = n_deterministic + len(self._lags)
if self.exog is not None:
x[:, loc:] = exog_oos[:add_forecasts]
return x
def _wrap_prediction(self, prediction, start, end):
n_values = end - start
if not isinstance(self.data.orig_endog, (pd.Series, pd.DataFrame)):
return prediction[-n_values:]
index = self._index
if end > self.endog.shape[0]:
freq = getattr(index, "freq", None)
if freq:
if isinstance(index, pd.PeriodIndex):
index = | pd.period_range(index[0], freq=freq, periods=end) | pandas.period_range |
"""Helper functions to read and convert common
data formats."""
import pandas as pd
import numpy as np
import os
import logging
from functools import partial
from .format_checkers import _is_bed_row
def convert_bed_to_bedpe(input_file, target_file, halfwindowsize, chromsize_path):
"""Converts bedfile at inputFile to a bedpefile,
expanding the point of interest up- and downstream
by halfwindowsize basepairs.
Only intervals that fall within the bounds of
chromosomes are written out.
"""
# load input_file
input_frame = | pd.read_csv(input_file, sep="\t", header=None) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
from collections import Counter
import itertools
import os
#import WOSutilities as wosutil
#path2rawdata='/home/apoorva_kasoju2712/WOS_data'
def load_author_data():
#['ArticleID', 'AuthorOrder', 'AuthorDAIS', 'FullName', 'LastName', 'FirstName', 'Email']
author_df_1 = wosutil.load_wos_data(name = 'authorship',path2rawdata = path2rawdata,year_list = [1900] + list(range(1945, 1955)),columns=['ArticleID','AuthorOrder','AuthorDAIS','FullName','LastName','FirstName'],
duplicate_subset = None,dropna = ['ArticleID','FullName', 'LastName','FirstName'], verbose = 50)
author_df_2 = wosutil.load_wos_data(name = 'authorship',path2rawdata = path2rawdata,year_list = list(range(1955, 1965)),columns=['ArticleID','AuthorOrder','AuthorDAIS','FullName','LastName','FirstName'],
duplicate_subset = None,dropna = ['ArticleID','FullName', 'LastName','FirstName'], verbose = 50)
author_df_3 = wosutil.load_wos_data(name = 'authorship',path2rawdata = path2rawdata,year_list = list(range(1965, 1975)),columns=['ArticleID','AuthorOrder','AuthorDAIS','FullName','LastName','FirstName'],
duplicate_subset = None, dropna = ['ArticleID','FullName', 'LastName','FirstName'], verbose = 50)
author_df_4 = wosutil.load_wos_data(name = 'authorship',path2rawdata = path2rawdata,year_list = list(range(1975, 1990)),columns=['ArticleID','AuthorOrder','AuthorDAIS','FullName','LastName','FirstName'],
duplicate_subset = None, dropna = ['ArticleID','FullName', 'LastName','FirstName'], verbose = 50)
author_df_5 = wosutil.load_wos_data(name = 'authorship',path2rawdata = path2rawdata,year_list = list(range(1990, 2005)),columns=['ArticleID','AuthorOrder','AuthorDAIS','FullName','LastName','FirstName'],
duplicate_subset = None,dropna = ['ArticleID','FullName', 'LastName','FirstName'], verbose = 50)
author_df_6 = wosutil.load_wos_data(name = 'authorship',path2rawdata = path2rawdata,year_list = list(range(2005, 2016)),columns=['ArticleID','AuthorOrder','AuthorDAIS','FullName','LastName','FirstName'],
duplicate_subset = None,dropna = ['ArticleID','FullName', 'LastName','FirstName'], verbose = 50)
author_df_12=pd.concat([author_df_1, author_df_2], ignore_index=True)
del author_df_1
del author_df_2
author_df_34=pd.concat([author_df_3, author_df_4], ignore_index=True)
del author_df_3
del author_df_4
author_df_56=pd.concat([author_df_5, author_df_6], ignore_index=True)
del author_df_5
del author_df_6
author_df=pd.concat([author_df_12,author_df_34,author_df_56],ignore_index=True)
del author_df_12
del author_df_34
del author_df_56
print(author_df.shape)
return author_df
def load_article_data():
article_df_1 = wosutil.load_wos_data(name = 'article', path2rawdata = path2rawdata,
year_list = [1900] + list(range(1945, 1955)),
columns = ['ArticleID','Title', 'PubYear','Doctypes'],
dropna = ['ArticleID', 'PubYear'],
duplicate_subset = ['ArticleID'],
isindict = {'Doctypes':np.sort(['Article','Letter','Review','Note'])},
verbose = 50)
del article_df_1['Doctypes']
#print("Completed in %f" % (time.time() - st))
article_df_2 = wosutil.load_wos_data(name = 'article', path2rawdata = path2rawdata,
year_list = list(range(1955, 1965)),
columns = ['ArticleID','Title','PubYear','Doctypes'],
dropna = ['ArticleID', 'PubYear'],
duplicate_subset = ['ArticleID'],
isindict = {'Doctypes':np.sort(['Article','Letter','Review','Note'])},
verbose = 50)
del article_df_2['Doctypes']
article_df_3 = wosutil.load_wos_data(name = 'article', path2rawdata = path2rawdata,
year_list = list(range(1965, 1975)),
columns = ['ArticleID', 'Title','PubYear','Doctypes'],
dropna = ['ArticleID', 'PubYear'],
duplicate_subset = ['ArticleID'],
isindict = {'Doctypes':np.sort(['Article','Letter','Review','Note'])},
verbose = 50)
del article_df_3['Doctypes']
article_df_4 = wosutil.load_wos_data(name = 'article', path2rawdata = path2rawdata,
year_list = list(range(1975, 1990)),
columns = ['ArticleID', 'Title','PubYear','Doctypes'],
dropna = ['ArticleID', 'PubYear'],
duplicate_subset = ['ArticleID'],
isindict = {'Doctypes':np.sort(['Article','Letter','Review','Note'])},
verbose = 50)
del article_df_4['Doctypes']
article_df_5 = wosutil.load_wos_data(name = 'article', path2rawdata = path2rawdata,
year_list = list(range(1990, 2005)),
columns = ['ArticleID','Title', 'PubYear','Doctypes'],
dropna = ['ArticleID', 'PubYear'],
duplicate_subset = ['ArticleID'],
isindict = {'Doctypes':np.sort(['Article','Letter','Review','Note'])},
verbose = 50)
del article_df_5['Doctypes']
article_df_6 = wosutil.load_wos_data(name = 'article', path2rawdata = path2rawdata,
year_list = list(range(2005, 2016)),
columns = ['ArticleID', 'Title','PubYear','Doctypes'],
dropna = ['ArticleID', 'PubYear'],
duplicate_subset = ['ArticleID'],
isindict = {'Doctypes':np.sort(['Article','Letter','Review','Note'])},
verbose = 50)
del article_df_6['Doctypes']
article_df_12=pd.concat([article_df_1, article_df_2], ignore_index=True)
del article_df_1
del article_df_2
article_df_34=pd.concat([article_df_3, article_df_4], ignore_index=True)
del article_df_3
del article_df_4
article_df_56= | pd.concat([article_df_5, article_df_6], ignore_index=True) | pandas.concat |
__author__ = "<NAME>"
import numpy
import pandas
import copy
import math
from . import Utilities
from . import MultiPrediXcanAssociation, PrediXcanAssociation
from ..expression import HDF5Expression, Expression
########################################################################################################################
def mp_callback(gene, model, result, vt_projection, variance, model_keys, coefs, save):
save["coefs"] = coefs
class Context(object):
def __init__(self, expression_manager, phenotype_generator, filter, do_predixcan=False, only_truth=False):
self.expression_manager = expression_manager
self.phenotype_generator = phenotype_generator
self.filter = filter
self.do_predixcan = do_predixcan
self.only_truth = only_truth
def do_predixcan(self):
return self.do_predixcan
def get_genes(self):
return self.expression_manager.get_genes()
def __enter__(self):
self.expression_manager.enter()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.expression_manager.exit()
def get_mp_simulation(self, gene):
if not gene:
return Utilities.DumbMTPContext(None, None, None, self.filter), None, None
expression = self.expression_manager.expression_for_gene(gene)
phenotype, description = self.phenotype_generator.get(expression, gene)
if phenotype is None:
return None, None, None
_cp = None
if self.do_predixcan:
_cp = {}
for t in description.itertuples():
if "covariate" in t.variable: continue
_cp[t.variable] = Utilities.DumbPContext(expression[t.variable], phenotype, gene, self.filter)
if self.only_truth:
expression = {x.variable:expression[x.variable] for x in description.itertuples() if x.variable in expression}
return Utilities.DumbMTPContext(expression, phenotype, gene, self.filter), _cp, description
########################################################################################################################
class PhenotypeGenerator(object):
def __init__(self): raise RuntimeError("Not implemented")
class RandomPhenotypeGenerator(PhenotypeGenerator):
def __init__(self):
pass
def get(self, expression, gene):
k = list(expression.keys())[0]
e = expression[k]
n = len(e)
pheno = numpy.random.uniform(size=n)
description = pandas.DataFrame({ "variable":["covariate"], "param": [1.0]})
return pheno, description
class LinearCombinationPhenotypeGenerator(PhenotypeGenerator):
def __init__(self, combination, covariate_sd, use_all=None):
self.combination = combination
self.covariate_sd = covariate_sd
self.use_all = use_all
def get(self, expression, gene):
combination = copy.copy(self.combination)
if self.use_all:
if type(self.use_all) == float:
c = self.use_all
elif self.use_all == "ONE_VAR":
c = math.sqrt(1.0 / len(expression))
elif self.use_all == "FIX_VAR":
c = 1.0
combination["covariate"] = math.sqrt(len(expression)*99)
else:
raise RuntimeError("Unsupported option")
for e in list(expression.keys()):
combination[e] = c
return _pheno_from_combination(expression, combination, self.covariate_sd)
class CombinationOfCorrelatedPhenotypeGenerator(PhenotypeGenerator):
def __init__(self, covariate_coefficient, covariate_sd, threshold):
self.covariate_coefficient = covariate_coefficient
self.threshold = threshold
self.covariate_sd = covariate_sd
def get(self, expression, gene):
# Get the tissue with the most correlated siblings;
# then average them to build a phenotype
values = list(expression.values())
if len(values) == 1:
return None, None
e = values
c = numpy.corrcoef(e)
d = len(expression)
f = 0
r = 0
for i in range(0, d):
f_ = numpy.sum(c[i] > self.threshold)
if f_ > f:
r = i
f = f_
if f<2:
return None, None
which = c[r] > self.threshold
keys = list(expression.keys())
combination = {keys[i]:math.sqrt(1.0/f) for i in range(0, d) if which[i]}
#for i in xrange(0,d):
# combination["covariate_{}".format(i)] = 10.0/f
combination["covariate"] = self.covariate_coefficient
return _pheno_from_combination(expression, combination, self.covariate_sd)
def _pheno_from_combination(expression, combination, covariate_sd):
ok = True
_k = list(expression.keys())[0]
_e = expression[_k]
n = len(_e)
e = numpy.zeros(n)
used = set()
for k, v in combination.items():
if k in expression:
e += expression[k] * v
used.add(k)
elif "covariate" in k:
e += numpy.random.normal(scale=covariate_sd, size=n) * v
used.add(k)
else:
# If we couldn't build a model with the desired combination, abort
ok = False
break
if not ok:
return None, None
_c = {x: v for x, v in combination.items() if x in used}
pheno = e
description = pandas.DataFrame({"variable": list(_c.keys()), "param": list(_c.values())})
return pheno, description
########################################################################################################################
class SExpressionManager(Expression.ExpressionManager):
def __init__(self, em):
self.em = em
self.which = None
def expression_for_gene(self, gene):
e = self.em.expression_for_gene(gene)
if self.which is None:
n = len(e[list(e.keys())[0]])
s = 10000
#self.which = numpy.random.choice([True, False], size=n, p=[s*1.0/n, 1 - s*1.0/n])
self.which = list(numpy.random.choice(range(0,n), size=s, replace=False))
e = {k:v[self.which] for k,v in e.items()}
return e
def get_genes(self): return self.em.get_genes()
def enter(self): return self.em.enter()
def exit(self): self.em.exit()
def context_from_args(args):
#expression_ = HDF5Expression.ExpressionManager(args.hdf5_expression_folder, args.expression_pattern, code_999=args.code_999, standardise= args.standardize_expression)
#expression = SExpressionManager(expression_)
expression = HDF5Expression.ExpressionManager(args.expression_folder, args.expression_pattern,
code_999=args.code_999, standardise=args.standardize_expression)
def _argumentize(x, t, default=1.0):
return t(x) if x is not None else default
p = {x[0]: x[1] for x in args.simulation_parameters} if args.simulation_parameters else {}
covariate_coefficient = _argumentize(p.get("covariate_coefficient"), float)
covariate_sd = _argumentize(p.get("covariate_sd"), float)
if args.simulation_type == "random":
phenotype = RandomPhenotypeGenerator()
elif args.simulation_type == "combination":
use_all = None
if "model_spec" in p:
_c = p.get("model_spec")
if not _c:
_c = {}
else:
_c = _c.split()
_c = {_c[i*2]:float(_c[i*2+1]) for i in range(0,len(_c)//2)}  # integer division: range() needs an int
elif "use_tissues" in p:
_c = p.get("use_tissues").strip().split()
_c = {x:math.sqrt(1.0/len(_c)) for x in _c}
elif "use_all" in p:
_c = {}
if p["use_all"] == "ONE_VAR" or p["use_all"] == "FIX_VAR":
use_all = p["use_all"]
else:
use_all = float(p["use_all"])
_c["covariate"] = covariate_coefficient
phenotype = LinearCombinationPhenotypeGenerator(_c, covariate_sd=covariate_sd, use_all=use_all)
elif args.simulation_type == "combination_from_correlated":
threshold = _argumentize(p.get("threshold"), float, 0.9)
phenotype = CombinationOfCorrelatedPhenotypeGenerator(covariate_coefficient=covariate_coefficient, covariate_sd=covariate_sd, threshold=threshold)
else:
raise RuntimeError("Wrong phenotype simulation spec")
filter = Utilities._filter_from_args(args)
context = Context(expression, phenotype, filter, args.do_predixcan, args.only_truth)
return context
########################################################################################################################
def simulate(gene, context):
save_results = {}
_cb = lambda gene, model, result, vt_projection, variance, model_keys, coefs: mp_callback(gene, model, result, vt_projection, variance, model_keys, coefs, save_results)
_context_mt, _context_p, _description = context.get_mp_simulation(gene)
if _context_mt is None:
return None, None, None
p = None
if _context_p:
p = pandas.DataFrame()
for model,_c in _context_p.items():
p_ = PrediXcanAssociation.predixcan_association(gene, _c)
p_ = PrediXcanAssociation.dataframe_from_results([p_])
p_["model"] = model
p = | pandas.concat([p,p_]) | pandas.concat |
import unittest
import pandas as pd
# fix to allow zip_longest on Python 2.X and 3.X
try: # Python 3
from itertools import zip_longest
except ImportError: # Python 2
from itertools import izip_longest as zip_longest
from math import fabs
from mock import patch, sentinel, Mock, MagicMock
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ib.ext.Execution import Execution
from ib.ext.OrderState import OrderState
from zipline.gens.brokers.ib_broker import IBBroker, TWSConnection
from zipline.testing.fixtures import WithSimParams
from zipline.finance.execution import (StopLimitOrder,
MarketOrder,
StopOrder,
LimitOrder)
from zipline.finance.order import ORDER_STATUS
from zipline.testing.fixtures import (ZiplineTestCase,
WithDataPortal)
@unittest.skip("Failing on CI - Fix later")
class TestIBBroker(WithSimParams,
WithDataPortal,
ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (1, 2)
ASSET_FINDER_EQUITY_SYMBOLS = ("SPY", "XIV")
@staticmethod
def _tws_bars():
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
tws = TWSConnection("localhost:9999:1111")
tws._add_bar('SPY', 12.4, 10,
pd.to_datetime('2017-09-27 10:30:00', utc=True),
10, 12.401, False)
tws._add_bar('SPY', 12.41, 10,
pd.to_datetime('2017-09-27 10:30:40', utc=True),
20, 12.411, False)
tws._add_bar('SPY', 12.44, 20,
pd.to_datetime('2017-09-27 10:31:10', utc=True),
40, 12.441, False)
tws._add_bar('SPY', 12.74, 5,
pd.to_datetime('2017-09-27 10:37:10', utc=True),
45, 12.741, True)
tws._add_bar('SPY', 12.99, 15,
pd.to_datetime('2017-09-27 12:10:00', utc=True),
60, 12.991, False)
tws._add_bar('XIV', 100.4, 100,
pd.to_datetime('2017-09-27 9:32:00', utc=True),
100, 100.401, False)
tws._add_bar('XIV', 100.41, 100,
pd.to_datetime('2017-09-27 9:32:20', utc=True),
200, 100.411, True)
tws._add_bar('XIV', 100.44, 200,
pd.to_datetime('2017-09-27 9:41:10', utc=True),
400, 100.441, False)
tws._add_bar('XIV', 100.74, 50,
pd.to_datetime('2017-09-27 11:42:10', utc=True),
450, 100.741, False)
return tws.bars
@staticmethod
def _create_contract(symbol):
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = 'STK'
return contract
@staticmethod
def _create_order(action, qty, order_type, limit_price, stop_price):
order = Order()
order.m_action = action
order.m_totalQuantity = qty
order.m_auxPrice = stop_price
order.m_lmtPrice = limit_price
order.m_orderType = order_type
return order
@staticmethod
def _create_order_state(status_):
status = OrderState()
status.m_status = status_
return status
@staticmethod
def _create_exec_detail(order_id, shares, cum_qty, price, avg_price,
exec_time, exec_id):
exec_detail = Execution()
exec_detail.m_orderId = order_id
exec_detail.m_shares = shares
exec_detail.m_cumQty = cum_qty
exec_detail.m_price = price
exec_detail.m_avgPrice = avg_price
exec_detail.m_time = exec_time
exec_detail.m_execId = exec_id
return exec_detail
@patch('zipline.gens.brokers.ib_broker.TWSConnection')
def test_get_spot_value(self, tws):
dt = None # dt is not used in real broker
data_freq = 'minute'
asset = self.asset_finder.retrieve_asset(1)
bars = {'last_trade_price': [12, 10, 11, 14],
'last_trade_size': [1, 2, 3, 4],
'total_volume': [10, 10, 10, 10],
'vwap': [12.1, 10.1, 11.1, 14.1],
'single_trade_flag': [0, 1, 0, 1]}
last_trade_times = [pd.to_datetime('2017-06-16 10:30:00', utc=True),
pd.to_datetime('2017-06-16 10:30:11', utc=True),
pd.to_datetime('2017-06-16 10:30:30', utc=True),
pd.to_datetime('2017-06-17 10:31:9', utc=True)]
index = pd.DatetimeIndex(last_trade_times)
broker = IBBroker(sentinel.tws_uri)
tws.return_value.bars = {asset.symbol: pd.DataFrame(
index=index, data=bars)}
price = broker.get_spot_value(asset, 'price', dt, data_freq)
last_trade = broker.get_spot_value(asset, 'last_traded', dt, data_freq)
open_ = broker.get_spot_value(asset, 'open', dt, data_freq)
high = broker.get_spot_value(asset, 'high', dt, data_freq)
low = broker.get_spot_value(asset, 'low', dt, data_freq)
close = broker.get_spot_value(asset, 'close', dt, data_freq)
volume = broker.get_spot_value(asset, 'volume', dt, data_freq)
# Only the last minute is taken into account, therefore
# the first bar is ignored
assert price == bars['last_trade_price'][-1]
assert last_trade == last_trade_times[-1]
assert open_ == bars['last_trade_price'][1]
assert high == max(bars['last_trade_price'][1:])
assert low == min(bars['last_trade_price'][1:])
assert close == bars['last_trade_price'][-1]
assert volume == sum(bars['last_trade_size'][1:])
def test_get_realtime_bars_produces_correct_df(self):
bars = self._tws_bars()
with patch('zipline.gens.brokers.ib_broker.TWSConnection'):
broker = IBBroker(sentinel.tws_uri)
broker._tws.bars = bars
assets = (self.asset_finder.retrieve_asset(1),
self.asset_finder.retrieve_asset(2))
realtime_history = broker.get_realtime_bars(assets, '1m')
asset_spy = self.asset_finder.retrieve_asset(1)
asset_xiv = self.asset_finder.retrieve_asset(2)
assert asset_spy in realtime_history
assert asset_xiv in realtime_history
spy = realtime_history[asset_spy]
xiv = realtime_history[asset_xiv]
assert list(spy.columns) == ['open', 'high', 'low', 'close', 'volume']
assert list(xiv.columns) == ['open', 'high', 'low', 'close', 'volume']
        # There are 159 one-minute bars from the first bar (XIV @ 2017-09-27 9:32:00)
        # to the last bar (SPY @ 2017-09-27 12:10:00), both endpoints included
assert len(realtime_history) == 159
spy_non_na = spy.dropna()
xiv_non_na = xiv.dropna()
assert len(spy_non_na) == 4
assert len(xiv_non_na) == 3
assert spy_non_na.iloc[0].name == pd.to_datetime(
'2017-09-27 10:30:00', utc=True)
assert spy_non_na.iloc[0].open == 12.40
assert spy_non_na.iloc[0].high == 12.41
assert spy_non_na.iloc[0].low == 12.40
assert spy_non_na.iloc[0].close == 12.41
assert spy_non_na.iloc[0].volume == 20
assert spy_non_na.iloc[1].name == pd.to_datetime(
'2017-09-27 10:31:00', utc=True)
assert spy_non_na.iloc[1].open == 12.44
assert spy_non_na.iloc[1].high == 12.44
assert spy_non_na.iloc[1].low == 12.44
assert spy_non_na.iloc[1].close == 12.44
assert spy_non_na.iloc[1].volume == 20
assert spy_non_na.iloc[-1].name == pd.to_datetime(
'2017-09-27 12:10:00', utc=True)
assert spy_non_na.iloc[-1].open == 12.99
assert spy_non_na.iloc[-1].high == 12.99
assert spy_non_na.iloc[-1].low == 12.99
assert spy_non_na.iloc[-1].close == 12.99
assert spy_non_na.iloc[-1].volume == 15
assert xiv_non_na.iloc[0].name == pd.to_datetime(
'2017-09-27 9:32:00', utc=True)
assert xiv_non_na.iloc[0].open == 100.4
assert xiv_non_na.iloc[0].high == 100.41
assert xiv_non_na.iloc[0].low == 100.4
assert xiv_non_na.iloc[0].close == 100.41
assert xiv_non_na.iloc[0].volume == 200
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_new_order_appears_in_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
amount = -4
limit_price = 43.1
stop_price = 6
style = StopLimitOrder(limit_price=limit_price, stop_price=stop_price)
order = broker.order(asset, amount, style)
assert len(broker.orders) == 1
assert broker.orders[order.id] == order
assert order.open
assert order.asset == asset
assert order.amount == amount
assert order.limit == limit_price
assert order.stop == stop_price
assert (order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_loaded_from_open_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-<PASSWORD>')
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
ib_order_id = 3
ib_contract = self._create_contract(str(asset.symbol))
action, qty, order_type, limit_price, stop_price = \
'SELL', 40, 'STP LMT', 4.3, 2
ib_order = self._create_order(
action, qty, order_type, limit_price, stop_price)
ib_state = self._create_order_state('PreSubmitted')
broker._tws.openOrder(ib_order_id, ib_contract, ib_order, ib_state)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.status == ORDER_STATUS.HELD
assert zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == -40
assert zp_order.limit == limit_price
assert zp_order.stop == stop_price
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_loaded_from_exec_details(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
(req_id, ib_order_id, shares, cum_qty,
price, avg_price, exec_time, exec_id) = (7, 3, 12, 40,
12.43, 12.50,
'20160101 14:20', 4)
ib_contract = self._create_contract(str(asset.symbol))
exec_detail = self._create_exec_detail(
ib_order_id, shares, cum_qty, price, avg_price,
exec_time, exec_id)
broker._tws.execDetails(req_id, ib_contract, exec_detail)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == -40
assert zp_order.limit == limit_price
assert zp_order.stop == stop_price
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_orders_updated_from_order_status(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
# orderStatus calls only work if a respective order has been created
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
amount = -4
limit_price = 43.1
stop_price = 6
style = StopLimitOrder(limit_price=limit_price, stop_price=stop_price)
order = broker.order(asset, amount, style)
ib_order_id = order.broker_order_id
status = 'Filled'
filled = 14
remaining = 9
avg_fill_price = 12.4
perm_id = 99
parent_id = 88
last_fill_price = 12.3
client_id = 1111
why_held = ''
broker._tws.orderStatus(ib_order_id,
status, filled, remaining, avg_fill_price,
perm_id, parent_id, last_fill_price, client_id,
why_held)
assert len(broker.orders) == 1
zp_order = list(broker.orders.values())[-1]
assert zp_order.broker_order_id == ib_order_id
assert zp_order.status == ORDER_STATUS.FILLED
assert not zp_order.open
assert zp_order.asset == asset
assert zp_order.amount == amount
assert zp_order.limit == limit_price
assert zp_order.stop == stop_price
assert (zp_order.dt - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_multiple_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
order_count = 0
for amount, order_style in [
(-112, StopLimitOrder(limit_price=9, stop_price=1)),
(43, LimitOrder(limit_price=10)),
(-99, StopOrder(stop_price=8)),
(-32, MarketOrder())]:
order = broker.order(asset, amount, order_style)
order_count += 1
assert order_count == len(broker.orders)
assert broker.orders[order.id] == order
is_buy = amount > 0
assert order.stop == order_style.get_stop_price(is_buy)
assert order.limit == order_style.get_limit_price(is_buy)
def test_order_ref_serdes(self):
        # Even though _create_order_ref and _parse_order_ref are private,
        # it is helpful to test them as they play a key role in re-creating orders
order = self._create_order("BUY", 66, "STP LMT", 13.4, 44.2)
serialized = IBBroker._create_order_ref(order)
deserialized = IBBroker._parse_order_ref(serialized)
assert deserialized['action'] == order.m_action
assert deserialized['qty'] == order.m_totalQuantity
assert deserialized['order_type'] == order.m_orderType
assert deserialized['limit_price'] == order.m_lmtPrice
assert deserialized['stop_price'] == order.m_auxPrice
assert (deserialized['dt'] - pd.to_datetime('now', utc=True) <
pd.Timedelta('10s'))
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_transactions_not_created_for_incompl_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-123')
broker._tws.nextValidId(0)
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
amount = -4
limit_price = 43.1
stop_price = 6
style = StopLimitOrder(limit_price=limit_price, stop_price=stop_price)
order = broker.order(asset, amount, style)
assert not broker.transactions
assert len(broker.orders) == 1
assert broker.orders[order.id].open
ib_order_id = order.broker_order_id
ib_contract = self._create_contract(str(asset.symbol))
action, qty, order_type, limit_price, stop_price = \
'SELL', 4, 'STP LMT', 4.3, 2
ib_order = self._create_order(
action, qty, order_type, limit_price, stop_price)
ib_state = self._create_order_state('PreSubmitted')
broker._tws.openOrder(ib_order_id, ib_contract, ib_order, ib_state)
broker._tws.orderStatus(ib_order_id, status='Cancelled', filled=0,
remaining=4, avg_fill_price=0.0, perm_id=4,
parent_id=4, last_fill_price=0.0, client_id=32,
why_held='')
assert not broker.transactions
assert len(broker.orders) == 1
assert not broker.orders[order.id].open
broker._tws.orderStatus(ib_order_id, status='Inactive', filled=0,
remaining=4, avg_fill_price=0.0, perm_id=4,
parent_id=4, last_fill_price=0.0,
client_id=1111, why_held='')
assert not broker.transactions
assert len(broker.orders) == 1
assert not broker.orders[order.id].open
@patch('zipline.gens.brokers.ib_broker.symbol_lookup')
def test_transactions_created_for_complete_orders(self, symbol_lookup):
with patch('zipline.gens.brokers.ib_broker.TWSConnection.connect'):
broker = IBBroker("localhost:9999:1111", account_id='TEST-<PASSWORD>')
broker._tws.nextValidId(0)
asset = self.asset_finder.retrieve_asset(1)
symbol_lookup.return_value = asset
order_count = 0
for amount, order_style in [
(-112, StopLimitOrder(limit_price=9, stop_price=1)),
(43, LimitOrder(limit_price=10)),
(-99, StopOrder(stop_price=8)),
(-32, MarketOrder())]:
order = broker.order(asset, amount, order_style)
broker._tws.orderStatus(order.broker_order_id, 'Filled',
filled=int(fabs(amount)), remaining=0,
avg_fill_price=111, perm_id=0, parent_id=1,
last_fill_price=112, client_id=1111,
why_held='')
contract = self._create_contract(str(asset.symbol))
(shares, cum_qty, price, avg_price, exec_time, exec_id) = \
(int(fabs(amount)), int(fabs(amount)), 12.3, 12.31,
pd.to_datetime('now', utc=True), order_count)
exec_detail = self._create_exec_detail(
order.broker_order_id, shares, cum_qty,
price, avg_price, exec_time, exec_id)
broker._tws.execDetails(0, contract, exec_detail)
order_count += 1
assert len(broker.transactions) == order_count
transactions = [tx
for tx in broker.transactions.values()
if tx.order_id == order.id]
assert len(transactions) == 1
assert broker.transactions[exec_id].asset == asset
assert broker.transactions[exec_id].amount == order.amount
assert (broker.transactions[exec_id].dt -
                    pd.to_datetime('now', utc=True) < pd.Timedelta('10s'))
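# Standalone sanity check (not part of the test class above) for the bar count in
# test_get_realtime_bars_produces_correct_df: a minute-frequency index from the
# first bar (9:32) to the last bar (12:10), endpoints included, has 159 entries.
_minute_index = pd.date_range('2017-09-27 09:32', '2017-09-27 12:10', freq='1min', tz='UTC')
assert len(_minute_index) == 159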
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
        # this is a temporary method to initialize the species/diet food item lists (this will be replaced with
        # a method to access a SQL database containing the properties)
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# csv.DictReader uses first line in file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
            sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.ix[:,'Scientific Name']
self.com_name = dr.ix[:,'Common Name']
self.taxa = dr.ix[:,'Taxa']
self.order = dr.ix[:,'Order']
self.usfws_id = dr.ix[:,'USFWS Species ID (ENTITY_ID)']
self.body_wgt= dr.ix[:,'BW (g)']
self.diet_item = dr.ix[:,'Food item']
self.h2o_cont = dr.ix[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_app_maxs")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.boom_hgt_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
        self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
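# Illustrative decoding of the toxicity-input naming convention documented above
# (sketch only; this mapping is not used by the model code itself):
TOX_NAME_PARTS = {
    "dbt": "dose-based toxicity",
    "cbt": "concentration-based toxicity",
    "arbt": "application rate-based toxicity",
    "1inmill_mort": "1-in-a-million mortality",
    "1inten_mort": "1-in-ten (10%) mortality",
}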
from pymongo import MongoClient, DESCENDING
import pandas as pd
from portfolio.iportfolio import IPortfolio
from database.portfoliodb import PortfolioDb
class APortfolio(IPortfolio):
def __init__(self,name):
self.name = name
self.db = PortfolioDb(name)
super().__init__()
def load():
self.strats = []
def sim():
        return pd.DataFrame([{}])
import numpy as np
import os
import pandas as pd
from numpy.testing import assert_equal, assert_almost_equal
from svrimg.utils.get_tables import (_create_unid, _create_dtime, _preprocess_svrgis_table,
_create_svrgis_table, _create_index_table,
get_table, get_pred_tables)
test_data_dir = os.environ.get('TEST_DATA_DIR')
def test_create_unid():
import pandas as pd
d = {'om': [1, 2], 'date': ['2011-04-27', '2011-04-27'], 'time': ['12:00:00', '12:15:00']}
    test_unid = pd.DataFrame.from_dict(d)
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import warnings
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import numpy as np # type: ignore
import pandas as pd # type: ignore
from elasticsearch import Elasticsearch
# Default number of rows displayed (different to pandas where ALL could be displayed)
DEFAULT_NUM_ROWS_DISPLAYED = 60
DEFAULT_CHUNK_SIZE = 10000
DEFAULT_CSV_BATCH_OUTPUT_SIZE = 10000
DEFAULT_PROGRESS_REPORTING_NUM_ROWS = 10000
DEFAULT_ES_MAX_RESULT_WINDOW = 10000 # index.max_result_window
DEFAULT_PAGINATION_SIZE = 5000 # for composite aggregations
PANDAS_VERSION: Tuple[int, ...] = tuple(
int(part) for part in pd.__version__.split(".") if part.isdigit()
)[:2]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
    EMPTY_SERIES_DTYPE = pd.Series().dtype
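# Quick illustration (illustrative values only) of the PANDAS_VERSION parsing above:
# only leading numeric components are kept, so "1.5.3" -> (1, 5) and a dev string
# such as "2.0.0.dev0" still yields (2, 0).
_parsed_example = tuple(int(part) for part in "1.5.3".split(".") if part.isdigit())[:2]
assert _parsed_example == (1, 5)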
# -*- coding: utf-8 -*-
"""Causal graphs have implications that can be tested in the context of a specific dataset.
This module includes algorithms to perform those tests.
"""
from collections import abc
from typing import Iterable, Optional, Union
import pandas as pd
from ananke.graphs import SG
from tqdm import tqdm
from .conditional_independencies import get_conditional_independencies
from ..struct import DSeparationJudgement
from ..util.stat_utils import cressie_read
class Falsifications(abc.Sequence):
"""A list of variables pairs that failed the D-separation and covariance test.
Has an extra 'evidence' property that is a dictionary.
- Keys are the d-separated variable pairs
- Values are the covariances measured between them.
"""
def __init__(self, failures, evidence: pd.DataFrame):
"""Create Falsifications result.
:param failures: Sequence of implications that did not pass
:param evidence: Collection of all implications tested
"""
self._failures = failures
self.evidence = evidence
def __getitem__(self, i):
return self._failures[i]
def __len__(self):
return len(self._failures)
def __repr__(self):
return repr(self._failures) + "+evidence"
def falsifications(
to_test: Union[SG, Iterable[DSeparationJudgement]],
df: pd.DataFrame,
significance_level: float = 0.05,
max_given: Optional[int] = None,
verbose: bool = False,
) -> Falsifications:
"""Test conditional independencies implied by a graph.
:param to_test: Either a graph to generate d-separation from or a list of D-separations to check.
:param df: Data to check for consistency with a causal implications
:param significance_level: Significance for p-value test
:param max_given: The maximum set size in the power set of the vertices minus the d-separable pairs
:param verbose: If true, use tqdm for status updates.
:return: Falsifications report
"""
if isinstance(to_test, SG):
to_test = get_conditional_independencies(to_test, max_conditions=max_given, verbose=verbose)
variances = {
(judgement.left, judgement.right, judgement.conditions): cressie_read(
judgement.left, judgement.right, judgement.conditions, df, boolean=False
)
for judgement in tqdm(to_test, disable=not verbose, desc="Checking conditionals")
}
rows = [
(left, right, given, chi, p, dof)
for (left, right, given), (chi, dof, p) in variances.items()
]
    evidence = pd.DataFrame(rows, columns=["left", "right", "given", "chi^2", "p", "dof"])
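# Hedged usage sketch (not from the source): build a toy evidence table with the
# same columns as above and flag rows that would count as falsifications at a
# chosen significance level. All numeric values here are made up.
_toy_rows = [("X", "Y", ("Z",), 1.2, 0.27, 1), ("A", "B", (), 9.8, 0.002, 1)]
_toy_evidence = pd.DataFrame(_toy_rows, columns=["left", "right", "given", "chi^2", "p", "dof"])
_toy_failures = _toy_evidence[_toy_evidence["p"] < 0.05]  # assumed reading of significance_level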
from contextlib import suppress
from typing import List
from pandas import DataFrame, to_datetime, to_timedelta
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import DateExtractStep
from weaverbird.pipeline.steps.date_extract import DATE_INFO
from .utils.cast import cast_to_int
OPERATIONS_MAPPING = {
'minutes': 'minute',
'seconds': 'second',
'dayOfYear': 'dayofyear',
}
def execute_date_extract(
step: DateExtractStep,
df: DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> DataFrame:
date_info: List[DATE_INFO]
if step.operation: # for retrocompatibility
date_info = [step.operation]
new_columns = [step.new_column_name or f'{step.column}_{step.operation}']
else:
date_info = step.date_info
new_columns = step.new_columns
for dt_info, new_col in zip(date_info, new_columns):
serie_dt = df[step.column].dt
if dt_info == 'week':
# cast in float and not in int to manage NaN properly
result = serie_dt.strftime('%U').astype(float)
elif dt_info == 'dayOfWeek':
# result should be between 1 (sunday) and 7 (saturday)
result = (serie_dt.dayofweek + 2) % 7
result = result.replace({0: 7})
elif dt_info == 'isoYear':
result = serie_dt.isocalendar().year
elif dt_info == 'isoWeek':
result = serie_dt.isocalendar().week
elif dt_info == 'isoDayOfWeek':
result = serie_dt.isocalendar().day
elif dt_info == 'firstDayOfYear':
result = to_datetime(DataFrame({'year': serie_dt.year, 'month': 1, 'day': 1}))
elif dt_info == 'firstDayOfMonth':
result = to_datetime(
DataFrame({'year': serie_dt.year, 'month': serie_dt.month, 'day': 1})
)
elif dt_info == 'firstDayOfWeek':
# dayofweek should be between 1 (sunday) and 7 (saturday)
dayofweek = (serie_dt.dayofweek + 2) % 7
dayofweek = dayofweek.replace({0: 7})
# we subtract a number of days corresponding to(dayOfWeek - 1)
result = df[step.column] - to_timedelta(dayofweek - 1, unit='d')
# the result should be returned with 0-ed time information
            result = to_datetime(result.dt.date)
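# Standalone sketch (throwaway data) of the 'dayOfWeek' / 'firstDayOfWeek' logic
# above, using the Sunday=1 ... Saturday=7 convention.
_days = to_datetime(DataFrame({'year': [2021, 2021, 2021], 'month': [3, 3, 3], 'day': [7, 8, 13]}))  # Sun, Mon, Sat
_dow = ((_days.dt.dayofweek + 2) % 7).replace({0: 7})                            # -> 1, 2, 7
_week_start = to_datetime((_days - to_timedelta(_dow - 1, unit='d')).dt.date)    # all 2021-03-07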
import pytest
import operator
import pandas as pd
from pandas.core import ops
from .base import BaseExtensionTests
class BaseOpsUtil(BaseExtensionTests):
def get_op_from_name(self, op_name):
short_opname = op_name.strip('_')
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
def check_opname(self, s, op_name, other, exc=Exception):
op = self.get_op_from_name(op_name)
self._check_op(s, op, other, op_name, exc)
def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
if exc is None:
result = op(s, other)
expected = s.combine(other, op)
self.assert_series_equal(result, expected)
else:
with pytest.raises(exc):
op(s, other)
def _check_divmod_op(self, s, op, other, exc=Exception):
        # divmod has multiple return values, so check separately
if exc is None:
result_div, result_mod = op(s, other)
if op is divmod:
expected_div, expected_mod = s // other, s % other
else:
expected_div, expected_mod = other // s, other % s
self.assert_series_equal(result_div, expected_div)
self.assert_series_equal(result_mod, expected_mod)
else:
with pytest.raises(exc):
divmod(s, other)
class BaseArithmeticOpsTests(BaseOpsUtil):
"""Various Series and DataFrame arithmetic ops methods.
Subclasses supporting various ops should set the class variables
to indicate that they support ops of that kind
* series_scalar_exc = TypeError
* frame_scalar_exc = TypeError
* series_array_exc = TypeError
* divmod_exc = TypeError
"""
series_scalar_exc = TypeError
frame_scalar_exc = TypeError
series_array_exc = TypeError
divmod_exc = TypeError
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# series & scalar
op_name = all_arithmetic_operators
s = pd.Series(data)
self.check_opname(s, op_name, s.iloc[0], exc=self.series_scalar_exc)
@pytest.mark.xfail(run=False, reason="_reduce needs implementation",
strict=True)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op_name = all_arithmetic_operators
df = pd.DataFrame({'A': data})
self.check_opname(df, op_name, data[0], exc=self.frame_scalar_exc)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op_name = all_arithmetic_operators
        s = pd.Series(data)
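# Sketch (illustrative only) of the reverse-operator fallback in get_op_from_name
# above: there is no operator.radd, so the forward operator is wrapped with the
# arguments swapped.
_short = "__radd__".strip("_")            # "radd"
_rop = getattr(operator, _short[1:])      # operator.add
_op = lambda x, y: _rop(y, x)
assert _op(2, 3) == 5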
import pandas as pd
def run_fengxian(path_1, path_2):
"""
name:“求风险值函数”
function: 将分组求和后的销项发票信息和进项发票信息合并; 求销售净利率
path_1:销项发票
path_2:进项发票
"""
df_1 = pd.read_csv(path_1, encoding='UTF-8') # 销项
df_2 = pd.read_csv(path_2, encoding='UTF-8') # 进项
# 删除没用的列
del df_1['发票号码']
del df_1['开票日期']
del df_1['购方单位代号']
del df_2['发票号码']
del df_2['开票日期']
del df_2['销方单位代号']
    # Keep only the rows whose invoice status is "valid invoice"
df_1 = df_1[(df_1['发票状态'].isin(['有效发票']))]
df_2 = df_2[(df_2['发票状态'].isin(['有效发票']))]
    # Drop every row in the table that contains a negative value
df_1 = df_1[df_1['金额'].apply(lambda x: x >= 0)]
df_1 = df_1[df_1['税额'].apply(lambda x: x >= 0)]
df_2 = df_2[df_2['金额'].apply(lambda x: x >= 0)]
df_2 = df_2[df_2['税额'].apply(lambda x: x >= 0)]
    # Drop the invoice-status column so the groupby sum below works (the text column cannot be summed)
del df_1['发票状态']
del df_2['发票状态']
    # Group by enterprise code and sum
df_1 = df_1.groupby('企业代号').sum()
df_1 = df_1.rename(columns={'金额': '金额_销项', '税额': '税额_销项', '价税合计': '价税合计_销项'})
df_2 = df_2.groupby('企业代号').sum()
df_2 = df_2.rename(columns={'金额': '金额_进项', '税额': '税额_进项', '价税合计': '价税合计_进项'})
    # Merge the two tables
result = pd.concat([df_1, df_2], axis=1)
    # Compute the net profit margin on sales
result['净利润'] = result['金额_销项'] - result['金额_进项'] + result['税额_进项'] - result['税额_销项']
result['销售收入'] = result['金额_销项']
result['销售净利率'] = result['净利润'] / result['销售收入']
    # Compute the risk value
result["exp_销售净利率"] = result["销售净利率"].apply(pd.np.exp)
result['风险值'] = 1 / (result['exp_销售净利率'])
return result
def run_fenxian_p3(path):
"""
name:“求问题三的风险值”
path:求出的风险文件路径
"""
df = pd.read_csv(path, encoding='GBK')
result = df
    result['净利润'] = result['突发影响函数值'] * (result['金额_销项'] - result['金额_进项'] + result['税额_进项'] - result['税额_销项'])
result['销售收入'] = result['金额_销项']
result['销售净利率'] = result['净利润'] / result['销售收入']
    # Compute the risk value
    result["exp_销售净利率"] = result["销售净利率"].apply(pd.np.exp)
    result['风险值'] = 1 / (result['exp_销售净利率'])
    return result
def run_fenxian_qujian(path):
"""
name:“求风险值函数区间”
function: 【总年】将求出来的风险值去掉<1的【单年】不用
path:求出的风险文件路径
"""
    df = pd.read_csv(path, encoding='GBK')
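# Toy numeric check (made-up figures) of the risk formula used above:
# margin = net profit / sales revenue, risk = 1 / exp(margin), so a higher
# margin maps to a lower risk value.
import math
_margin = 20.0 / 100.0           # 0.2
_risk = 1 / math.exp(_margin)    # ~0.819
assert 0.81 < _risk < 0.82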
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
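# Standalone illustration (not part of the original test module) of the
# duplicate-date behaviour covered by TestTimeSeriesDuplicates above: indexing a
# repeated timestamp yields a Series, a unique timestamp yields a scalar.
def _demo_duplicate_date_indexing():
    idx = DatetimeIndex(['2000-01-02', '2000-01-02', '2000-01-03'])
    s = Series([1.0, 2.0, 3.0], index=idx)
    assert isinstance(s[datetime(2000, 1, 2)], Series)   # two matches -> sub-Series
    assert s[datetime(2000, 1, 3)] == 3.0                 # single match -> scalar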
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = | DatetimeIndex(data) | pandas.DatetimeIndex |
#!/usr/bin/env python3
import argparse
from collections import OrderedDict
from glob import glob
import logging
import numpy
import pandas
import scipy.special
import scipy.stats
from statsmodels.sandbox.stats.multicomp import multipletests
from singleqc import configure_logging, read_concentrations
logger = logging.getLogger('tube_likelihood')
def main(cmdline=None):
parser = make_parser()
args = parser.parse_args(cmdline)
configure_logging(args)
concentrations = read_concentrations(args.concentrations)
sep = args.sep
data = []
pool = read_rsem_quantifications(args.pool, 'pool', args.quantification, concentrations)
data.extend(pool)
single = read_rsem_quantifications(args.single, 'single', args.quantification, concentrations)
data.extend(single)
data.extend(read_combined_quantifications(args.combined_pool, 'pool', args.quantification, concentrations, sep))
data.extend(read_combined_quantifications(args.combined_single, 'single', args.quantification, concentrations, sep))
if len(data) == 0:
parser.error('some libraries need to be specified')
data = pandas.concat(data)
data = log_likelihood(data)
data = data.sort_values(by="run_LR", ascending=True)
data = chi(data)
if args.output:
data.to_csv(args.output, sep='\t', index=False)
else:
print(data.to_string(index=False))
def make_parser():
parser = argparse.ArgumentParser()
group = parser.add_argument_group('combined quantification file')
group.add_argument('--combined-pool', action='append', default=[],
help='file with merged pool-split quantifications to read')
group.add_argument('--combined-single', action='append', default=[],
help='file with merged single-cell quantifications to read')
group = parser.add_argument_group('raw RSEM files')
group.add_argument('-p', '--pool', action='append', default=[],
help='pool-split RSEM quantification files')
group.add_argument('-s', '--single', action='append', default=[],
help='single-cell RSEM quantification files')
group.add_argument('-q', '--quantification', default='FPKM',
help='Which RSEM quantification column to use')
parser.add_argument('-c', '--concentrations', required=True,
help='name of file with concentrations for spike ins')
parser.add_argument('-o', '--output', help='output name')
parser.add_argument('--sep', help='quantification file separator', choices=['\t', ','], default='\t')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-d', '--debug', action='store_true')
return parser
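# Illustrative invocation (not from the original source; file names are hypothetical):
#
#   python tube_likelihood.py -c spike_concentrations.tsv \
#       --combined-pool pool_quant.tsv --combined-single single_quant.tsv \
#       -o tube_likelihood.tsv
#
# This reads the spike-in concentrations, merges the pool-split and single-cell
# quantifications, runs the log-likelihood and chi-square steps shown in main()
# above, and writes a tab-separated report.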
def read_combined_quantifications(filenames, tube_type, quantification_name, concentrations, sep=','):
"""Read several combined quantification files.
These are gene_id vs library_id tables; a column named "gene_name",
if present, will be ignored.
"""
data = []
for filename in filenames:
data.append(read_combined_quantification(filename, tube_type, quantification_name, concentrations, sep))
return data
def read_combined_quantification(filename, tube_type, quantification_name, concentrations, sep=','):
"""Read a combined quantification files gene_id vs library_id
this is a gene_id vs library_id tables, if there is a column named "gene_name" it
will be ignored.
"""
dtype = {'gene_id': str, 'gene_name': str}
quantifications = pandas.read_csv(filename, dtype=dtype, sep=sep, header=0)
quantifications = quantifications.set_index('gene_id')
quantifications = quantifications.reindex(concentrations.index)
data = []
for column in quantifications.columns:
if column == 'gene_name':
logger.info('Ignoring gene_name column')
else:
spikes = make_spike_success_table(
quantifications[column].to_frame(quantification_name),
concentrations,
quantification_name,
column,
tube_type)
data.append(spikes)
return pandas.concat(data)
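# Sketch of the combined quantification layout this function expects (hypothetical
# gene and library identifiers, shown only for illustration):
#
#   gene_id,gene_name,lib_001,lib_002
#   ERCC-00002,spike_1,1204.5,0.0
#   ERCC-00003,spike_2,88.1,13.7
#
# Each library column is turned into its own spike success table via
# make_spike_success_table; the optional gene_name column is skipped.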
def read_rsem_quantifications(patterns, tube_type, quantification_name, concentrations):
"""Read a specific quantification type column out of RSEM quantification files.
"""
if patterns is None or len(patterns) == 0:
return []
data = []
for pattern in patterns:
filenames = glob(pattern)
for filename in filenames:
rsem = | pandas.read_csv(filename, sep='\t', header=0, usecols=['gene_id', quantification_name]) | pandas.read_csv |
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin, BaseEstimator
from ...utils import check_type, check_has_columns
def _nan_to_end(enc):
'''
:param enc: an fLabelEncoder instance
:return: enc after moving nan in the classes_ attribute to the end of the list
'''
# boolean saying if null present in data
null_cmp = np.asarray(pd.isnull(enc.classes_)) | \
np.asarray(list(map(lambda label: str(label).lower() == 'nan', enc.classes_)))
has_null = np.any(null_cmp)
if has_null:
null_index = np.nonzero(null_cmp)[0][0]
enc.classes_.append(enc.classes_.pop(null_index))
return enc
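# Illustrative sketch (assumption, not part of the original module): the intended
# effect of _nan_to_end on a hypothetical encoder whose classes_ list contains NaN.
#
#   enc.classes_ = ['a', float('nan'), 'b']
#   enc = _nan_to_end(enc)
#   enc.classes_   # -> ['a', 'b', nan]  (the NaN label is moved to the end)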
class fOneHotEncoder(BaseEstimator, TransformerMixin):
def __init__(self, sep='_', dummy_na=False, columns=None):
self.sep = sep
self.dummy_na = dummy_na
self.columns = columns
def fit(self, X, y=None):
# encoding columns as integers {0, 1, ..., n_classes}
self.enc_ = fOrdinalEncoder(nan_handle = 'hard', columns=self.columns, copy=True).fit(X)
# propagating nan values to the end of the classes_ list in fLabelEncoder's
for i, enc in enumerate(self.enc_.encoders_):
self.enc_.encoders_[i] = _nan_to_end(enc)
return self
def transform(self, X):
X_ord = self.enc_.transform(X)
names = []
arrays = []
for i, column in enumerate(self.enc_.columns_):
classes = self.enc_.encoders_[i].classes_
if self.dummy_na:
rows = np.eye(len(classes))
else:
rows = np.vstack([np.eye(len(classes)-1), np.zeros(len(classes)-1)[np.newaxis, :]])
array = rows.take(X_ord[column].values, axis=0)
name_list = [str(column) + self.sep + str(cls) for cls in classes]
if not self.dummy_na:
name_list = name_list[:-1]
arrays.append(array)
names.extend(name_list)
arrays = np.hstack(arrays)
dummies = | pd.DataFrame(arrays,index=X.index, columns=names) | pandas.DataFrame |
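# Illustrative sketch (assumption, not from the original source): what fOneHotEncoder
# is expected to produce for a single hypothetical column 'color' with classes
# ['blue', 'red', NaN] and the default dummy_na=False.
#
#   input:  color = ['blue', 'red', None]
#   output columns: ['color_blue', 'color_red']   # the NaN class gets no column
#   rows:           [1, 0], [0, 1], [0, 0]        # NaN maps to the all-zero row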
import unittest
import pandas as pd
import calendar
import time
from application.model_functions import *
class Testing(unittest.TestCase):
def test_isempty(self):
df1 = pd.DataFrame()
self.assertTrue(isempty_df(df1))
df2 = pd.DataFrame([[0,'abcd',0,1,123]],columns=['a','b','c','d','e'])
self.assertFalse(isempty_df(df2))
def test_convert_to_epoch(self):
#TODO: REVIEW FUNCTION
pass
# df1 = pd.DataFrame([["Wednesday, 27-Jul-16 11:37:51 UTC"]],columns=['time'])
# df1 = convert_to_epoch(df1, "time")
# df2 = pd.DataFrame([[1469619471]],columns=['time'])
# self.assertEqual(df1['time'][0], df2['time'][0])
def test_room_number(self):
df1 = pd.DataFrame([['B002']],columns=['room'])
df2 = | pd.DataFrame([['B106']],columns=['room']) | pandas.DataFrame |
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
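# Note on these helpers (summarising the test docstrings below): CustomConstraint._run
# dispatches on the signature of the supplied function, so three flavours are
# exercised here -- table-level (table_data), table-plus-column (table_data, column),
# and column-level (column_data). Each dummy is an identity/no-op stand-in used to
# verify which call pattern CustomConstraint attempts.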
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice: first attempting ``table_data`` and
``column``, then falling back to ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice: first attempting ``table_data``
and ``column``, then falling back to ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice: first attempting ``table_data`` and
``column``, then falling back to ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs if and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the scalar column is a list.
Input:
- scalar_column = 0
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
assert low == ['a']
assert high == 3
assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
assert low == 3
assert high == ['b']
assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == [3]
assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == ['b', 'c']
assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` is the same as ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method succeeds when ``drop`` differs from ``scalar``
and ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'low'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names, each suffixed
with a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the given column names, each suffixed
with a '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should join the corresponding column names with a
'#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should join the corresponding column names with a
'#' token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should be the pairs of column names joined
by the '#' token.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
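# A rough summary of the naming convention asserted above (sketch only, not
# the actual implementation): when one side is a scalar, each constrained
# column gets the '#' token appended ( ['a', 'b'] -> ['a#', 'b#'] ); when both
# sides are columns, the two names are joined by the token ( high='b', low='a'
# -> ['b#a'] ). Columns that already contain '#' accumulate another token.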
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is an int column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
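# A minimal sketch of the selection rule asserted by the five ``_fit`` tests
# above (pseudocode, assuming the attribute names used in this suite):
#
#     if drop == 'high':        columns_to_reconstruct = high columns
#     elif drop == 'low':       columns_to_reconstruct = low columns
#     elif scalar == 'high':    columns_to_reconstruct = low columns
#     else:                     columns_to_reconstruct = high columns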
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` is ``['a#']``
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_columns`` is ``['b#a']``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
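# A minimal sketch, assuming ``_fit`` keeps the dtype(s) of the side that will
# later be rebuilt, e.g. for low='a', high='b' something equivalent to:
#
#     instance._dtype = [table_data['b'].dtype]    # kind 'i', 'f' or 'M'
#
# so that ``reverse_transform`` can cast the reconstructed column back.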
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
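# The two tests above are consistent with a vectorised comparison such as
# (sketch only; scalars are substituted for the missing side when ``scalar``
# is set, and the NaN tests further below add their own handling):
#
#     valid = table_data['b'] > table_data['a']     # strict=True
#     valid = table_data['b'] >= table_data['a']    # strict=False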
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is multi column, then
the values in that column should all be higher than
in the low column.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
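# A minimal sketch of the diff column asserted above, assuming the transform
# is a logarithm of the distance plus one:
#
#     diff = np.log((table_data['b'] - table_data['a']) + 1)    # -> np.log(4)
#
# The remaining ``_transform`` tests only vary which original columns get dropped.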
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
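# A minimal sketch of the datetime case, assuming the timedelta is converted
# to nanoseconds before the log (which is why the expected value is
# np.log(1_000_000_001) for a one-second gap):
#
#     delta_ns = (table_data['b'] - table_data['a']).dt.total_seconds() * 1e9
#     diff = np.log(delta_ns + 1)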
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of given columns + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of given columns + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of given columns + 1.
Input:
- Table with given data.
Output:
- Same table with additional columns of the logarithms + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
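# A minimal sketch of the inverse operation asserted above (assumption: it
# mirrors the log-of-distance-plus-one used in ``_transform``):
#
#     distance = np.exp(transformed['a#b']) - 1                         # -> 3
#     rebuilt = (transformed['a'] + distance).astype(instance._dtype[0])
#
# When ``_drop`` is None, only the rows flagged as invalid are overwritten.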
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [ | pd.Series([1]) | pandas.Series |
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
This module trains the NFM model based on the (updating) data and saves
it in order for the recommendation engine to import it.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#import datetime as dt
import logging
import pandas as pd
import pickle
from time import sleep
from sklearn.decomposition import NMF
logging.basicConfig(#filename='RecommenderLog.log',
format='%(asctime)s:%(levelname)s: %(message)s')
def update_model(df):
"""
trains the model based on the latest input
ARGUMENT: The pandas dataframe containing the recommandations
"""
# Changing dataframe to numpy ndarray for building R matrix
R = df.values
#create a model and set the hyperparameters
# 20 Genres (from EDA.py) + 106 years -> 126 number of components
model = NMF(n_components=126, init='random', random_state=1, max_iter=100000, solver='cd')
# fitting the model to R
fit_model = model.fit(R)
return fit_model, R
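# A minimal usage sketch for the function above (the output file name is
# hypothetical):
#
# fit_model, R = update_model(df_final)
# with open('../models/nmf_model.pkl', 'wb') as fh:
#     pickle.dump(fit_model, fh)    # persisted for the recommendation engine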
if __name__ == '__main__':
while True:
# Loading the up to date preprocessed rating file
# X-Axis -> userId , Y-Axis -> movieId
df_final = | pd.read_csv('../data/preprocessed/df_final.csv') | pandas.read_csv |
"""
The BIGMACC script.
"""
import os
import pandas as pd
import numpy as np
import logging
import xarray as xr
import zarr
from itertools import repeat
import time
import cea.utilities.parallel
logging.getLogger('numba').setLevel(logging.WARNING)
import cea.config
import cea.utilities
import cea.inputlocator
import cea.demand.demand_main
import cea.resources.radiation_daysim.radiation_main
import cea.bigmacc.bigmacc_rules
import cea.bigmacc.wesbrook_DH_single
import cea.bigmacc.wesbrook_DH_multi
import cea.utilities.dbf
import cea.datamanagement.archetypes_mapper
import cea.datamanagement.data_initializer
import cea.analysis.costs.system_costs
import cea.analysis.lca.main
import cea.bigmacc.bigmacc_util as util
__author__ = "<NAME>"
__copyright__ = ""
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = ""
__email__ = ""
__status__ = ""
def generate_building_list(config):
locator = cea.inputlocator.InputLocator(config.scenario)
data = pd.read_csv(locator.get_total_demand())
return np.array(data['Name'])
def hourly_xr_get_hourly_results(config, bldg):
locator = cea.inputlocator.InputLocator(config.scenario)
return pd.read_csv(locator.get_demand_results_file(bldg))
def hourly_xr_create_hourly_results_df(config):
buildings = generate_building_list(config)
interior_temp_dict = dict()
pv_gen_dict = dict()
operative_temp_dict = dict()
district_dhw_dict = dict()
district_heat_dict = dict()
district_cool_dict = dict()
electrical_aux_dict = dict()
electrical_dhw_dict = dict()
electrical_heat_dict = dict()
electrical_cool_dict = dict()
heatloss_rad_dict = dict()
heatgain_solar_dict = dict()
grid_dict = dict()
electrical_appliances_dict = dict()
electrical_ev_dict = dict()
electrical_refrig_dict = dict()
electrical_data_cool_dict = dict()
electrical_ind_process_dict = dict()
electrical_data_dict = dict()
ng_dhw_dict = dict()
ng_heat_dict = dict()
heat_enduse_sys_dict = dict()
heat_enduse_dict = dict()
dhw_enduse_sys_dict = dict()
dhw_enduse_dict = dict()
cool_enduse_sys_dict = dict()
cool_enduse_dict = dict()
for bldg in buildings.tolist():
print(f' - Adding {bldg} to the dataarray.')
data = hourly_xr_get_hourly_results(config, bldg)
interior_temp_dict[bldg] = data['T_int_C'] # 0
pv_gen_dict[bldg] = data['PV_kWh'] # 1
operative_temp_dict[bldg] = data['theta_o_C'] # 2
district_dhw_dict[bldg] = data['DH_ww_kWh'] # 3
district_heat_dict[bldg] = data['DH_hs_kWh'] # 4
district_cool_dict[bldg] = data['DC_cs_kWh'] # 5
electrical_aux_dict[bldg] = data['Eaux_kWh'] # 6
electrical_dhw_dict[bldg] = data['E_ww_kWh'] # 7
electrical_heat_dict[bldg] = data['E_hs_kWh'] # 8
electrical_cool_dict[bldg] = data['E_cs_kWh'] # 9
heatloss_rad_dict[bldg] = data['I_rad_kWh'] # 10
heatgain_solar_dict[bldg] = data['I_sol_kWh'] # 11
grid_dict[bldg] = data['GRID_kWh'] # 12
electrical_appliances_dict[bldg] = data['Eal_kWh'] # 13
electrical_ev_dict[bldg] = data['Ev_kWh'] # 14
electrical_refrig_dict[bldg] = data['E_cre_kWh'] # 15
electrical_data_cool_dict[bldg] = data['E_cdata_kWh'] # 16
electrical_ind_process_dict[bldg] = data['Epro_kWh'] # 17
electrical_data_dict[bldg] = data['Edata_kWh'] # 18
ng_dhw_dict[bldg] = data['NG_ww_kWh'] # 19
ng_heat_dict[bldg] = data['NG_hs_kWh'] # 20
heat_enduse_sys_dict[bldg] = data['Qhs_sys_kWh'] # 21
heat_enduse_dict[bldg] = data['Qhs_kWh'] # 22
dhw_enduse_sys_dict[bldg] = data['Qww_sys_kWh'] # 23
dhw_enduse_dict[bldg] = data['Qww_kWh'] # 24
cool_enduse_sys_dict[bldg] = data['Qcs_sys_kWh'] # 25
cool_enduse_dict[bldg] = data['Qcs_kWh'] # 26
return [interior_temp_dict, pv_gen_dict, operative_temp_dict, district_dhw_dict,
district_heat_dict, district_cool_dict, electrical_aux_dict, electrical_dhw_dict, electrical_heat_dict,
electrical_cool_dict, heatloss_rad_dict, heatgain_solar_dict, grid_dict, electrical_appliances_dict,
electrical_ev_dict, electrical_refrig_dict, electrical_data_cool_dict,
electrical_ind_process_dict, electrical_data_dict, ng_dhw_dict, ng_heat_dict,
heat_enduse_sys_dict, heat_enduse_dict, dhw_enduse_sys_dict, dhw_enduse_dict,
cool_enduse_sys_dict, cool_enduse_dict]
def hourly_xr_get_annual_results(config):
locator = cea.inputlocator.InputLocator(config.scenario)
embodied_carbon_path = locator.get_lca_embodied()
operational_carbon_path = locator.get_lca_operation()
building_tac_path = locator.get_building_tac_file()
supply_syst_path = locator.get_costs_operation_file()
emb_carbon = pd.read_csv(embodied_carbon_path)['GHG_sys_embodied_tonCO2'].sum()
op_carbon_district = pd.read_csv(operational_carbon_path)['GHG_sys_district_scale_tonCO2'].sum()
op_carbon_building = pd.read_csv(operational_carbon_path)['GHG_sys_building_scale_tonCO2'].sum()
build_costs_opex = pd.read_csv(building_tac_path)['opex_building_systems'].sum()
build_costs_capex = pd.read_csv(building_tac_path)['capex_building_systems'].sum()
supply_costs_opex = pd.read_csv(supply_syst_path)['Opex_sys_USD'].sum()
supply_costs_capex = pd.read_csv(supply_syst_path)['Capex_total_sys_USD'].sum()
return [emb_carbon, op_carbon_district, op_carbon_building, build_costs_opex, build_costs_capex, supply_costs_opex,
supply_costs_capex]
def hourly_xr_create_hourly_dataset(config):
scenario = config.general.parent
strategy = config.bigmacc.key
time_arr = pd.date_range("{}-01-01".format(scenario.split('_')[1]), periods=8760, freq="h")
data = hourly_xr_create_hourly_results_df(config)
annual_results = hourly_xr_get_annual_results(config)
print(' - Creating dataset.')
d = xr.Dataset(
data_vars=dict(
interior_temp_C=(["times", "buildings"], pd.DataFrame.from_dict(data[0]).to_numpy()),
pv_generated_kwh=(["times", "buildings"], pd.DataFrame.from_dict(data[1]).to_numpy()),
operative_temp_C=(["times", "buildings"], pd.DataFrame.from_dict(data[2]).to_numpy()),
district_dhw_kwh=(["times", "buildings"], pd.DataFrame.from_dict(data[3]).to_numpy()),
district_heat_kwh=(["times", "buildings"], pd.DataFrame.from_dict(data[4]).to_numpy()),
district_cool_kwh=(["times", "buildings"], pd.DataFrame.from_dict(data[5]).to_numpy()),
electric_aux_kwh=(["times", "buildings"], pd.DataFrame.from_dict(data[6]).to_numpy()),
electric_dhw_kwh=(["times", "buildings"], pd.DataFrame.from_dict(data[7]).to_numpy()),
electric_heating_kwh=(["times", "buildings"], pd.DataFrame.from_dict(data[8]).to_numpy()),
electric_cooling_kwh=(["times", "buildings"], pd.DataFrame.from_dict(data[9]).to_numpy()),
radiative_heat_loss_kwh=(["times", "buildings"], | pd.DataFrame.from_dict(data[10]) | pandas.DataFrame.from_dict |
import optuna
import numpy as np
import pandas as pd
from functools import partial
from . import model_bank
import mlflow
from .AAMPreprocessor import AAMPreprocessor
import joblib
from .FastAIutils import *
from .metrics import model_metrics, pretty_scores, get_scores
from loguru import logger
from pathlib import Path
from tabulate import tabulate
import pprint
import random
class ProjectConfigurator:
def __init__(self, config) -> None:
if config:
self.key_attrs = [i for i in dir(config) if not i.startswith('__')]
for key in self.key_attrs:
setattr(self, key, getattr(config, key))
self.create_project_folder()
self.add_logger_path()
self.add_project_config_to_logs(config)
def create_project_folder(self):
self.output_path = Path(self.BASE_OUTPUT_PATH) / Path(self.PROJECT_NAME) / Path(self.SUB_PROJECT_NAME)
self.output_path.mkdir(parents=True, exist_ok=True)
self.models_path = self.output_path / 'models'
self.models_path.mkdir(parents=True, exist_ok=True)
# def copy_config_file(self):
# import shutil
# shutil.copy('config.py', str(self.output_path))
def add_logger_path(self):
logger_name = str(random.randint(0,10000))
self.logger = logger.bind(name = logger_name)
self.logger.add(str(self.output_path/'logfile.log'), filter=lambda record: record["extra"].get("name") == logger_name)
def add_project_config_to_logs(self, config):
bc_attrs = {i : getattr(config, i) for i in self.key_attrs }
self.logger.info('\n'+pprint.pformat(bc_attrs))
class ARKAutoML(AAMPreprocessor):
def __init__(self, data = None, config=None,
n_folds= 5, eval_metric='recall',
n_trials=10, model_algos=['xgb','rf'], loading=False):
self.config = ProjectConfigurator(config)
if not loading:
super().__init__(data, cat_cols=config.cat_cols, cont_cols=config.cont_cols, y_names=config.TARGET_COL,
n_folds=n_folds, fold_method=config.FOLD_METHOD)
self.eval_metric = eval_metric
self.n_trials = n_trials
self.model_algos = model_algos
self.logger = self.config.logger
self.total_features = len(self.cat_cols + self.cont_cols)
self.mpb = model_bank.ModelParamBank(total_features = self.total_features)
def create_optuna_optimization(self):
self.study = optuna.create_study(direction='maximize', study_name=self.config.PROJECT_NAME, load_if_exists=True)
mlflow.set_experiment(self.config.PROJECT_NAME)
optimization_function = partial(self.objective)
self.study.optimize(optimization_function, n_trials=self.n_trials)
def objective(self, trial):
valid_metrics = {}
for fold in range(self.n_folds):
self.mpb = model_bank.ModelParamBank(total_features = self.total_features, trial = trial)
# self.trial_number_model[trial.number] = model_algo
model_algo = trial.suggest_categorical("model_algo", self.model_algos)
model = self.mpb.get_model_with_optuna_params(model_algo)
model.fit(self.X_train[fold], self.y_train[fold])
# train_metrics = self.model_metrics(model, self.X_train[fold], self.y_train[fold])
valid_metrics[fold] = model_metrics(model, self.X_test[fold], self.y_test[fold], self.logger)
cross_validated_metrics = pd.DataFrame(valid_metrics).mean(axis=1).to_dict()
self.logger.info(f'''Trial No : {trial.number}, {self.eval_metric} : {np.round(cross_validated_metrics[self.eval_metric], 4)}, Params : {trial.params}
{pretty_scores(cross_validated_metrics)}''')
with mlflow.start_run():
mlflow.log_params(trial.params)
for fold in range(self.n_folds): mlflow.log_metrics(valid_metrics[fold]) # metrics for each fold
mlflow.log_metrics(cross_validated_metrics) # Adding the cross validated metrics
tags = {
'eval_metric' : self.eval_metric,
'model_type' : 'classification',
'model_algo' : model_algo,
'train_shape' : self.X_train[fold].shape,
'test_shape' : self.X_test[fold].shape,
'sub_project' : self.config.SUB_PROJECT_NAME
}
mlflow.set_tags(tags)
return cross_validated_metrics[self.eval_metric]
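# A minimal usage sketch, assuming a project config module shaped like the one
# consumed by ProjectConfigurator above (the names below are hypothetical):
#
# aml = ARKAutoML(data=df, config=config, n_folds=5, eval_metric='recall',
#                 n_trials=50, model_algos=['xgb', 'rf'])
# aml.create_optuna_optimization()    # runs the objective above once per trial
# aml.logger.info(aml.study.best_params)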
@staticmethod
def calculate_metrics_based_on_different_cut_offs(model, X, y, cut_offs):
full_metrics = []
for co in cut_offs:
loop_dict = get_scores(y, np.where(model.predict_proba(X)[:,1]>co, 1, 0))
loop_dict['prob_cut_off'] = co
full_metrics.append(loop_dict)
cols = ['prob_cut_off'] + [i for i in loop_dict.keys() if i!='prob_cut_off'] #Reordering the columns
return | pd.DataFrame(full_metrics) | pandas.DataFrame |
from collections import defaultdict
from datetime import datetime as dt
import base64
import json
import datetime
import streamlit as st
import pandas as pd
import numpy as np
import plotly.graph_objects as go
import plotly.io as pio
import extaedio.amp_consts
import extaedio.amp_st_functs
from extaedio.amp_functs import (
build_plot,
get_plot_help_digest,
get_plot_docstring,
)
def get_final_index(default_index: int, options: list, key: str, overrides: dict) -> int:
if key in overrides:
return options.index(overrides[key])
else:
return default_index
class ParamInitializer(object):
def __init__(self, parent, params_doc, overrides, show_help) -> None:
self._parent = parent
self._params_doc = params_doc
self._overrides = overrides
self._show_help = show_help
def __call__(
self,
param_name,
widget_params,
widget_type="selectbox",
doc_override=None,
lock: bool = False,
):
if self._overrides and lock:
if param_name in self._overrides:
self._parent.markdown(
f"**{param_name}** <- {self._overrides[param_name]}"
)
self._parent.info(f"**{param_name}** is locked in report mode")
ret = self._overrides[param_name]
else:
return None
elif widget_type:
f = getattr(self._parent, widget_type)
if param_name in self._overrides:
if "options" in widget_params and "index" in widget_params:
widget_params["index"] = get_final_index(
default_index=widget_params["index"],
options=widget_params["options"],
key=param_name,
overrides=self._overrides,
)
if "options" in widget_params and "default" in widget_params:
widget_params["default"] = self._overrides[param_name]
elif "value" in widget_params:
widget_params["value"] = self._overrides[param_name]
ret = None if f is None else f(**widget_params)
else:
ret = None
if ret == extaedio.amp_consts.PICK_ONE:
self._parent.warning(
f"Please pic a column for the {widget_params.get('label', 'previous parameter')}."
)
if (self._show_help == "all") or (
(self._show_help == "mandatory") and (ret == extaedio.amp_consts.PICK_ONE)
):
self.print_help(
param_name=param_name,
params_doc=doc_override if doc_override is not None else self._params_doc,
)
return ret
def print_help(self, param_name, params_doc):
if isinstance(params_doc, str):
self._parent.markdown(params_doc)
else:
for k, v in params_doc.items():
p, *_ = k.split(":")
if p == param_name:
self._parent.markdown("".join(v))
break
else:
self._parent.warning(f"Missing doc for {param_name}")
self._parent.markdown("___")
def _max_width_():
max_width_str = f"max-width: 100%;"
st.markdown(
f"""
<style>
.reportview-container .main .block-container{{
{max_width_str}
}}
</style>
""",
unsafe_allow_html=True,
)
@st.cache
def wrangle_the_data(df, dw_options):
# Sort
if dw_options["sort_columns"]:
df = df.sort_values(
dw_options["sort_columns"],
ascending=not dw_options["invert_sort"],
)
# Filter columns
df = df[dw_options["kept_columns"]]
# Filter rows
if len(dw_options["filters"]) > 0:
for k, v in dw_options["filters"].items():
df = df[df[k].isin(v)]
# Bin columns
if len(dw_options["binners"]) > 0:
for k, v in dw_options["binners"].items():
df[k] = | pd.cut(df[k], v) | pandas.cut |
import warnings
from datetime import datetime
import pytest
import pandas as pd
from mssql_dataframe.connect import connect
from mssql_dataframe.core import custom_warnings, custom_errors, create, conversion, conversion_rules
from mssql_dataframe.core.write import insert, _exceptions
pd.options.mode.chained_assignment = "raise"
class package:
def __init__(self, connection):
self.connection = connection.connection
self.create = create.create(self.connection)
self.create_meta = create.create(self.connection, include_metadata_timestamps=True)
self.insert = insert.insert(self.connection, autoadjust_sql_objects=True)
self.insert_meta = insert.insert(self.connection, include_metadata_timestamps=True, autoadjust_sql_objects=True)
@pytest.fixture(scope="module")
def sql():
db = connect(database="tempdb", server="localhost")
yield package(db)
db.connection.close()
def test_insert_autoadjust_errors(sql):
table_name = "##test_insert_autoadjust_errors"
# create table with column for each conversion rule
columns = conversion_rules.rules['sql_type'].to_numpy()
columns = {'_'+x:x for x in columns}
sql.create.table(table_name, columns=columns)
# create dataframes for each conversion rule that should fail insert
boolean = [3]
exact_numeric = ['a', '2-1', 1.1, datetime.now()]
approximate_numeric = ['a', '2-1',datetime.now()]
date_time = ['a', 1, 1.1]
character_string = [1, datetime.now()]
dataframe = [
pd.DataFrame({'_bit': boolean}),
pd.DataFrame({'_tinyint': exact_numeric}),
pd.DataFrame({'_smallint': exact_numeric}),
pd.DataFrame({'_int': exact_numeric}),
pd.DataFrame({'_bigint': exact_numeric}),
pd.DataFrame({'_float': approximate_numeric}),
pd.DataFrame({'_time': date_time}),
pd.DataFrame({'_date': date_time}),
pd.DataFrame({'_datetime2': date_time}),
pd.DataFrame({'_varchar': character_string}),
| pd.DataFrame({'_nvarchar': character_string}) | pandas.DataFrame |
import cleaning
import codecs
import pandas as pd
import subprocess
def main():
doc_clean,document_tweets=cleaning.get_clean_docs(0)
file = codecs.open('data.txt', 'w', 'utf-8')
doc_list=[]
print (len(doc_clean))
print(len(document_tweets))
for doc in doc_clean:
document_text=" ".join(doc)
file.write(document_text+'\n')
p = subprocess.Popen(["sh", "runExample.sh"], shell=True)
print(p.communicate())
result_doc=open("C:/Users/brohi/OneDrive/Desktop/BTM-master/output/model/k5.pz_d",'r')
topic=[]
for doc in result_doc.readlines():
doc_dist=doc.split()
topic.append(doc_dist.index(max(doc_dist)))
    df = pd.DataFrame({'text': document_tweets[:len(topic)]})
se = | pd.Series(topic) | pandas.Series |
#!/usr/bin/env python
import pandas as pd
import numpy as np
from typing import List
class DataSet(pd.core.frame.DataFrame):
"""A represenation of a dataset
This is basically a pandas dataframe with a set of "metadata" columns
that will be removed when the dataframe is converted to a numpy array
Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
        .. versionchanged:: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
metadata_columns : Array-like
A list of metadata columns that are already contained in the columns parameter.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
DataFrame.from_items : From sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard.
    Notes
    -----
Based on https://notes.mikejarrett.ca/storing-metadata-in-pandas-dataframes/
"""
def __init__(
self,
data=None,
index=None,
columns=None,
metadata_columns=[],
units=None,
dtype=None,
copy=False,
):
        # Column MultiIndex level names
        level_names = ["NAME", "TYPE"]
        if units:
            level_names.append("UNITS")
if isinstance(columns, pd.MultiIndex):
pass
elif columns is not None:
column_names = columns
if metadata_columns:
types = [
"METADATA" if x in metadata_columns else "DATA"
for x in column_names
]
else:
types = ["DATA" for _ in range(len(column_names))]
arrays = [column_names, types]
if units:
arrays.append(units)
tuples = list(zip(*arrays))
columns = pd.MultiIndex.from_tuples(tuples, names=level_names)
pd.core.frame.DataFrame.__init__(
self, data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
@staticmethod
def from_df(df: pd.DataFrame, metadata_columns: List = [], units: List = []):
"""Create Dataset from a pandas dataframe
Arguments
----------
df: pandas.DataFrame
Dataframe to be converted to a DataSet
metadata_columns: list, optional
names of the columns in the dataframe that are metadata columns
units: list, optional
A list of objects representing the units of the columns
"""
column_names = df.columns.to_numpy()
if metadata_columns:
types = [
"METADATA" if x in metadata_columns else "DATA" for x in df.columns
]
else:
types = ["DATA" for _ in range(len(column_names))]
arrays = [column_names, types]
levels = ["NAME", "TYPE"]
if units:
arrays.append(units)
levels.append("UNITS")
tuples = list(zip(*arrays))
columns = pd.MultiIndex.from_tuples(tuples, names=levels)
return DataSet(df.to_numpy(), columns=columns, index=df.index)
@staticmethod
def read_csv(filepath_or_buffer, **kwargs):
"""Create a DataSet from a csv"""
header = kwargs.get("header", [0, 1])
index_col = kwargs.get("index_col", 0)
df = | pd.read_csv(filepath_or_buffer, header=header, index_col=index_col) | pandas.read_csv |
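# Minimal usage sketch for the DataSet class above (column names and values are
# invented for illustration): from_df tags each column as DATA or METADATA in the
# column MultiIndex, so metadata can ride along with the frame and, per the class
# docstring, be dropped when the frame is converted to a numpy array.
import pandas as pd

raw = pd.DataFrame(
    {"temperature": [30.0, 40.0], "yield": [0.6, 0.8], "experiment_id": ["a", "b"]}
)
ds = DataSet.from_df(raw, metadata_columns=["experiment_id"])
print(ds.columns.get_level_values("TYPE").tolist())  # ['DATA', 'DATA', 'METADATA']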
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
from contextlib import contextmanager
from datetime import timedelta
from functools import partial
import pickle
import sys
from types import GetSetDescriptorType
from unittest import TestCase
import uuid
import warnings
from nose_parameterized import parameterized
from numpy import full, int32, int64
import pandas as pd
from pandas.util.testing import assert_frame_equal
from six import PY2, viewkeys
import sqlalchemy as sa
from zipline.assets import (
Asset,
Equity,
Future,
AssetDBWriter,
AssetFinder,
)
from zipline.assets.synthetic import (
make_commodity_future_info,
make_rotating_equity_info,
make_simple_equity_info,
)
from six import itervalues, integer_types
from toolz import valmap
from zipline.assets.asset_writer import (
check_version_info,
write_version_info,
_futures_defaults,
SQLITE_MAX_VARIABLE_NUMBER,
)
from zipline.assets.asset_db_schema import ASSET_DB_VERSION
from zipline.assets.asset_db_migrations import (
downgrade
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
AssetDBVersionError,
SidsNotFound,
SymbolNotFound,
AssetDBImpossibleDowngrade,
ValueNotFoundForField,
)
from zipline.testing import (
all_subindices,
empty_assets_db,
parameter_space,
tmp_assets_db,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.fixtures import (
WithAssetFinder,
ZiplineTestCase,
WithTradingCalendars,
)
from zipline.utils.range import range
@contextmanager
def build_lookup_generic_cases(asset_finder_type):
"""
Generate test cases for the type of asset finder specific by
asset_finder_type for test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'duplicated',
'start_date': dupe_0_start.value,
'end_date': dupe_0_end.value,
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'duplicated',
'start_date': dupe_1_start.value,
'end_date': dupe_1_end.value,
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'unique',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': 'TEST',
},
],
index='sid'
)
fof14_sid = 10000
futures = pd.DataFrame.from_records(
[
{
'sid': fof14_sid,
'symbol': 'FOF14',
'root_symbol': 'FO',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': 'FUT',
},
],
index='sid'
)
root_symbols = pd.DataFrame({
'root_symbol': ['FO'],
'root_symbol_id': [1],
'exchange': ['CME'],
})
with tmp_assets_db(
equities=equities, futures=futures, root_symbols=root_symbols) \
as assets_db:
finder = asset_finder_type(assets_db)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
fof14 = finder.retrieve_asset(fof14_sid)
cf = finder.create_continuous_future(
root_symbol=fof14.root_symbol, offset=0, roll_style='volume',
)
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
yield (
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'DUPLICATED', dupe_0_start, dupe_0),
(finder, 'DUPLICATED', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'UNIQUE', unique_start, unique),
(finder, 'UNIQUE', None, unique),
# Futures
(finder, 'FOF14', None, fof14),
# Future symbols should be unique, but including as_of date
# make sure that code path is exercised.
(finder, 'FOF14', unique_start, fof14),
# Futures int
(finder, fof14_sid, None, fof14),
# Future symbols should be unique, but including as_of date
# make sure that code path is exercised.
(finder, fof14_sid, unique_start, fof14),
# ContinuousFuture
(finder, cf, None, cf),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('DUPLICATED', 'UNIQUE'), dupe_0_start, [dupe_0, unique]),
(finder, ('DUPLICATED', 'UNIQUE'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('DUPLICATED', 2, 'UNIQUE', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
# Futures and Equities
(finder, ['FOF14', 0], None, [fof14, assets[0]]),
# ContinuousFuture and Equity
(finder, [cf, 0], None, [cf, assets[0]]),
)
class AssetTestCase(TestCase):
# Dynamically list the Asset properties we want to test.
asset_attrs = [name for name, value in vars(Asset).items()
if isinstance(value, GetSetDescriptorType)]
# Very wow
asset = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
auto_close_date=pd.Timestamp('2014-06-26 11:21AM', tz='UTC'),
exchange='THE MOON',
)
asset3 = Asset(3, exchange="test")
asset4 = Asset(4, exchange="test")
asset5 = Asset(5, exchange="still testing")
def test_asset_object(self):
the_asset = Asset(5061, exchange="bar")
self.assertEquals({5061: 'foo'}[the_asset], 'foo')
self.assertEquals(the_asset, 5061)
self.assertEquals(5061, the_asset)
self.assertEquals(the_asset, the_asset)
self.assertEquals(int(the_asset), 5061)
self.assertEquals(str(the_asset), 'Asset(5061)')
def test_to_and_from_dict(self):
asset_from_dict = Asset.from_dict(self.asset.to_dict())
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_from_dict, attr),
)
def test_asset_is_pickleable(self):
asset_unpickled = pickle.loads(pickle.dumps(self.asset))
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_unpickled, attr),
)
def test_asset_comparisons(self):
s_23 = Asset(23, exchange="test")
s_24 = Asset(24, exchange="test")
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertEqual(int32(23), s_23)
self.assertEqual(int64(23), s_23)
self.assertEqual(s_23, int32(23))
self.assertEqual(s_23, int64(23))
# Check all int types (includes long on py2):
for int_type in integer_types:
self.assertEqual(int_type(23), s_23)
self.assertEqual(s_23, int_type(23))
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
# Compare to a value that doesn't fit into a platform int:
self.assertNotEqual(s_23, sys.maxsize + 1)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(self.asset3 < self.asset4)
self.assertFalse(self.asset4 < self.asset4)
self.assertFalse(self.asset5 < self.asset4)
def test_le(self):
self.assertTrue(self.asset3 <= self.asset4)
self.assertTrue(self.asset4 <= self.asset4)
self.assertFalse(self.asset5 <= self.asset4)
def test_eq(self):
self.assertFalse(self.asset3 == self.asset4)
self.assertTrue(self.asset4 == self.asset4)
self.assertFalse(self.asset5 == self.asset4)
def test_ge(self):
self.assertFalse(self.asset3 >= self.asset4)
self.assertTrue(self.asset4 >= self.asset4)
self.assertTrue(self.asset5 >= self.asset4)
def test_gt(self):
self.assertFalse(self.asset3 > self.asset4)
self.assertFalse(self.asset4 > self.asset4)
self.assertTrue(self.asset5 > self.asset4)
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(self.asset3 < 'a')
self.assertIsNotNone('a' < self.asset3)
else:
with self.assertRaises(TypeError):
self.asset3 < 'a'
with self.assertRaises(TypeError):
'a' < self.asset3
class TestFuture(WithAssetFinder, ZiplineTestCase):
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
2468: {
'symbol': 'OMH15',
'root_symbol': 'OM',
'notice_date': pd.Timestamp('2014-01-20', tz='UTC'),
'expiration_date': pd.Timestamp('2014-02-20', tz='UTC'),
'auto_close_date': pd.Timestamp('2014-01-18', tz='UTC'),
'tick_size': .01,
'multiplier': 500.0,
'exchange': "TEST",
},
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'multiplier': 1.0,
'exchange': 'TEST',
},
},
orient='index',
)
@classmethod
def init_class_fixtures(cls):
super(TestFuture, cls).init_class_fixtures()
cls.future = cls.asset_finder.lookup_future_symbol('OMH15')
cls.future2 = cls.asset_finder.lookup_future_symbol('CLG06')
def test_str(self):
strd = str(self.future)
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = repr(self.future)
self.assertIn("Future", reprd)
self.assertIn("2468", reprd)
self.assertIn("OMH15", reprd)
self.assertIn("root_symbol=%s'OM'" % ('u' if PY2 else ''), reprd)
self.assertIn(
"notice_date=Timestamp('2014-01-20 00:00:00+0000', tz='UTC')",
reprd,
)
self.assertIn(
"expiration_date=Timestamp('2014-02-20 00:00:00+0000'",
reprd,
)
self.assertIn(
"auto_close_date=Timestamp('2014-01-18 00:00:00+0000'",
reprd,
)
self.assertIn("tick_size=0.01", reprd)
self.assertIn("multiplier=500", reprd)
def test_reduce(self):
assert_equal(
pickle.loads(pickle.dumps(self.future)).to_dict(),
self.future.to_dict(),
)
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
for field in _futures_defaults.keys():
self.assertTrue(field in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
def test_lookup_future_symbol(self):
"""
Test the lookup_future_symbol method.
"""
om = TestFuture.asset_finder.lookup_future_symbol('OMH15')
self.assertEqual(om.sid, 2468)
self.assertEqual(om.symbol, 'OMH15')
self.assertEqual(om.root_symbol, 'OM')
self.assertEqual(om.notice_date, pd.Timestamp('2014-01-20', tz='UTC'))
self.assertEqual(om.expiration_date,
pd.Timestamp('2014-02-20', tz='UTC'))
self.assertEqual(om.auto_close_date,
pd.Timestamp('2014-01-18', tz='UTC'))
cl = TestFuture.asset_finder.lookup_future_symbol('CLG06')
self.assertEqual(cl.sid, 0)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('#&?!')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('FOOBAR')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('XXX99')
class AssetFinderTestCase(WithTradingCalendars, ZiplineTestCase):
asset_finder_type = AssetFinder
def write_assets(self, **kwargs):
self._asset_writer.write(**kwargs)
def init_instance_fixtures(self):
super(AssetFinderTestCase, self).init_instance_fixtures()
conn = self.enter_instance_context(empty_assets_db())
self._asset_writer = AssetDBWriter(conn)
self.asset_finder = self.asset_finder_type(conn)
def test_blocked_lookup_symbol_query(self):
# we will try to query for more variables than sqlite supports
# to make sure we are properly chunking on the client side
as_of = pd.Timestamp('2013-01-01', tz='UTC')
# we need more sids than we can query from sqlite
nsids = SQLITE_MAX_VARIABLE_NUMBER + 10
sids = range(nsids)
frame = pd.DataFrame.from_records(
[
{
'sid': sid,
'symbol': 'TEST.%d' % sid,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for sid in sids
]
)
self.write_assets(equities=frame)
assets = self.asset_finder.retrieve_equities(sids)
assert_equal(viewkeys(assets), set(sids))
def test_lookup_symbol_delimited(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'TEST.%d' % i,
'company_name': "company%d" % i,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for i in range(3)
]
)
self.write_assets(equities=frame)
finder = self.asset_finder
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
# we do it twice to catch caching bugs
for i in range(2):
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST', as_of)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST1', as_of)
# '@' is not a supported delimiter
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST@1', as_of)
# Adding an unnecessary fuzzy shouldn't matter.
for fuzzy_char in ['-', '/', '_', '.']:
self.assertEqual(
asset_1,
finder.lookup_symbol('TEST%s1' % fuzzy_char, as_of)
)
def test_lookup_symbol_fuzzy(self):
metadata = pd.DataFrame.from_records([
{'symbol': 'PRTY_HRD', 'exchange': "TEST"},
{'symbol': 'BRKA', 'exchange': "TEST"},
{'symbol': 'BRK_A', 'exchange': "TEST"},
])
self.write_assets(equities=metadata)
finder = self.asset_finder
dt = pd.Timestamp('2013-01-01', tz='UTC')
# Try combos of looking up PRTYHRD with and without a time or fuzzy
# Both non-fuzzys get no result
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', None)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', dt)
# Both fuzzys work
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', dt, fuzzy=True))
# Try combos of looking up PRTY_HRD, all returning sid 0
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt, fuzzy=True))
# Try combos of looking up BRKA, all returning sid 1
self.assertEqual(1, finder.lookup_symbol('BRKA', None))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt))
self.assertEqual(1, finder.lookup_symbol('BRKA', None, fuzzy=True))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt, fuzzy=True))
# Try combos of looking up BRK_A, all returning sid 2
self.assertEqual(2, finder.lookup_symbol('BRK_A', None))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt))
self.assertEqual(2, finder.lookup_symbol('BRK_A', None, fuzzy=True))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt, fuzzy=True))
def test_lookup_symbol_change_ticker(self):
T = partial(pd.Timestamp, tz='utc')
metadata = pd.DataFrame.from_records(
[
# sid 0
{
'symbol': 'A',
'asset_name': 'Asset A',
'start_date': T('2014-01-01'),
'end_date': T('2014-01-05'),
'exchange': "TEST",
},
{
'symbol': 'B',
'asset_name': 'Asset B',
'start_date': T('2014-01-06'),
'end_date': T('2014-01-10'),
'exchange': "TEST",
},
# sid 1
{
'symbol': 'C',
'asset_name': 'Asset C',
'start_date': T('2014-01-01'),
'end_date': T('2014-01-05'),
'exchange': "TEST",
},
{
'symbol': 'A', # claiming the unused symbol 'A'
'asset_name': 'Asset A',
'start_date': T('2014-01-06'),
'end_date': T('2014-01-10'),
'exchange': "TEST",
},
],
index=[0, 0, 1, 1],
)
self.write_assets(equities=metadata)
finder = self.asset_finder
# note: these assertions walk forward in time, starting at assertions
# about ownership before the start_date and ending with assertions
# after the end_date; new assertions should be inserted in the correct
# locations
# no one held 'A' before 01
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('A', T('2013-12-31'))
# no one held 'C' before 01
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('C', T('2013-12-31'))
for asof in pd.date_range('2014-01-01', '2014-01-05', tz='utc'):
# from 01 through 05 sid 0 held 'A'
A_result = finder.lookup_symbol('A', asof)
assert_equal(
A_result,
finder.retrieve_asset(0),
msg=str(asof),
)
# The symbol and asset_name should always be the last held values
assert_equal(A_result.symbol, 'B')
assert_equal(A_result.asset_name, 'Asset B')
# from 01 through 05 sid 1 held 'C'
C_result = finder.lookup_symbol('C', asof)
assert_equal(
C_result,
finder.retrieve_asset(1),
msg=str(asof),
)
# The symbol and asset_name should always be the last held values
assert_equal(C_result.symbol, 'A')
assert_equal(C_result.asset_name, 'Asset A')
# no one held 'B' before 06
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('B', T('2014-01-05'))
# no one held 'C' after 06, however, no one has claimed it yet
# so it still maps to sid 1
assert_equal(
finder.lookup_symbol('C', T('2014-01-07')),
finder.retrieve_asset(1),
)
for asof in pd.date_range('2014-01-06', '2014-01-11', tz='utc'):
# from 06 through 10 sid 0 held 'B'
# we test through the 11th because sid 1 is the last to hold 'B'
# so it should ffill
B_result = finder.lookup_symbol('B', asof)
assert_equal(
B_result,
finder.retrieve_asset(0),
msg=str(asof),
)
assert_equal(B_result.symbol, 'B')
assert_equal(B_result.asset_name, 'Asset B')
# from 06 through 10 sid 1 held 'A'
# we test through the 11th because sid 1 is the last to hold 'A'
# so it should ffill
A_result = finder.lookup_symbol('A', asof)
assert_equal(
A_result,
finder.retrieve_asset(1),
msg=str(asof),
)
assert_equal(A_result.symbol, 'A')
assert_equal(A_result.asset_name, 'Asset A')
def test_lookup_symbol(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'existing',
'start_date': date.value,
'end_date': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
self.write_assets(equities=df)
finder = self.asset_finder
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('NON_EXISTING', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol('EXISTING', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol('EXISTING', date)
self.assertEqual(result.symbol, 'EXISTING')
self.assertEqual(result.sid, i)
def test_fail_to_write_overlapping_data(self):
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later end date.
{
'sid': 2,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2013-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later start_date
{
'sid': 3,
'symbol': 'multiple',
'start_date': pd.Timestamp('2011-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
]
)
with self.assertRaises(ValueError) as e:
self.write_assets(equities=df)
self.assertEqual(
str(e.exception),
"Ambiguous ownership for 1 symbol, multiple assets held the"
" following symbols:\n"
"MULTIPLE:\n"
" intersections: (('2010-01-01 00:00:00', '2012-01-01 00:00:00'),"
" ('2011-01-01 00:00:00', '2012-01-01 00:00:00'))\n"
" start_date end_date\n"
" sid \n"
" 1 2010-01-01 2012-01-01\n"
" 2 2010-01-01 2013-01-01\n"
" 3 2011-01-01 2012-01-01"
)
def test_lookup_generic(self):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
with build_lookup_generic_cases(self.asset_finder_type) as cases:
for finder, symbols, reference_date, expected in cases:
results, missing = finder.lookup_generic(symbols,
reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_none_raises(self):
"""
If lookup_symbol is vectorized across multiple symbols, and one of them
is None, want to raise a TypeError.
"""
with self.assertRaises(TypeError):
self.asset_finder.lookup_symbol(None, | pd.Timestamp('2013-01-01') | pandas.Timestamp |
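# Quick sketch of the timestamp convention used throughout the asset-finder tests
# above: a tz-aware pandas.Timestamp whose .value is the integer nanoseconds since
# the epoch, which is the form stored in the start_date/end_date columns.
import pandas as pd

as_of = pd.Timestamp('2013-01-01', tz='UTC')
print(as_of.value)                                   # 1356998400000000000
print(pd.Timestamp(as_of.value, tz='UTC') == as_of)  # True, the round trip is lossless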
"""Tests for detection peaks nodes"""
from numbers import Number
import numpy as np
import os
import pandas as pd
import pandas.util.testing as tm
import pytest
from timeflux.helpers.testing import ReadData, Looper
from timeflux_dsp.nodes.peaks import LocalDetect, RollingDetect
@pytest.fixture(scope="module")
def ppg_generator():
"""Create object to mimic data streaming """
    # Signal of 300 points (sum of two sinusoids at 0.5 Hz and 10 Hz) sampled at 50 kHz.
df = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'test_data_ppg.csv'), index_col=None)
df = pd.DataFrame(
index= | pd.to_datetime(df["index"].values) | pandas.to_datetime |
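# Small sketch of the fixture's index rebuild above: pandas.to_datetime turns the
# stored string timestamps back into a DatetimeIndex so the frame can be replayed as
# a stream. The timestamps and PPG values below are invented for illustration.
import pandas as pd

raw = pd.DataFrame({"index": ["2020-01-01 00:00:00.00", "2020-01-01 00:00:00.02"],
                    "PPG": [0.1, 0.3]})
stream = pd.DataFrame(raw[["PPG"]].values, index=pd.to_datetime(raw["index"].values),
                      columns=["PPG"])
print(stream.index.dtype)  # datetime64[ns]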
import sys
import pandas as pd
import numpy as np
import streamlit as st
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_auc_score, confusion_matrix
from matplotlib.lines import Line2D
sys.path.append('src')
from models import KernelClassifier
# Define parameter names (AUs) and target label (EMOTIONS)
PARAM_NAMES = np.loadtxt('data/au_names_new.txt', dtype=str).tolist()
EMOTIONS = np.array(['anger', 'disgust', 'fear', 'happy', 'sadness', 'surprise'])
SUBS = [str(s).zfill(2) for s in range(1, 61)]
plt.style.use('dark_background')
@st.cache(show_spinner=False)
def _load_data(n_subs=60):
X_all, y_all = [], []
for sub in SUBS[:n_subs]:
data = pd.read_csv(f'data/ratings/sub-{sub}_ratings.tsv', sep='\t', index_col=0)
data = data.query("emotion != 'other'")
data = data.loc[data.index != 'empty', :]
X, y = data.iloc[:, :-2], data.iloc[:, -2]
X_all.append(X)
y_all.append(y)
return X_all, y_all
@st.cache(show_spinner=False)
def _run_analysis(mapp, X_all, y_all, beta, kernel, analysis_type):
ktype = 'similarity' if kernel in ['cosine', 'sigmoid', 'linear'] else 'distance'
model = KernelClassifier(au_cfg=mapp, param_names=PARAM_NAMES, kernel=kernel, ktype=ktype,
binarize_X=False, normalization='softmax', beta=beta)
model.fit(None, None)
scores = np.zeros((len(SUBS), len(EMOTIONS)))
confmat = np.zeros((6, 6))
# Compute model performance per subject!
for i, (X, y) in enumerate(zip(X_all, y_all)):
# Predict data + compute performance (AUROC)
y_pred = model.predict_proba(X)
y_ohe = pd.get_dummies(y).to_numpy()
scores[i, :] = roc_auc_score(y_ohe, y_pred, average=None)
confmat += confusion_matrix(y_ohe.argmax(axis=1), y_pred.argmax(axis=1))
# Store scores and raw predictions
scores = pd.DataFrame(scores, columns=EMOTIONS, index=SUBS).reset_index()
scores = | pd.melt(scores, id_vars='index', value_name='score', var_name='emotion') | pandas.melt |
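# Sketch of the reshaping completed above: pandas.melt unpivots the per-subject,
# per-emotion AUROC matrix from wide to long form for plotting. The two subjects and
# two emotions below are reduced stand-ins for the real 60 x 6 frame.
import pandas as pd

wide = pd.DataFrame({"index": ["01", "02"], "anger": [0.9, 0.8], "happy": [0.7, 0.6]})
long = pd.melt(wide, id_vars="index", value_name="score", var_name="emotion")
print(long)  # columns: index, emotion, score; one row per subject-emotion pair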
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("display.max_rows", None):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("display.max_rows", 0):
with option_context("display.max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("display.max_rows", None):
with option_context("display.max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_with_column_specific_col_space_raises(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
msg = (
"Col_space length\\(\\d+\\) should match "
"DataFrame number of columns\\(\\d+\\)"
)
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40])
with pytest.raises(ValueError, match=msg):
df.to_string(col_space=[30, 40, 50, 60])
msg = "unknown column"
with pytest.raises(ValueError, match=msg):
df.to_string(col_space={"a": "foo", "b": 23, "d": 34})
def test_to_string_with_column_specific_col_space(self):
df = DataFrame(np.random.random(size=(3, 3)), columns=["a", "b", "c"])
result = df.to_string(col_space={"a": 10, "b": 11, "c": 12})
# 3 separating space + each col_space for (id, a, b, c)
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
result = df.to_string(col_space=[10, 11, 12])
assert len(result.split("\n")[1]) == (3 + 1 + 10 + 11 + 12)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame({"date": [Timestamp("20130101").tz_localize("UTC")] + [NaT] * 5})
with | option_context("display.max_rows", 5) | pandas.option_context |
import pandas as pd
from pandas.testing import assert_frame_equal
import numpy as np
import pytest
from pytest import approx
from pytest import mark
import okama as ok
from .conftest import data_folder
@mark.asset_list
def test_asset_list_init_failing():
with pytest.raises(ValueError, match=r"Assets must be a list."):
ok.AssetList(assets=("RUB.FX", "MCFTR.INDX"))
@mark.asset_list
@mark.usefixtures("_init_asset_list")
class TestAssetList:
def test_repr(self):
value = pd.Series(dict(
assets="[pf1.PF, RUB.FX, MCFTR.INDX]",
currency="USD",
first_date="2019-02",
last_date="2020-01",
period_length="1 years, 0 months",
inflation="USD.INFL"
))
assert repr(self.asset_list_with_portfolio) == repr(value)
def test_len(self):
assert self.asset_list.__len__() == 2
def test_tickers(self):
assert self.asset_list_with_portfolio.tickers == ['pf1', 'RUB', 'MCFTR']
def test_ror(self):
asset_list_sample = | pd.read_pickle(data_folder / "asset_list.pkl") | pandas.read_pickle |
# -*- coding: utf-8 -*-
"""
Applies ResNet50 network to large images dynamically to make decisions about which objects are present.
"""
from threading import Thread
from keras.preprocessing import image as image_utils
from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from matplotlib import pyplot
from scipy.misc import imresize
from skimage import io
import numpy
import cv2
import pandas
#%%
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
#%%
def load_image(filename, scale=1.0):
original = io.imread(filename)
original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)
original = imresize(original, scale)
data = image_utils.img_to_array(original)
display = preprocess_input(original).squeeze()
display -= display.min()
display /= display.max()
return original, data, display
def slice_subimage(image, offset, dimension=(224,224)):
x = int(offset[1] * (image.shape[1] - dimension[1]))
y = int(offset[0] * (image.shape[0] - dimension[0]))
sub = image[y:y + dimension[0], x:x + dimension[1], :].copy()
sub = numpy.expand_dims(sub, axis=0)
sub = preprocess_input(sub)
return sub
def plot_image(ax, image):
scaled_image = image.copy().squeeze()
scaled_image -= scaled_image.min()
scaled_image /= scaled_image.max()
ax.imshow(scaled_image)
#%%
original, data, display = load_image('./Jake.jpg', 0.75)
pyplot.figure(figsize=(12,6))
ax = pyplot.subplot(121)
plot_image(ax, display)
ax = pyplot.subplot(122)
plot_image(ax, slice_subimage(data, (0.5, 0.5)))
pyplot.tight_layout()
#%%
model = ResNet50(weights="imagenet")
predictions = model.predict(slice_subimage(data, (0.5, 0.5)))
decoded = decode_predictions(predictions)
for i in range(5):
print('{} {:.0%}'.format(decoded[0][i][1], decoded[0][i][2]))
pyplot.plot(predictions.transpose())
#%%
class Region:
def __init__(self, offset, bounds, dimension=(224, 224)):
self.offset = offset
self.bounds = bounds
self.dimension = dimension
class SubimageClassifier:
def __init__(self, model):
self.model = model
self.original, self.data, self.display = load_image('./jake.jpg')
self.region = Region((0.5, 0.5), (self.data.shape[0], self.data.shape[1]))
self.subimage = slice_subimage(self.data, self.region.offset)
self.predictions = numpy.zeros(1000)
def load_image(self, filename, scale=1.0):
try:
self.original, self.data, self.display = load_image(filename, scale)
        except Exception:
return False
return self.data.shape[0] > self.region.dimension[0] and self.data.shape[1] > self.region.dimension[1]
def classify_subimage(self):
self.region.offset = numpy.random.rand(2)
self.subimage = slice_subimage(self.data, self.region.offset)
self.predictions = self.model.predict(self.subimage)
def show_running_predictions(ax, preds):
ax.imshow(preds.transpose(), aspect='auto', vmin=0, vmax=1)
def plot_top_prediction_classes(ax, preds):
max_preds = preds.max(axis=0).reshape((1, 1000))
order = numpy.argsort(max_preds)
decoded = decode_predictions(max_preds)
for i in range(5):
ax.plot(preds[:,order[0][-1 - i]].transpose(), label=decoded[0][i][1])
ax.legend()
def draw_subimage_border(ax, image, offset, dimension=(224,224)):
x0 = int(offset[1] * (image.shape[1] - dimension[1]))
y0 = int(offset[0] * (image.shape[0] - dimension[0]))
x1 = x0 + dimension[1]
y1 = y0 + dimension[0]
ax.plot([x0, x0, x1, x1, x0], [y0, y1, y1, y0, y0], 'g--')
def update_classifier(classifier, predictions, row):
classifier.classify_subimage()
predictions[row,:] = classifier.predictions
def click_figure(event):
if event.button == 1 and event.inaxes == event.canvas.active_axes:
region = event.canvas.region
region.offset = (event.ydata / region.bounds[0], event.xdata / region.bounds[1])
def close_figure(event):
event.canvas.closed = True
#%%
imageset = | pandas.read_csv('./open_images_urls.csv', delimiter=',') | pandas.read_csv |
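# Numeric sketch of the windowing rule in slice_subimage above: a fractional offset in
# [0, 1] maps to a top-left corner so the 224x224 crop always stays inside the image.
# The 480x640 image shape is an assumed example, not one of the real photos.
import numpy as np

image = np.zeros((480, 640, 3), dtype=np.float32)
offset, dimension = (0.5, 0.5), (224, 224)
x = int(offset[1] * (image.shape[1] - dimension[1]))  # 0.5 * (640 - 224) = 208
y = int(offset[0] * (image.shape[0] - dimension[0]))  # 0.5 * (480 - 224) = 128
print(image[y:y + dimension[0], x:x + dimension[1], :].shape)  # (224, 224, 3)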
import pandas
import pytest
import modin.pandas as pd
import numpy as np
from .utils import test_data_values, test_data_keys, df_equals
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isna(data):
pandas_df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
pandas_result = pandas.isna(pandas_df)
modin_result = pd.isna(modin_df)
df_equals(modin_result, pandas_result)
modin_result = pd.isna(pd.Series([1, np.nan, 2]))
pandas_result = pandas.isna(pandas.Series([1, np.nan, 2]))
df_equals(modin_result, pandas_result)
assert pd.isna(np.nan) == | pandas.isna(np.nan) | pandas.isna |
import os
from datetime import date
from datetime import datetime
from datetime import timedelta
from pathlib import Path
import dash
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_table
from dash.dependencies import Input, Output, State
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
import datetime as dt
# Load the COVID-19 case-count data
pcr = pd.read_csv('./data/nhk_news_covid19_domestic_daily_data.csv')
pcr['日付'] = pd.to_datetime(pcr['日付'])
pcr = pcr.rename({'国内の感染者数_1日ごとの発表数':'感染者数'},axis=1)
pcr = pcr[["日付", "感染者数"]]
# Style template
styles = {
# 'backgroundColor': '#00008b',
'backgroundColor': '#191970',
'textColor': '#ffffff',
'font':'sans-serif'
}
# Create the initial (empty) dataframe
empty_list = ["" for _ in range(365)]
df = pd.DataFrame({"日付": empty_list, "データ": empty_list})
# Sample data
sample1 = pd.read_csv('./data/temperature.csv', header=3, names=['日付','平均気温','品質情報','均質番号'], encoding='shift-JIS')
# Average temperature in Tokyo
# dash
app = dash.Dash(
__name__,
suppress_callback_exceptions=True,
external_stylesheets=[dbc.themes.BOOTSTRAP],
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1"}
],
)
server = app.server
app.title = 'コロナ状況と比較'
app.layout = html.Div(
id='whole',
style={
'backgroundColor': styles['backgroundColor'],
'color':styles['textColor'],
'font-family':styles['font'],
'textAlign':'center',
'font-size':'1.1rem',
'margin':'0',
'padding':'0',
},
children=[
html.Div(
id='header',
style={
'width':'100%',
'height':'70px',
# 'position':'fixed',
'z-index':'10',
'background-color':'#330033',
'border-bottom':'1.5px solid white',
},
children=[
html.A(
href='/compare-with-corona',
style={
'float':'left',
'text-decoration':'None',
'margin':'20px 50px',
'font-size':'1.2rem',
'font-weight':'bold',
'color':'white',
},
children="コロナ状況と比較"
),
],
),
html.H1(
id='title',
style={
'font-size':'60px',
# 'border-bottom':'3px solid white',
'margin':'30px',
'padding':'50px 0 20px 0',
},
children="コロナ状況と比較",
),
html.Div(
id='body-container',
style={
"margin": "0 10%"
},
children=[
html.P(
id='explanation',
style={
'margin':'20px',
'font-size':'1.5rem',
},
children='時系列データを入力すると、国内の新型コロナウイルス感染者数状況との比較ができます。',
),
                # Table section
html.Div(
id='table-container',
style={
'margin':'70px',
},
children=[
html.P(
style={
'margin':'30px'
},
children=dcc.Markdown('''
以下のテーブルに日付、データを入力してください。
手元にデータがない場合、サンプルデータで見てみることもできます。
''')
),
                        # "Try it with sample data" section
html.Div([
dbc.Button(
id="collapse-button4",
children="サンプルデータで見てみる",
color="light",
className="mb-3",
n_clicks=0,
outline=True,
style={'color':'white',},
),
dbc.Collapse(
dbc.Card(dbc.CardBody(
dbc.Button(
id="button-sample1",
children="サンプルデータ: 東京都の平均気温(気象庁より取得)",
color="light",
className="mb-3",
n_clicks=0,
),
style={
'padding':'30px',
'margin':'30px'
},
),
style={'background-color':'#191970',}),
id="collapse4",
is_open=False,
),
]),
                        # Error message
html.P(
id="warning",
style={
'margin':'30px',
'color':'red',
'font-size':'1.2rem',
},
children="",
),
                        # The data-entry table
dash_table.DataTable(
id="table",
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict("records"),
editable=True,
page_size=365,
# export_format="csv",
style_table={
"height": "300px",
"overflowY": "auto",
"background-color":"white",
"border":"3px solid black",
},
style_cell={
"minWidth": "95px",
"maxWidth": "95px",
"width": "95px",
"color":"black",
"textAlign": "center",
},
style_cell_conditional = [{
'if': {'column_id': '日付'},
'type': 'datetime',
'format': 'YYYY-mm-dd'
}],
                            # fixed_rows={"headers": True},  # would like to pin the header row, but the rendering breaks
),
]
),
                # Button
html.Div(
[
html.P(
style={
'margin':'30px'
},
children='データが入力できたら、以下のボタンを押してください。'
),
dbc.Button(
id="my_button",
children="グラフを出力",
color="light",
className="mr-1",
size='lg',
),
],
style={
"display": "inline-block",
"width": "100% auto",
"verticalAlign": "top",
"textAlign": "center",
"padding-bottom":"50px",
},
),
                # Graph display
html.Div(
id='graph_container',
style={
'background-color':'white',
'border':'3px solid black',
'color':'black',
},
children=[
html.H3(
"グラフ表示結果",style={'padding-top':'30px',}
),
dcc.Loading(dcc.Graph(id="my_graph",
style={
'width': '100%',
'height': '700px',
}
),),
]
),
                # Statistics report
html.Div(
id='report',
style={
'background-color':'white',
'color':'black',
'margin':'100px 30px',
'padding':'30px 30px 60px 30px',
# 'border':'3px solid red',
},
children=[
html.H3(
"統計量レポート"
),
html.Div(
style={
'border':'3px solid black',
'display':'inline-block',
'padding': '30px',
'margin':'20px',
'width':'350px',
'height':'150px',
},
children=[
html.P(
children='入力データとコロナ感染者数との相関係数:',
style={},
),
html.Div(
id='correlation',
children='-',
style={
'font-size':'1.5rem',
},
),
],
),
html.Div(
style={
'border':'3px solid black',
'display':'inline-block',
'padding': '30px',
'margin':'20px',
'width':'350px',
'height':'150px',
},
children=[
html.P(
children='入力データ期間中のコロナ感染者数の平均値:',
style={},
),
html.Div(
id='mean',
children='-',
style={
'font-size':'1.5rem',
},
),
],
),
html.Div(
style={
'border':'3px solid black',
'display':'inline-block',
'padding': '30px',
'margin':'20px',
'width':'350px',
'height':'150px',
},
children=[
html.P(
children='入力データ期間中のコロナ感染者数の中央値:',
style={},
),
html.Div(
id='median',
children='-',
style={
'font-size':'1.5rem',
},
),
],
),
html.Div(
style={
'border':'3px solid black',
'display':'inline-block',
'padding': '30px',
'margin':'20px',
'width':'350px',
'height':'150px',
},
children=[
html.P(
children='入力データ期間中のコロナ感染者数の標準偏差:',
style={},
),
html.Div(
id='stdev',
children='-',
style={
'font-size':'1.5rem',
},
),
],
),
],
),
                # Collapse: how to use
html.Div([
dbc.Button("使い方",
id="collapse-button1",
className="mb-3",
                        color="light",
outline=True,
n_clicks=0,
style={'color':'white',
'font-size':'1.5rem',},
),
dbc.Collapse(
dbc.Card(dbc.CardBody(
dcc.Markdown('''
① お手持ちの日次データの日付と値をテーブルにペーストして入力します。
日付は'YYYY-MM-DD','YYYY/MM/DD'などの形式で、値は半角でご入力ください。
「サンプルデータで見てみる」ボタンを押してサンプルデータで試すこともできます。
② 「グラフを出力」ボタンをクリックします。
③ グラフと各種統計量が表示されます。グラフはドラッグして表示範囲を変えることもできます。
'''),
style={'padding':'30px','margin':'30px'},),
style={'background-color':'#191970',}),
id="collapse1",
is_open=False,
),
]),
                # Collapse: about the input data
html.Div([
dbc.Button("入力データについて",
id="collapse-button2",
className="mb-3",
                        color="light",
outline=True,
n_clicks=0,
style={'color':'white',
'font-size':'1.5rem',},
),
dbc.Collapse(
dbc.Card(dbc.CardBody("入力されたデータは保存されず、製作者や第三者がデータを収集、利用することはありません。",
style={
'padding':'30px',
'margin':'30px'},),
style={'background-color':'#191970',}),
id="collapse2",
is_open=False,
),
]),
                # Collapse: disclaimer
html.Div([
dbc.Button("免責事項",
id="collapse-button3",
className="mb-3",
                        color="light",
outline=True,
n_clicks=0,
style={'color':'white',
'font-size':'1.5rem',},
),
dbc.Collapse(
dbc.Card(dbc.CardBody("当ダッシュボードの利用により利用者に何らかの損害が生じても、製作者は一切責任を負いません。",
style={
'padding':'30px',
'margin':'30px'},),
style={'background-color':'#191970',}),
id="collapse3",
is_open=False,
),
]),
html.A(
id='ref',
href="https://www3.nhk.or.jp/news/special/coronavirus/data-widget/#mokuji0",
target="_blank",
rel="noopener noreferrer",
style={
'color':'white',
'display':'inline-block',
'margin':'100px 0 0 0',
},
children='新型コロナ関連の情報提供:NHK',
),
],
),
        # Footer
html.Div(
id='footer',
style={
'padding':'30px',
'margin-top':'50px',
'font-size':'0.9rem',
'padding-top':'50px',
'border-top':'1.5px solid white',
'background-color':'#330033'
},
children=[
dcc.Markdown('''
製作者: kikeda
[twitter](https://twitter.com/kikeda1102)
'''),
html.A(
href="https://github.com/kikeda1102",
target="_blank",
rel="noopener noreferrer",
style={
'color':'white',
"display":"block",
"margin-top":"10px",
},
children=[
html.Br(),
"その他の制作物",
],
),
],
),
],
)
# Joins the infection-count data onto the input data and returns it (also returns an error message when the dates cannot be converted)
def merge_pcr(
data: pd.DataFrame, pcr: pd.DataFrame
) -> "[ data2: pd.DataFrame, warning: str ]":
    # Exception handling for malformed input whose dates cannot be converted
try:
data['日付'] = pd.to_datetime(data['日付'])
data["データ"] = data["データ"].replace("", np.nan)
        data = data.dropna()  # drop blank rows
        data["データ"] = data["データ"].map(lambda x: float(x))  # cast values to float
        data2 = pd.merge(data, pcr, on="日付", how='outer')  # outer-join onto the infection-count data
warning = ""
except Exception:
data2 = data
warning = "日付、データを適切な形式で入力されているか確かめてください。"
return [data2, warning]
# Returns a figure plotting the input data and the infection counts
def dataplot(data: pd.DataFrame) -> go.Figure:
fig = go.Figure()
fig.add_trace(go.Scatter(x=data["日付"], y=data["データ"], name="入力データ"))
fig.add_trace(go.Scatter(x=data["日付"], y=data["感染者数"], name="感染者数", yaxis="y2"))
fig.update_layout(
xaxis=dict(domain=[0.1, 0.9], dtick='M1'),
yaxis=dict(
title="入力データ"
),
yaxis2=dict(
title="感染者数",
anchor="free",
overlaying="y",
side="right",
position=0.9,
),
)
return fig
@app.callback(
Output("my_graph", "figure"),
Output("warning", "children"),
Output("correlation","children"),
Output("mean","children"),
Output("median","children"),
Output("stdev","children"),
Input("my_button", "n_clicks"),
State("table", "columns"),
State("table", "derived_virtual_data"),
)
def update_data(n_clicks, columns, rows):
    # Build a dataframe from the table input
df = pd.DataFrame(rows, columns=[c["name"] for c in columns])
df = df.replace("", np.nan)
df = df.dropna()
data, warning = merge_pcr(df, pcr)
    # If date parsing failed, return placeholders instead of updating the graph
    if warning != "":
fig = dash.no_update
corr = "-"
mean = "-"
median = "-"
stdev = "-"
else:
fig = dataplot(data)
        # Statistics report
data2 = data.dropna()
if len(data2) != 0:
corr = round( data2["データ"].corr(data2["感染者数"]), 4)
mean = round( data2["感染者数"].mean() )
median = round( data2["感染者数"].median() )
stdev = round( data2["感染者数"].std(), 1 )
else:
corr = "-"
mean = "-"
median = "-"
stdev = "-"
return fig, warning, corr, mean, median, stdev
# collapse
@app.callback(
Output("collapse1", "is_open"),
[Input("collapse-button1", "n_clicks")],
[State("collapse1", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
@app.callback(
Output("collapse2", "is_open"),
[Input("collapse-button2", "n_clicks")],
[State("collapse2", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
@app.callback(
Output("collapse3", "is_open"),
[Input("collapse-button3", "n_clicks")],
[State("collapse3", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
@app.callback(
Output("collapse4", "is_open"),
[Input("collapse-button4", "n_clicks")],
[State("collapse4", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
# Sample-data callback
@app.callback(
Output("table","data"),
[Input("button-sample1", "n_clicks")],
)
def sample_output(n):
if n:
empty_list = ["" for _ in range(1095)]
df = | pd.DataFrame({"日付": empty_list, "データ": empty_list}) | pandas.DataFrame |
from sklearn.datasets import load_boston
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import pickle
X, y = load_boston(return_X_y=True)
X = pd.DataFrame(data=X, columns=load_boston().feature_names)
y = | pd.DataFrame(data=y, columns=["Price"]) | pandas.DataFrame |
import plotly
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import json
import dash_core_components as dcc
def create_line_plot_dash(data, x_axis_title, y_axis_title, x_range=None):
return dcc.Graph(id='timeseries',
config={'displayModeBar': False},
animate=True,
figure={
'data': data,
'layout': get_plotly_layout(x_axis_title, y_axis_title, x_range=x_range)
})
def create_scatter_and_line_plot_dash(fig):
return dcc.Graph(figure=fig)
def create_line_plot(x=None, y=None, line_name=None, line_color=None, line_width=1.5, show_legend=True):
if x is None:
x = np.arange(1, len(y) + 1, 1)
df = pd.DataFrame({'x': x, 'y': y}) # creating a sample dataframe
data_line = go.Line(
x=df['x'], # assign x as the dataframe column 'x'
y=df['y'],
line={
'width': line_width
},
showlegend=show_legend
)
if line_name is not None:
data_line['name'] = line_name
if line_color is not None:
data_line['line']['color'] = line_color
return json.dumps([data_line], cls=plotly.utils.PlotlyJSONEncoder)
def create_line_plot_fill(x=None, y=None, line_name=None, line_color=None, line_width=1.5, show_legend=True):
if x is None:
x = np.arange(1, len(y) + 1, 1)
df = | pd.DataFrame({'x': x, 'y': y}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 18:17:07 2021
@author: alber
"""
import os
import pandas as pd
import numpy as np
import itertools
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pickle
import lightgbm as lgb
from os import walk
from scipy import stats
from statsmodels.stats.power import TTestIndPower
from sklearn import preprocessing
from sklearn.semi_supervised import (
LabelPropagation,
LabelSpreading,
SelfTrainingClassifier,
)
from common.config import (
PATH_POEMS, PATH_RESULTS, PATH_AFF_LEXICON, PATH_GROUND_TRUTH
)
df_metrics_h_test = pd.DataFrame()
### Sample Size
# parameters for power analysis
effect = 0.8
alpha = 0.1 # watch the alpha here, it is not 0.5
power = 0.8
# perform power analysis
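# With nobs1=None, solve_power returns the required sample size of the first group
# (ratio=1.0 assumes equally sized groups).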
analysis = TTestIndPower()
result = analysis.solve_power(effect, power=power, nobs1=None, ratio=1.0, alpha=alpha)
print('Sample Size: %.3f' % result)
df_kappa_limits = pd.DataFrame(
{
'limit_k': [0, 0.2, 0.4],
'category': ['poor', 'slight', 'fair']
}
)
# =============================================================================
# Best Models based on CV
# =============================================================================
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
### Load CV - Psychological
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_full"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_aff_cv = pd.DataFrame()
i = 0
for folder in f_folders:
i += 1
filenames = next(walk(f_path + f'/{folder}'), (None, None, []))[2]
df_iter = pd.concat([
pd.read_csv(f_path + f'/{folder}' + '/' + x, encoding="latin-1") for x in filenames
])
df_iter['iter'] = folder
df_results_aff_cv = df_results_aff_cv.append(
df_iter
)
df_raw = df_results_aff_cv
df_raw = (
df_raw
.replace("Aversión", "Aversión")
.replace("Depresión", "Depresión")
.replace('Dramatización', "Dramatización")
.replace('Ilusión', "Ilusión")
.replace("Desilusión", "Desilusión")
.replace("Obsesión", "Obsesión")
.replace("Compulsión", "Compulsión")
.replace("Ensoñación", "Ensoñación")
.replace("Idealización", "Idealización")
.dropna(subset=['category'])
.drop(columns=['category', 'en_name'], errors="ignore")
)
df_raw = df_raw.merge(df_names, how="left").round(2)
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
df_raw = (
df_raw
.merge(df_names, how="left")
.drop(columns=['es_name'])
)
### Get the metrics per emotion tag
df_results_aff = (
df_raw
.groupby(by=['category', 'regression_model', 'semantic_model'])
.mean()
.reset_index()
)
df_results_aff['mean_metric'] = (
(df_results_aff['kappa']+
df_results_aff['auc'])
/
2
)
df_median_ref = (
df_results_aff
.groupby(by=['regression_model', 'semantic_model'])
.median()
.reset_index()
.copy()
[['regression_model', 'semantic_model', 'f1_weighted', 'kappa', 'auc', 'corr']]
.rename(columns={
'f1_weighted': 'f1_weighted_median',
'kappa': 'kappa_median',
'auc': 'auc_median',
'corr': 'corr_median'
})
)
df_results_aff = df_results_aff[df_results_aff['auc']>0.5]
df_results_aff = df_results_aff[df_results_aff.fillna(0)['corr']>=0]
# Remove baselines
df_results_aff = df_results_aff[
(df_results_aff['regression_model'] != 'class_baseline_lightgbm') &
(df_results_aff['regression_model'] != 'class_baseline_smote_lightgbm') &
(df_results_aff['regression_model'] != 'class_label_spreading_base_knn') &
(df_results_aff['regression_model'] != 'class_label_spreading_base_rbf') &
(df_results_aff['regression_model'] != 'class_dummy_classifier') &
(df_results_aff['regression_model'] != 'reg_baseline_lightgbm') &
(df_results_aff['regression_model'] != 'reg_baseline_smote_lightgbm') &
(df_results_aff['regression_model'] != 'reg_label_spreading_base') &
(df_results_aff['regression_model'] != 'reg_dummy_classifier')
].copy()
# Remove unused semantic models
list_semantic_models = [
'enc_text_model1',
'enc_text_model2',
'enc_text_model3',
'enc_text_model4',
'enc_text_model5',
# 'enc_text_model_hg_bert_max',
# 'enc_text_model_hg_bert_span',
# 'enc_text_model_hg_bert_median',
'enc_text_model_hg_bert_avg_w',
# 'enc_text_model_hg_bert_sp_max',
# 'enc_text_model_hg_bert_sp_span',
# 'enc_text_model_hg_bert_sp_median',
'enc_text_model_hg_bert_sp_avg_w',
# 'enc_text_model_hg_ro_max',
# 'enc_text_model_hg_ro_span',
# 'enc_text_model_hg_ro_median',
# 'enc_text_model_hg_ro_avg_w'
]
df_results_aff = df_results_aff[
df_results_aff['semantic_model'].isin(list_semantic_models)]
df_results_aff = (
df_results_aff
.sort_values(by=['category', 'mean_metric'], ascending=False)
.groupby(by=['category'])
.first()
.reset_index()
)
df_results_aff = (
df_results_aff.merge(df_names, how="left").drop(columns=['es_name'])
)
df_results = df_results_aff[[
'en_name', 'semantic_model', 'regression_model',
'f1_weighted', 'kappa', 'auc', 'corr'
]].copy().round(2)
df_reference = df_results
# df_reference = df_results[[
# 'en_name', 'semantic_model', 'classification_model',
# 'f1_weighted', 'kappa', 'auc'
# ]].copy().round(2)
df_reference = df_reference.merge(df_median_ref, how="left")
### Add data distribution
# Load psycho names
df_names = pd.read_csv(f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
list_names = list(df_names["es_name"].values)
list_aff = [
"concreteness",
"context availability",
"anger",
"arousal",
"disgust",
"fear",
"happinness",
"imageability",
"sadness",
"valence",
]
list_kfolds = []
n_folds = 21
for i in range(n_folds):
df_gt = pd.read_csv(f"{PATH_GROUND_TRUTH}/poems_corpus_all.csv")
df_gt = df_gt.rename(columns={"text": "text_original"})
df_gt.columns = [str(x).rstrip().lstrip() for x in list(df_gt.columns)]
df_add = pd.DataFrame()
for category in list_names:
if category in list_aff:
continue
try:
df_iter = df_gt.groupby(category).apply(lambda s: s.sample(2))
        except Exception:
continue
df_add = df_add.append(df_iter)
df_add = df_add.drop_duplicates()
# New GT (without data used in training)
df_gt = df_gt[~df_gt["index"].isin(df_add["index"])].copy()
## Check no affective feature categories are missing
for category in list_aff:
l1 = list(df_add[category].unique())
l2 = list(df_gt[category].unique())
if len(l1)<len(l2):
l3 = [x for x in l2 if x not in l1]
df_add_new = df_gt[df_gt[category].isin(l3)]
df_add_new = df_add_new.drop_duplicates(subset=category)
df_add = df_add.append(df_add_new)
df_gt = df_gt[~df_gt["index"].isin(df_add_new["index"])].copy()
list_kfolds.append([{i: {'df_gt': df_gt, 'df_add': df_add}}])
df_distribution = pd.DataFrame()
for iter_item in list_kfolds:
iter_item = [x for x in iter_item[0].values()][0]['df_gt']
for category in list_aff:
data_cat = (
pd.DataFrame(iter_item[category].copy().value_counts())
.T
.reset_index()
.rename(columns={'index':'en_name'})
)
df_distribution = df_distribution.append(data_cat)
df_distribution = df_distribution.groupby(by=['en_name']).mean().reset_index().round(1)
df_distribution = df_distribution.replace("fear", "Fear (ordinal)")
df_distribution = df_distribution.replace("happinness", "happiness")
df_reference = df_distribution.merge(df_reference)
df_reference.round(2).to_csv(
"tables_paper/df_results_emotions_reference.csv", index=False)
# =============================================================================
# Differences vs. Baselines
# =============================================================================
# ENG names
df_names = pd.read_csv(
f"{PATH_GROUND_TRUTH}/variable_names_en.csv", encoding="latin-1")
df_names['category'] = df_names['es_name']
# Load best combinations
df_reference = pd.read_csv("tables_paper/df_results_emotions_reference.csv")
list_semantic_models = list(set(df_reference['semantic_model'].values))
list_prediction_models = list(set(df_reference['regression_model'].values))
list_categories = list(set(df_reference['en_name'].values))
### Load CV - Emotions
f_path = f"{PATH_RESULTS}/results_cv/emotion_aff_full"
f_folders = next(walk(f_path), (None, None, []))[1] # [] if no file
df_results_aff_cv = | pd.DataFrame() | pandas.DataFrame |
# coding: utf-8
### Import Packages
import pandas as pd
import numpy as np
import elasticsearch
import re
import json
from datetime import datetime
from pprint import pprint
import timeit
# Define elasticsearch class
es = elasticsearch.Elasticsearch()
### Helper Functions
# convert np.int64 into int. json.dumps does not work with int64
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.int64):
            return int(obj)  # np.int was deprecated and later removed from NumPy; the builtin int is sufficient
# else
return json.JSONEncoder.default(self, obj)
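# Illustrative use (hypothetical payload): json.dumps({"count": np.int64(3)}, cls=SetEncoder) -> '{"count": 3}'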
# Convert datestamp into ISO format
def str_to_iso(text):
if text != '':
for fmt in ('%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d'):
try:
return datetime.isoformat(datetime.strptime(text, fmt))
except ValueError:
pass
raise ValueError('no valid date format found')
else:
return None
# Custom groupby function
def concatdf(x):
if len(x) > 1: #if multiple values
return list(x)
else: #if single value
return x.iloc[0]
### Import Data
# Load projects, resources & donations data
projects = | pd.read_csv('./data/opendata_projects.csv', index_col=None, escapechar='\\') | pandas.read_csv |
import pandas as pd
import numpy as np
import datetime as dt
import os
from enum import Enum
from sklearn import preprocessing
from collections import deque
import random
import tensorflow as tf
import time
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, BatchNormalization
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
def classify(current: float, future: float) -> int:
if future > current:
return 1
return 0
# here we basically preprocess the data
# ... in order to have some proper training data
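# Concretely: percent-change and standard-scale every feature column, build rolling windows
# of SEQ_PERIOD rows with a deque, then balance and shuffle the buy/sell classes.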
def preprocess_df(df):
df = df.drop("Future", 1) # fuck that future shit
for c in df.columns:
if c != "Target":
df[c] = df[c].pct_change()
df.dropna(inplace=True)
df[c] = preprocessing.scale(df[c].values)
df.dropna(inplace=True)
sequential_data = []
prev_data = deque(maxlen=SEQ_PERIOD)
for v in df.values:
prev_data.append([n for n in v[:-1]])
if len(prev_data) == SEQ_PERIOD:
sequential_data.append([np.array(prev_data), v[-1]])
random.shuffle(sequential_data)
buys = []
sells = []
for seq, target in sequential_data:
if target == 0: # sell
sells.append([seq, target])
elif target == 1:
buys.append([seq, target])
random.shuffle(buys)
random.shuffle(sells)
lower = min(len(buys), len(sells))
buys = buys[:lower]
sells = sells[:lower]
sequential_data = buys + sells
random.shuffle(sequential_data)
X = []
y = []
for seq, target in sequential_data:
X.append(seq)
y.append(target)
return np.array(X), y
main_df = pd.DataFrame()
ratios = ["BTC-USD", "LTC-USD", "ETH-USD", "BCH-USD"]
SEQ_PERIOD = 60 # units to look at
FUTURE_PERIOD = 3 # units
CHOSEN_CRYPTO = "LTC-USD"
EPOCHS = 10
BATCH_SIZE = 64
NAME = f"Zubinator-{SEQ_PERIOD}-{FUTURE_PERIOD}@{int(time.time())}"
for r in ratios:
data_path = F"crypto/crypto_data/{ r }.csv"
df = | pd.read_csv(data_path, names=["time", "low", "high", "open", "close", "volume"]) | pandas.read_csv |
import pandas as pd
import sklearn
import joblib
import argparse
import sys
import pkg_resources
import os
from csv import DictWriter
import covizu
from covizu.utils import seq_utils
from covizu.minimap2 import minimap2, stream_fasta
class Pangolin:
def __init__(self, header_file, model_file):
model_headers = joblib.load(header_file)
self.indices = model_headers[1::] # list of integers
self.model = joblib.load(model_file)
self.categories = ['A', 'C', 'G', 'T', 'N', '-']
def classify(self, seq):
""" Assign genome to one or more lineages """
# convert sequence into list
seqlist = [nt if nt in 'ACGT-' else 'N' for i, nt in enumerate(seq) if i in self.indices]
df = pd.DataFrame([seqlist], columns=self.indices)
# add extra rows to ensure all categories are represented
for nt in self.categories:
df.loc[len(df)] = [nt] * len(self.indices)
df = | pd.get_dummies(df, columns=self.indices) | pandas.get_dummies |
import pandas as pd
import pytest
from pyspark.sql import SparkSession
def test_stoys_init():
df = | pd.DataFrame([{"k": "1", "v": 42.0, "e": "foo"}]) | pandas.DataFrame |
from qutip import *
from ..mf import *
import pandas as pd
from scipy.interpolate import interp1d
from copy import deepcopy
import matplotlib.pyplot as plt
def ham_gen_jc(params, alpha=0):
sz = tensor(sigmaz(), qeye(params.c_levels))
sm = tensor(sigmam(), qeye(params.c_levels))
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
ham = (params.fc-params.fd)*a.dag()*a
ham += params.eps*(a+a.dag())
ham += 0.5*(params.f01-params.fd)*sz
ham += params.g*(a*sm.dag() + a.dag()*sm)
ham *= 2*np.pi
return ham
def c_ops_gen_jc(params, alpha=0):
c_ops = []
sm = tensor(sigmam(), qeye(params.c_levels))
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
if params.gamma > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.gamma*(1+params.n_t))*sm)
if params.n_t > 0:
c_ops.append(np.sqrt(2*np.pi*params.gamma*params.n_t)*sm.dag())
if params.gamma_phi > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.gamma_phi)*sm.dag()*sm)
if params.kappa > 0.0:
c_ops.append(np.sqrt(2*np.pi*params.kappa*(1+params.n_c))*a)
if params.n_c > 0:
c_ops.append(np.sqrt(2*np.pi*params.kappa*params.n_c)*a.dag())
return c_ops
def iterative_alpha_calc(params, n_cycles=10, initial_alpha=0):
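    # Self-consistent displacement iteration: solve the steady state in a frame displaced by
    # alpha, read off the new <a>, and use it as the displacement for the next cycle.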
alpha = initial_alpha
try:
for idx in range(n_cycles):
ham = ham_gen_jc(params, alpha=alpha)
c_ops = c_ops_gen_jc(params, alpha=alpha)
rho = steadystate(ham, c_ops)
a = tensor(qeye(2), destroy(params.c_levels)) + alpha
a_exp = expect(a, rho)
alpha = a_exp
    except Exception:
alpha = None
return alpha
class Spectrum:
def __init__(self, parameters):
print('hello')
self.parameters = deepcopy(parameters)
self.mf_amplitude = None
self.me_amplitude = None
self.transmission_exp = None
self.hilbert_params = None
def iterative_calculate(self, fd_array, initial_alpha=0, n_cycles=10, prune=True):
if self.parameters.fc < self.parameters.f01:
change = 'hard'
else:
change = 'soft'
params = deepcopy(self.parameters)
fd_array = np.sort(fd_array)
a_array = np.zeros(fd_array.shape[0], dtype=complex)
alpha = initial_alpha
for fd_idx, fd in tqdm(enumerate(fd_array)):
params.fd = fd
alpha = iterative_alpha_calc(params, initial_alpha=alpha, n_cycles=n_cycles)
a_array[fd_idx] = alpha
        if change == 'hard':
alpha_bright_iterative = pd.Series(a_array, index=fd_array, name='alpha_bright')
else:
alpha_dim_iterative = pd.Series(a_array, index=fd_array, name='alpha_dim')
fd_array = np.flip(fd_array)
a_array = np.zeros(fd_array.shape[0], dtype=complex)
alpha = initial_alpha
for fd_idx, fd in tqdm(enumerate(fd_array)):
params.fd = fd
alpha = iterative_alpha_calc(params, initial_alpha=alpha, n_cycles=n_cycles)
a_array[fd_idx] = alpha
        if change == 'hard':
alpha_dim_iterative = pd.Series(a_array, index=fd_array, name='alpha_dim')
else:
alpha_bright_iterative = pd.Series(a_array, index=fd_array, name='alpha_bright')
if prune:
alpha_dim_iterative = alpha_dim_iterative.dropna()
alpha_bright_iterative = alpha_bright_iterative.dropna()
alpha_dim_iterative.sort_index(inplace=True)
alpha_bright_iterative.sort_index(inplace=True)
            if change == 'hard':
# alpha_dim_diff = np.diff(alpha_dim_iterative)/np.diff(alpha_dim_iterative.index)
# first_dim_idx = np.argmax(np.abs(alpha_dim_diff)) + 1
first_dim_idx = np.argmax(alpha_dim_iterative.real)
alpha_dim_iterative = alpha_dim_iterative.iloc[first_dim_idx:]
# alpha_bright_diff = np.diff(alpha_bright_iterative) / np.diff(alpha_bright_iterative.index)
# last_bright_idx = np.argmax(np.abs(alpha_bright_diff))
last_bright_idx = np.argmin(alpha_bright_iterative.imag)
alpha_bright_iterative = alpha_bright_iterative.iloc[:last_bright_idx + 1]
else:
first_bright_idx = np.argmin(alpha_bright_iterative.imag)
alpha_bright_iterative = alpha_bright_iterative.iloc[first_bright_idx:]
last_dim_idx = np.argmin(alpha_dim_iterative.real)
alpha_dim_iterative = alpha_dim_iterative.iloc[:last_dim_idx+1]
self.iterative_amplitude = pd.concat([alpha_dim_iterative, alpha_bright_iterative], axis=1)
def gen_raw_hilbert_params(self, fd_array, c_levels):
self.hilbert_params = pd.DataFrame(np.zeros([fd_array.shape[0], 1]), index=fd_array, columns=['alpha_0'])
self.hilbert_params['c_levels'] = c_levels
def gen_iterative_hilbert_params(self, fd_limits, kind='linear', fill_value='extrapolate', fraction=0.5,
level_scaling=1.0, max_shift=False, max_levels=True, relative='dim', relative_crossover=None, c_levels_bistable=None):
if self.parameters.fc < self.parameters.f01:
change = 'hard'
else:
change = 'soft'
alpha_dim = self.iterative_amplitude['alpha_dim'].dropna()
# alpha_dim.sort_index(inplace=True)
# alpha_dim_diff = np.diff(alpha_dim)/np.diff(alpha_dim.index)
# first_dim_idx = np.argmax(np.abs(alpha_dim_diff)) + 1
# alpha_dim = alpha_dim.iloc[first_dim_idx:]
alpha_bright = self.iterative_amplitude['alpha_bright'].dropna()
# alpha_bright.sort_index(inplace=True)
# alpha_bright_diff = np.diff(alpha_bright) / np.diff(alpha_bright.index)
# last_bright_idx = np.argmax(np.abs(alpha_bright_diff))
# alpha_bright = alpha_bright.iloc[:last_bright_idx]
new_iterative_alphas = pd.concat([alpha_dim, alpha_bright], axis=1)
self.iterative_amplitude = new_iterative_alphas
alpha_dim_real_func = interp1d(alpha_dim.index, alpha_dim.real, kind=kind, fill_value=fill_value)
alpha_dim_imag_func = interp1d(alpha_dim.index, alpha_dim.imag, kind=kind, fill_value=fill_value)
def alpha_dim_func_single(fd):
alpha_dim = alpha_dim_real_func(fd) + 1j * alpha_dim_imag_func(fd)
return alpha_dim
alpha_dim_func_vec = np.vectorize(alpha_dim_func_single)
def alpha_dim_func(fd_array):
alpha_dim_array = alpha_dim_func_vec(fd_array)
alpha_dim_series = pd.Series(alpha_dim_array, index=fd_array, name='alpha_dim_func')
return alpha_dim_series
alpha_bright_real_func = interp1d(alpha_bright.index, alpha_bright.real, kind=kind,
fill_value=fill_value)
alpha_bright_imag_func = interp1d(alpha_bright.index, alpha_bright.imag, kind=kind,
fill_value=fill_value)
def alpha_bright_func_single(fd):
alpha_bright = alpha_bright_real_func(fd) + 1j * alpha_bright_imag_func(fd)
return alpha_bright
alpha_bright_func_vec = np.vectorize(alpha_bright_func_single)
def alpha_bright_func(fd_array):
alpha_bright_array = alpha_bright_func_vec(fd_array)
alpha_bright_series = pd.Series(alpha_bright_array, index=fd_array, name='alpha_bright')
return alpha_bright_series
alpha_dim_interp = alpha_dim_func(self.iterative_amplitude.index)
alpha_bright_interp = alpha_bright_func(self.iterative_amplitude.index)
alpha_diff_interp = (alpha_bright_interp - alpha_dim_interp).dropna()
if max_shift:
min_diff = np.min(np.abs(alpha_diff_interp))
alpha_diff_unit_interp = alpha_diff_interp / np.abs(alpha_diff_interp)
            if relative == 'dim':
alpha_0_interp = alpha_dim_interp + fraction * min_diff * alpha_diff_unit_interp
            elif relative == 'bright':
alpha_0_interp = alpha_bright_interp - fraction * min_diff * alpha_diff_unit_interp
            elif relative == 'both':
                if change == 'soft':
alpha_0_interp = alpha_dim_interp + fraction * min_diff * alpha_diff_unit_interp
alpha_0_interp[relative_crossover:] = alpha_bright_interp[relative_crossover:] - fraction * min_diff * alpha_diff_unit_interp[relative_crossover:]
else:
alpha_0_interp = alpha_dim_interp + fraction * min_diff * alpha_diff_unit_interp
alpha_0_interp[:relative_crossover] = alpha_bright_interp[:relative_crossover] - fraction * min_diff * alpha_diff_unit_interp[:relative_crossover]
else:
raise Exception('Relative is neither bright, dim nor both.')
else:
            if relative == 'dim':
alpha_0_interp = alpha_dim_interp + fraction * alpha_diff_interp
            elif relative == 'bright':
alpha_0_interp = alpha_bright_interp - fraction * alpha_diff_interp
else:
raise Exception('Relative is neither bright norm dim.')
alpha_diff_interp.name = 'alpha_diff'
alpha_0_interp.name = 'alpha_0'
hilbert_params = pd.concat([alpha_diff_interp, alpha_0_interp], axis=1)
if max_levels:
if c_levels_bistable is not None:
hilbert_params['c_levels'] = c_levels_bistable
else:
min_diff = np.min(np.abs(alpha_diff_interp))
hilbert_params['c_levels'] = np.int(np.ceil(level_scaling * min_diff ** 2))
else:
hilbert_params['c_levels'] = np.ceil(level_scaling * np.abs(alpha_diff_interp.values) ** 2).astype(int)
hilbert_params['c_levels'].loc[:fd_limits[0]] = self.parameters.c_levels
hilbert_params['c_levels'].loc[fd_limits[1]:] = self.parameters.c_levels
        if change == 'hard':
hilbert_params['alpha_0'].loc[:fd_limits[0]] = self.iterative_amplitude['alpha_bright'].loc[:fd_limits[0]]
hilbert_params['alpha_0'].loc[fd_limits[1]:] = self.iterative_amplitude['alpha_dim'].loc[fd_limits[1]:]
else:
hilbert_params['alpha_0'].loc[:fd_limits[0]] = self.iterative_amplitude['alpha_dim'].loc[:fd_limits[0]]
hilbert_params['alpha_0'].loc[fd_limits[1]:] = self.iterative_amplitude['alpha_bright'].loc[fd_limits[1]:]
# hilbert_params = pd.concat([hilbert_params, alpha_dim_interp, alpha_bright_interp], axis=1)
self.alpha_dim_interp = alpha_dim_interp
self.alpha_bright_interp = alpha_bright_interp
self.alpha_diff_interp = alpha_diff_interp
self.hilbert_params = hilbert_params
self.completed = np.zeros(hilbert_params.index.shape[0])
self.attempted = np.zeros(hilbert_params.index.shape[0])
a_array = np.zeros(hilbert_params.index.shape[0], dtype=complex)
self.me_amplitude = pd.DataFrame(a_array, index=hilbert_params.index)
def mf_calculate(self, fd_array, characterise_only=False):
if self.mf_amplitude is None:
self.mf_amplitude = map_mf_jc(self.parameters, fd_array=fd_array, characterise_only=characterise_only)
else:
fd0 = fd_array[0]
fd1 = fd_array[-1]
idx0 = self.mf_amplitude.index.get_loc(fd0, method='nearest')
idx1 = self.mf_amplitude.index.get_loc(fd1, method='nearest')
alpha0_dim = self.mf_amplitude['a_dim'].iloc[idx0]
sm0_dim = self.mf_amplitude['sm_dim'].iloc[idx0]
sz0_dim = self.mf_amplitude['sz_dim'].iloc[idx0]
alpha0_bright = self.mf_amplitude['a_bright'].iloc[idx1]
sm0_bright = self.mf_amplitude['sm_bright'].iloc[idx1]
sz0_bright = self.mf_amplitude['sz_bright'].iloc[idx1]
mf_amplitude_new = mf_characterise_jc(self.parameters, fd_array, alpha0_bright=alpha0_bright,
sm0_bright=sm0_bright, sz0_bright=sz0_bright, alpha0_dim=alpha0_dim,
sm0_dim=sm0_dim, sz0_dim=sz0_dim, check_bistability=False)
self.mf_amplitude = pd.concat([self.mf_amplitude, mf_amplitude_new])
self.mf_amplitude = self.mf_amplitude.sort_index()
self.mf_amplitude = self.mf_amplitude[~self.mf_amplitude.index.duplicated(keep='first')]
def generate_hilbert_params(self, c_levels_bi_scale=1.0, scale=0.5, fd_limits=None, max_shift=True,
c_levels_mono=10, c_levels_bi=10, alpha_0_mono=0, alpha_0_bi=0, kind='linear',
method='extrapolate_alpha_0'):
print(c_levels_bi)
self.hilbert_params = generate_hilbert_params(self.mf_amplitude, c_levels_bi_scale=c_levels_bi_scale,
scale=scale, fd_limits=fd_limits, kind=kind,
max_shift=max_shift, c_levels_mono=c_levels_mono,
c_levels_bi=c_levels_bi, alpha_0_mono=alpha_0_mono,
alpha_0_bi=alpha_0_bi, method=method)
def me_calculate(self, solver_kwargs={}, c_levels_bi_scale=1.0, scale=0.5, fd_limits=None, fill_value='extrapolate',
max_shift=False, c_levels_mono=10, c_levels_bi=10, alpha_0_mono=0, alpha_0_bi=0, kind='linear',
method='extrapolate_alpha_0', level_scaling=1.0, max_levels=True, save_name=None, resume_uncompleted=True):
if self.hilbert_params is None:
            if method == 'iterative':
frequencies = self.iterative_amplitude.index
self.gen_iterative_hilbert_params(fd_limits, kind=kind, fill_value=fill_value, fraction=scale,
level_scaling=level_scaling, max_shift=max_shift, max_levels=max_levels)
else:
frequencies = self.mf_amplitude.index
self.generate_hilbert_params(c_levels_bi_scale=c_levels_bi_scale, scale=scale, max_shift=max_shift,
c_levels_mono=c_levels_mono, c_levels_bi=c_levels_bi,
alpha_0_mono=alpha_0_mono,
alpha_0_bi=alpha_0_bi, fd_limits=fd_limits, kind=kind, method=method)
if self.me_amplitude is None:
self.completed = np.zeros(self.hilbert_params.index.shape[0])
self.attempted = np.zeros(self.hilbert_params.index.shape[0])
a_array = np.zeros(self.hilbert_params.index.shape[0], dtype=complex)
self.me_amplitude = pd.DataFrame(a_array, index=self.hilbert_params.index)
frequencies = self.hilbert_params.index
a_array = self.me_amplitude.values[:,0]
params = deepcopy(self.parameters)
for fd_idx, fd, alpha0, c_levels in tqdm(
zip(np.arange(self.hilbert_params.index.shape[0]), self.hilbert_params.index,
self.hilbert_params['alpha_0'], self.hilbert_params['c_levels'])):
if (resume_uncompleted and self.completed[fd_idx] == 0) or (not resume_uncompleted and self.attempted[fd_idx] == 0):
params.fd = fd
params.c_levels = c_levels
ham = ham_gen_jc(params, alpha=alpha0)
c_ops = c_ops_gen_jc(params, alpha=alpha0)
self.attempted[fd_idx] = 1
try:
rho = steadystate(ham, c_ops, **solver_kwargs)
a = tensor(qeye(2), destroy(params.c_levels)) + alpha0
a_array[fd_idx] = expect(rho, a)
self.me_amplitude = pd.DataFrame(a_array, index=frequencies)
if save_name is not None:
qsave(self, save_name)
self.completed[fd_idx] = 1
except:
print('Failure at fd = ' + str(fd))
a_array[fd_idx] = np.nan
def plot(self, axes=None, mf=True, me=True, db=True, me_kwargs={'marker': 'o'}, mf_kwargs={'marker': 'o'}):
if axes is None:
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.set_xlabel(r'$f_d$ (GHz)')
axes.set_ylabel(r'|$\langle a \rangle$|')
if db:
if me:
if self.me_amplitude is not None:
axes.plot(self.me_amplitude.dropna().index, 20 * np.log10(np.abs(self.me_amplitude.dropna())),
**me_kwargs)
if mf:
if self.mf_amplitude.shape[1] == 1:
axes.plot(self.mf_amplitude.index, 20 * np.log10(np.abs(self.mf_amplitude['a'])), **mf_kwargs)
else:
axes.plot(self.mf_amplitude.index, 20 * np.log10(np.abs(self.mf_amplitude['a_bright'])),
**mf_kwargs)
axes.plot(self.mf_amplitude.index, 20 * np.log10(np.abs(self.mf_amplitude['a_dim'])), **mf_kwargs)
else:
if me:
if self.me_amplitude is not None:
axes.plot(self.me_amplitude.dropna().index, np.abs(self.me_amplitude.dropna()), **me_kwargs)
if mf:
if self.mf_amplitude.shape[1] == 1:
axes.plot(self.mf_amplitude.index, np.abs(self.mf_amplitude['a']), **mf_kwargs)
else:
axes.plot(self.mf_amplitude.index, np.abs(self.mf_amplitude['a_bright']), **mf_kwargs)
axes.plot(self.mf_amplitude.index, np.abs(self.mf_amplitude['a_dim']), **mf_kwargs)
def plot_transmission(self, axes=None, scale=4.851024710399999e-09, exp=True, sim=True, me_kwargs={'marker': 'o'},
mf_kwargs={'marker': 'o'}):
if axes is None:
fig, axes = plt.subplots(1, 1, figsize=(10, 6))
axes.set_ylabel(r'$T_{NA}$ (dB)')
axes.set_xlabel(r'$f_{df}$ (GHz)')
if sim and self.me_amplitude is not None:
self.transmission = scale * np.abs(self.me_amplitude.dropna()) ** 2 / self.parameters.eps ** 2
axes.plot(self.transmission.index, 10 * np.log10(self.transmission), label='Sim', **me_kwargs)
if exp and self.transmission_exp is not None:
axes.plot(self.transmission_exp.index, self.transmission_exp, label='Exp')
def load_exp(self, path):
self.transmission_exp = pd.read_csv(path, dtype=float, header=None).T
self.transmission_exp = self.transmission_exp.set_index(0)
def generate_hilbert_params(mf_amplitude, fd_limits=None, scale=0.5, c_levels_mono=10, c_levels_bi=10, alpha_0_mono=0, alpha_0_bi=0,
c_levels_bi_scale=1.0, max_shift=True, kind='linear', method='extrapolate_alpha_0'):
if 'a_dim' not in mf_amplitude.columns:
hilbert_params = deepcopy(mf_amplitude)
hilbert_params.columns = ['alpha_0']
hilbert_params['c_levels'] = c_levels_mono
    elif method == 'static':
n_frequencies = mf_amplitude.shape[0]
hilbert_params = pd.DataFrame(alpha_0_mono*np.ones([n_frequencies,1],dtype=complex), columns=['alpha_0'], index=mf_amplitude.index)
hilbert_params['c_levels'] = c_levels_mono
if fd_limits is not None:
hilbert_params['c_levels'][fd_limits[0]:fd_limits[1]] = c_levels_bi
hilbert_params['alpha_0'][fd_limits[0]:fd_limits[1]] = alpha_0_bi
else:
mf_amplitude_bistable = mf_amplitude.dropna()
bistable_frequencies = mf_amplitude_bistable.index
alpha_diff_bistable = mf_amplitude_bistable['a_bright'] - mf_amplitude_bistable['a_dim']
alpha_diff_bistable_min = np.min(np.abs(alpha_diff_bistable))
alpha_dim_bistable = mf_amplitude_bistable['a_dim']
if max_shift:
alpha_diff_bistable_unit = alpha_diff_bistable / np.abs(alpha_diff_bistable)
alpha_0_bistable = alpha_dim_bistable + scale * alpha_diff_bistable_min * alpha_diff_bistable_unit
else:
alpha_0_bistable = alpha_dim_bistable + scale * alpha_diff_bistable
if fd_limits is not None:
if method not in ['extrapolate_alpha_0', 'extrapolate_diff']:
raise Exception('Method not recognised.')
bistable_frequencies = mf_amplitude[fd_limits[0]:fd_limits[1]].index
            if method == 'extrapolate_alpha_0':
alpha_0_bistable_re_func = interp1d(alpha_0_bistable.index, alpha_0_bistable.values.real,
fill_value='extrapolate', kind=kind)
alpha_0_bistable_im_func = interp1d(alpha_0_bistable.index, alpha_0_bistable.values.imag,
fill_value='extrapolate', kind=kind)
def alpha_0_bistable_func_single(fd):
return alpha_0_bistable_re_func(fd) + 1j * alpha_0_bistable_im_func(fd)
alpha_0_bistable_func = np.vectorize(alpha_0_bistable_func_single, otypes=[complex])
alpha_0_bistable = alpha_0_bistable_func(bistable_frequencies)
alpha_0_bistable = pd.Series(alpha_0_bistable, index=bistable_frequencies)
            elif method == 'extrapolate_diff':
diff_re_func = interp1d(alpha_diff_bistable.index, alpha_diff_bistable.values.real,
fill_value='extrapolate', kind=kind)
diff_im_func = interp1d(alpha_diff_bistable.index, alpha_diff_bistable.values.imag,
fill_value='extrapolate', kind=kind)
def diff_func_single(fd):
return diff_re_func(fd) + 1j * diff_im_func(fd)
diff_func = np.vectorize(diff_func_single, otypes=[complex])
upper_mf_bistable_fd = mf_amplitude.dropna().index[-1]
if fd_limits[1] < upper_mf_bistable_fd or fd_limits[0] > upper_mf_bistable_fd:
raise Exception('Frequency range does not cover the upper bistability crossover.')
lower_midpoint_frequencies = mf_amplitude[fd_limits[0]:upper_mf_bistable_fd].index
diff_lower = diff_func(lower_midpoint_frequencies)
diff_lower_unit = diff_lower / np.abs(diff_lower)
alpha_dim_lower = mf_amplitude['a_dim'][lower_midpoint_frequencies]
alpha_0_lower = alpha_dim_lower + scale * alpha_diff_bistable_min * diff_lower_unit
alpha_0_lower = pd.Series(alpha_0_lower, index=lower_midpoint_frequencies)
upper_midpoint_frequencies = mf_amplitude[upper_mf_bistable_fd:fd_limits[1]].index[1:]
diff_upper = diff_func(upper_midpoint_frequencies)
diff_upper_unit = diff_upper / np.abs(diff_upper)
alpha_bright_upper = mf_amplitude['a_bright'][upper_midpoint_frequencies]
alpha_0_upper = alpha_bright_upper + (scale - 1) * alpha_diff_bistable_min * diff_upper_unit
alpha_0_upper = pd.Series(alpha_0_upper, index=upper_midpoint_frequencies)
alpha_0_bistable = pd.concat([alpha_0_lower, alpha_0_upper])
fd_lower = alpha_0_bistable.index[0]
fd_upper = alpha_0_bistable.index[-1]
alpha_0_monostable_bright = mf_amplitude['a_bright'].dropna().loc[fd_upper:]
alpha_0_monostable_bright = alpha_0_monostable_bright.iloc[1:]
alpha_0_monostable_dim = mf_amplitude['a_dim'].dropna().loc[:fd_lower]
alpha_0_monostable_dim = alpha_0_monostable_dim.iloc[:-1]
hilbert_params_mono = pd.concat(
[alpha_0_monostable_bright.to_frame('alpha_0'), alpha_0_monostable_dim.to_frame('alpha_0')])
hilbert_params_mono['c_levels'] = c_levels_mono
hilbert_params_bi = alpha_0_bistable.to_frame('alpha_0')
hilbert_params_bi['c_levels'] = int(np.ceil(c_levels_bi_scale * alpha_diff_bistable_min ** 2))
hilbert_params = | pd.concat([hilbert_params_mono, hilbert_params_bi]) | pandas.concat |
import numpy as _np
import pandas as _pd
import logging as _logging
from .gn_io.sinex import _get_snx_matrix, _get_snx_vector, get_variance_factor
from .gn_transform import get_helmert7, transform7
from .gn_io.common import path2bytes
def cova2neq(cova:_np.ndarray, variance_factor):
"""Function to convert COVA matrix to NEQ matrix as per Bernese ADDNEQ"""
neqm = _np.linalg.inv(cova / variance_factor)
# neqv = neqm @ (vec.EST.values - vec.APR.values)
# return neqm, neqv
return neqm
def corr2cova(corr:_np.ndarray) -> _np.ndarray:
"""Converts sinex CORR matrix to COVA using the diagonal STD values"""
D = corr.diagonal() * corr.diagonal()[:,None]
_np.fill_diagonal(corr,1) # fill COVA diagonal with 1 so we only multiply with D to get COVA
return corr * D
def get_neq(path_or_bytes):
snx_bytes = path2bytes(path_or_bytes)
# TODO read and parse sinex header
neq = _get_snx_matrix(path_or_bytes=snx_bytes, stypes=["NEQ"], verbose=False)
vec = b"" # to silence the pylance
if neq is not None:
vec = _get_snx_vector(
path_or_bytes=snx_bytes, stypes=["APR", "EST", "NEQ"], verbose=False,snx_format=None
)
# revisit this vec thing
return neq[0][0], vec # NEQ matrix and vector are present so just return
    # if no NEQ block exists, reconstruct it from the APR/EST COVA or CORR matrices below
_logging.warning(
msg="No NEQ was found. Generating from COVA/CORR as not strict"
)
apr_est = _get_snx_matrix(path_or_bytes=snx_bytes, stypes=["APR", "EST"], verbose=False)
if apr_est is not None:
matrices, stype_dict = apr_est
else:
raise ValueError
variance_factor = get_variance_factor(path_or_bytes)
if variance_factor is None:
variance_factor = 1
_logging.warning(
msg="No variance factor found. Considering it 1"
)
a_e = _get_snx_vector(path_or_bytes = snx_bytes, stypes=["EST", "APR"], verbose=False,snx_format=None)
if a_e is None:
raise ValueError
if not 'APR' in stype_dict.keys():
std = a_e.STD.values
mat_apr = _np.identity(std.shape[0])
_np.fill_diagonal(mat_apr,std*std)
neq_apr = cova2neq(mat_apr,variance_factor)
else:
if stype_dict['APR'] == 'CORR':
neq_apr = cova2neq(corr2cova(matrices[list(stype_dict.keys()).index('APR')]),variance_factor=variance_factor)
elif stype_dict['APR'] == 'COVA': # K_constr
neq_apr = cova2neq(matrices[list(stype_dict.keys()).index('APR')],variance_factor=variance_factor)
elif stype_dict['APR'] == 'INFO': # N_constr
neq_apr = matrices[list(stype_dict.keys()).index('APR')]
else:
raise ValueError
if stype_dict['EST'] == 'CORR':
neq_est = cova2neq(corr2cova(matrices[list(stype_dict.keys()).index('EST')]),variance_factor=variance_factor)
elif stype_dict['EST'] == 'COVA': # K_xx
print('cova2neq')
neq_est = cova2neq(corr2cova(matrices[list(stype_dict.keys()).index('EST')]),variance_factor=variance_factor)
elif stype_dict['EST'] == 'INFO': # N_total
neq_est = matrices[list(stype_dict.keys()).index('EST')]
else:
raise ValueError
neq = neq_est - neq_apr # N_total - N_constr
neqv = neq @ (a_e.EST.values - a_e.APR.values) # (a.APR + _np.linalg.solve(a=neqm,b=neqv)) - a.EST # to check
a_e['NEQ'] = neqv
return neq, a_e
def neq_elim_dim(neq_mat: _np.ndarray, neq_vec: _np.ndarray, i: int):
"""Eliminates 'i' dimension in NEQ system"""
elim_row = neq_mat[i][_np.newaxis]
elim_col = neq_mat[:, i]
elim_centr = elim_col[i]
neq_mat -= (
elim_row * (elim_col / elim_centr)[:, _np.newaxis]
) # division done on 1dim vector first
neq_vec -= elim_col * neq_vec[i] / elim_centr
def prepare_neq(neq_m, vec_apr_neq, frame_of_day):
"""Eliminate the non-XYZ parameters from the NEQ system and align a priori XYZ values to the frame of choice if frame_of_day is given"""
neq_mat = neq_m.copy()
vec_apr_neq = vec_apr_neq.copy()
neq_vec = vec_apr_neq.NEQ.values
xyz_mask = _np.isin(vec_apr_neq.TYPE.values, ["STAX", "STAY", "STAZ"])
idx2elim = vec_apr_neq.index.values[~xyz_mask]
# neq_mat,neq_vec = neq, neq_vec
for i in range(
idx2elim.shape[0]
): # need to be positive and sorted from bigger to smaller
neq_elim_dim(neq_mat, neq_vec, idx2elim[i])
neq_mat_elim = neq_mat[xyz_mask, :][:, xyz_mask]
neq_vec_elim = neq_vec[xyz_mask]
vec_apr_neq = vec_apr_neq[xyz_mask]
aprioris = vec_apr_neq.APR
index_combo = _pd.MultiIndex.from_arrays(
[vec_apr_neq.CODE.values + "_" + vec_apr_neq.PT.values.astype(str), vec_apr_neq.TYPE]
)
aprioris.index = index_combo
aprioris = aprioris.unstack(level=1)
aprioris_missing_mask = aprioris.STAX.values == 0
aprioris_vals_mask = ~aprioris_missing_mask
if aprioris_missing_mask.sum() > 0:
estimates = vec_apr_neq.EST
estimates.index = index_combo
aprioris[aprioris_missing_mask] += estimates.unstack(level=1).values[aprioris_missing_mask]
if frame_of_day is not None:
common = _np.intersect1d(aprioris[aprioris_vals_mask].index.values, frame_of_day.index.values)
hlm = get_helmert7(pt1=frame_of_day.loc[common].iloc[:, :3].values, pt2=aprioris[aprioris_vals_mask].loc[common].values) # could not work if the order of XYZ is changed to YXZ etc
# copy over estimate values to 0-aprioris here and rotate them using the computed hlm coeff
new_aprioris = _pd.DataFrame(data = transform7(xyz_in=aprioris.values, helmert_list=hlm[0][0] * -1),
index = aprioris.index, columns = aprioris.columns)
else:
new_aprioris = aprioris # we later use substr with a mask and fill_value 0 to make all masked values 0
d_apr = new_aprioris.subtract(aprioris[aprioris_vals_mask],fill_value=0).stack()[index_combo].values # the aprioris index is sorted, so need to align with original index
neq_vec_elim -= neq_mat_elim @ d_apr
vec_apr_neq[["APR", "NEQ"]] = _np.vstack([new_aprioris.stack()[index_combo].values, neq_vec_elim]).T # index_combo is needed to preserve the order that may have been changed by stack()
return neq_mat_elim, vec_apr_neq
def insert_neq(master_neq, master_vec_neq, ind, neq, neq_vec):
bool_arr = _np.zeros(shape=master_neq.shape[0], dtype=bool)
bool_arr[ind] = 1
mask = bool_arr[_np.newaxis] * bool_arr[:, _np.newaxis]
master_neq[mask] += neq.flatten() # check the flatten order. Could change to +=1
master_vec_neq[
bool_arr
] += neq_vec # Could chage to +=1 for number of solutions plot
def vec2comboind(vec):
return vec.CODE.values + vec.PT.values + vec.TYPE.values.astype(str)
def get_uniind(vec_list):
buf = []
for vec in vec_list:
buf.append(
vec.set_index(
vec.CODE.values + vec.PT.values.astype(str) + vec.TYPE.values.astype(str)
).APR
)
combo_uni = (
| _pd.concat(buf, axis=1) | pandas.concat |
import xgboost as xgb
import numpy as np
import pandas as pd
from scipy.stats import t, norm
import math
from scipy.special import gammaln, digamma, polygamma
from xgboostlss.utils import *
np.seterr(all="ignore")
########################################################################################################################
################################################# BCT ##########################################################
########################################################################################################################
# When a custom objective is provided XGBoost doesn't know its response function so the user is responsible for making
# the transformation for both objective and custom evaluation metric. For objective with identity link like squared
# error this is trivial, but for other link functions like log link or inverse link the difference is significant.
# For the Python package, the behaviour of the predictions can be controlled by the output_margin parameter in the
# predict function. When using the custom_metric parameter without a custom objective, the metric function will receive
# transformed predictions since the objective is defined by XGBoost. However, when a custom objective is also provided
# along with a custom metric, then both the objective and custom metric will receive raw predictions and hence must be
# transformed using the specified response functions.
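# A minimal sketch of the distinction (hypothetical trained Booster `bst` and DMatrix `dtrain`):
#   raw_preds = bst.predict(dtrain, output_margin=True)        # raw leaf sums, shape (n_obs, n_dist_param)
#   location  = BCT.param_dict()["location"](raw_preds[:, 0])  # response function applied manually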
class BCT():
"""Box-Cox t (BCT) Distribution Class
"""
###
# Specifies the number of distributional parameters
###
@staticmethod
def n_dist_param():
"""Number of distributional parameter.
"""
n_param = 4
return n_param
###
# Parameter Dictionary
###
@staticmethod
def param_dict():
""" Dictionary that holds the name of distributional parameter and their corresponding response functions.
"""
param_dict = {"location": soft_plus,
"scale": soft_plus,
"nu": identity,
"tau": soft_plus
}
return param_dict
###
# Inverse Parameter Dictionary
###
@staticmethod
def param_dict_inv():
""" Dictionary that holds the name of distributional parameter and their corresponding link functions.
"""
param_dict_inv = {"location_inv": soft_plus_inv,
"scale_inv": soft_plus_inv,
"nu_inv": identity,
"tau_inv": soft_plus_inv
}
return param_dict_inv
###
# Starting Values
###
@staticmethod
def initialize(y: np.ndarray):
""" Function that calculates the starting values, for each distributional parameter individually.
y: np.ndarray
Data from which starting values are calculated.
"""
loc_fit = np.nanmean(y)
scale_fit = np.max([((np.nanvar(y, ddof=1) - np.nanmean(y)) / (np.nanmean(y) ** 2)), 0.1])
nu_fit = 0.5
tau_fit = 10
location_init = BCT.param_dict_inv()["location_inv"](loc_fit)
scale_init = BCT.param_dict_inv()["scale_inv"](scale_fit)
nu_init = BCT.param_dict_inv()["nu_inv"](nu_fit)
tau_init = BCT.param_dict_inv()["tau_inv"](tau_fit)
start_values = np.array([location_init, scale_init, nu_init, tau_init])
return start_values
###
# Density Function
###
@staticmethod
def dBCCG(y: np.ndarray, location: np.ndarray, scale: np.ndarray, nu: np.ndarray, log=False):
"""Helper density function.
"""
if len(nu) > 1:
z = np.where(nu != 0, (((y / location) ** nu - 1) / (nu * scale)), np.log(y / location) / scale)
elif nu != 0:
z = (((y / location) ** nu - 1) / (nu * scale))
else:
z = np.log(y / location) / scale
loglik = nu * np.log(y / location) - np.log(scale) - (z ** 2) / 2 - np.log(y) - (np.log(2 * np.pi)) / 2
loglik = loglik - np.log(norm.cdf(1 / (scale * np.abs(nu))))
if log == False:
ft = np.exp(loglik)
else:
ft = loglik
return ft
@staticmethod
def dBCT(y: np.ndarray, location: np.ndarray, scale: np.ndarray, nu: np.ndarray, tau: np.ndarray, log=False):
"""Density function.
"""
if len(nu) > 1:
z = np.where(nu != 0, (((y / location) ** nu - 1) / (nu * scale)), np.log(y / location) / scale)
elif nu != 0:
z = (((y / location) ** nu - 1) / (nu * scale))
else:
z = np.log(y / location) / scale
loglik = (nu - 1) * np.log(y) - nu * np.log(location) - np.log(scale)
fTz = gammaln((tau + 1) / 2) - gammaln(tau / 2) - 0.5 * np.log(tau) - gammaln(0.5)
fTz = fTz - ((tau + 1) / 2) * np.log(1 + (z * z) / tau)
loglik = loglik + fTz - np.log(t.cdf(1 / (scale * np.abs(nu)), df=tau))
if len(tau) > 1:
loglik = np.where(tau > 1e+06, BCT.dBCCG(y, location, scale, nu, log=True), loglik)
elif tau > 1e+06:
loglik = BCT.dBCCG(y, location, scale, nu, log=True)
ft = np.exp(loglik) if log == False else loglik
return ft
###
# Quantile Function
###
@staticmethod
def qBCT(p: float, location: np.ndarray, scale: np.ndarray, nu: np.ndarray, tau: np.ndarray, lower_tail=True,
log_p=False):
"""Quantile function.
"""
if log_p == True:
p = np.exp(p)
else:
p = p
if lower_tail == True:
p = p
else:
p = 1 - p
if len(nu) > 1:
z = np.where((nu <= 0), t.ppf(p * t.cdf(1 / (scale * np.abs(nu)), tau), tau),
t.ppf(1 - (1 - p) * t.cdf(1 / (scale * abs(nu)), tau), tau))
else:
z = t.ppf(p * t.cdf(1 / (scale * np.abs(nu)), tau), tau) if nu <= 0 else t.ppf(
1 - (1 - p) * t.cdf(1 / (scale * abs(nu)), tau), tau)
if len(nu) > 1:
ya = np.where(nu != 0, location * (nu * scale * z + 1) ** (1 / nu), location * np.exp(scale * z))
elif nu != 0:
ya = location * (nu * scale * z + 1) ** (1 / nu)
else:
ya = location * np.exp(scale * z)
return ya
###
# Random variable generation
###
    @staticmethod
    def rBCT(n: int, location: np.ndarray, scale: np.ndarray, nu: np.ndarray, tau: np.ndarray):
"""Random variable generation function.
"""
n = math.ceil(n)
p = np.random.uniform(0, 1, n)
r = BCT.qBCT(p, location=location, scale=scale, nu=nu, tau=tau)
return r
###
# Location Parameter gradient and hessian
###
@staticmethod
def gradient_location(y: np.ndarray, location: np.ndarray, scale: np.ndarray, nu: np.ndarray, tau: np.ndarray, weights: np.ndarray):
"""Calculates Gradient of location parameter.
"""
z = np.where(nu != 0, (((y / location) ** nu - 1) / (nu * scale)), np.log(y / location) / scale)
w = (tau + 1) / (tau + z ** 2)
grad = (w * z) / (location * scale) + (nu / location) * (w * (z ** 2) - 1)
grad = stabilize_derivative(grad, BCT.stabilize)
grad = grad * (-1) * weights
return grad
@staticmethod
def hessian_location(location: np.ndarray, scale: np.ndarray, nu: np.ndarray, tau: np.ndarray, weights: np.ndarray):
"""Calculates Hessian of location parameter.
"""
hes = -(tau + 2 * nu * nu * scale * scale * tau + 1) / (tau + 3)
hes = hes / (location * location * scale * scale)
hes = stabilize_derivative(hes, BCT.stabilize)
hes = hes * (-1) * weights
return hes
###
# Scale Parameter gradient and hessian
###
@staticmethod
def gradient_scale(y: np.ndarray, location: np.ndarray, scale: np.ndarray, nu: np.ndarray, tau: np.ndarray, weights: np.ndarray):
"""Calculates Gradient of scale parameter.
"""
z = np.where(nu != 0, (((y / location) ** nu - 1) / (nu * scale)), np.log(y / location) / scale)
w = (tau + 1) / (tau + z ** 2)
h = t.pdf(1 / (scale * np.abs(nu)), df=tau) / t.cdf(1 / (scale * np.abs(nu)), df=tau)
grad = (w * (z ** 2) - 1) / scale + h / (scale ** 2 * np.abs(nu))
grad = stabilize_derivative(grad, BCT.stabilize)
grad = grad * (-1) * weights
return grad
@staticmethod
def hessian_scale(scale: np.ndarray, tau: np.ndarray, weights: np.ndarray):
"""Calculates Hessian of scale parameter.
"""
hes = -2 * tau / (scale ** 2 * (tau + 3))
hes = stabilize_derivative(hes, BCT.stabilize)
hes = hes * (-1) * weights
return hes
###
# Nu Parameter gradient and hessian
###
@staticmethod
def gradient_nu(y: np.ndarray, location: np.ndarray, scale: np.ndarray, nu: np.ndarray, tau: np.ndarray, weights: np.ndarray):
"""Calculates Gradient of nu parameter.
"""
z = np.where(nu != 0, (((y / location) ** nu - 1) / (nu * scale)), np.log(y / location) / scale)
w = (tau + 1) / (tau + z ** 2)
h = t.pdf(1 / (scale * np.abs(nu)), df=tau) / t.cdf(1 / (scale * np.abs(nu)), df=tau)
grad = ((w * z ** 2) / nu) - np.log(y / location) * (w * z ** 2 + ((w * z) / (scale * nu)) - 1)
grad = grad + np.sign(nu) * h / (scale * nu ** 2)
grad = stabilize_derivative(grad, BCT.stabilize)
grad = grad * (-1) * weights
return grad
@staticmethod
def hessian_nu(scale: np.ndarray, weights: np.ndarray):
"""Calculates Hessian of nu parameter.
"""
hes = -7 * (scale ** 2) / 4
hes = stabilize_derivative(hes, BCT.stabilize)
hes = hes * (-1) * weights
return hes
###
# Tau Parameter gradient and hessian
###
@staticmethod
def gradient_tau(y: np.ndarray, location: np.ndarray, scale: np.ndarray, nu: np.ndarray, tau: np.ndarray, weights: np.ndarray):
"""Calculates Gradient of tau parameter.
"""
z = np.where(nu != 0, (((y / location) ** nu - 1) / (nu * scale)), np.log(y / location) / scale)
w = (tau + 1) / (tau + z ** 2)
j = (np.log(t.cdf(1 / (scale * np.abs(nu)), df=tau + 0.01)) - np.log(
t.cdf(1 / (scale * abs(nu)), df=tau))) / 0.01
grad = -0.5 * np.log(1 + (z ** 2) / tau) + (w * (z ** 2)) / (2 * tau)
grad = grad + 0.5 * digamma((tau + 1) / 2) - 0.5 * digamma(tau / 2) - 1 / (2 * tau) - j
grad = stabilize_derivative(grad, BCT.stabilize)
grad = grad * (-1) * weights
return grad
@staticmethod
def hessian_tau(tau: np.ndarray, weights: np.ndarray):
"""Calculates Hessian of tau parameter.
"""
hes = polygamma(1, ((tau + 1) / 2)) - polygamma(1, tau / 2) + 2 * (tau + 5) / (tau * (tau + 1) * (tau + 3))
hes = hes / 4
hes = np.where(hes < -1e-15, hes, -1e-15)
hes = stabilize_derivative(hes, BCT.stabilize)
hes = hes * (-1) * weights
return hes
###
# Custom Objective Function
###
def Dist_Objective(predt: np.ndarray, data: xgb.DMatrix):
"""A customized objective function to train each distributional parameter using custom gradient and hessian.
"""
target = data.get_label()
        # When num_class != 0, predt has shape (n_obs, n_dist_param): each element in a row is a
        # raw prediction (leaf weight) that has not yet been passed through its response function.
preds_location = BCT.param_dict()["location"](predt[:, 0])
preds_scale = BCT.param_dict()["scale"](predt[:, 1])
preds_nu = BCT.param_dict()["nu"](predt[:, 2])
preds_tau = BCT.param_dict()["tau"](predt[:, 3])
# Weights
if data.get_weight().size == 0:
# Use 1 as weight if no weights are specified
weights = np.ones_like(target, dtype=float)
else:
weights = data.get_weight()
# Initialize Gradient and Hessian Matrices
grad = np.zeros(shape=(len(target), BCT.n_dist_param()))
hess = np.zeros(shape=(len(target), BCT.n_dist_param()))
# Location
grad[:, 0] = BCT.gradient_location(y=target,
location=preds_location,
scale=preds_scale,
nu=preds_nu,
tau=preds_tau,
weights=weights)
hess[:, 0] = BCT.hessian_location(location=preds_location,
scale=preds_scale,
nu=preds_nu,
tau=preds_tau,
weights=weights)
# Scale
grad[:, 1] = BCT.gradient_scale(y=target,
location=preds_location,
scale=preds_scale,
nu=preds_nu,
tau=preds_tau,
weights=weights)
hess[:, 1] = BCT.hessian_scale(scale=preds_scale,
tau=preds_tau,
weights=weights)
# Nu
grad[:, 2] = BCT.gradient_nu(y=target,
location=preds_location,
scale=preds_scale,
nu=preds_nu,
tau=preds_tau,
weights=weights)
hess[:, 2] = BCT.hessian_nu(scale=preds_scale,
weights=weights)
# Tau
grad[:, 3] = BCT.gradient_tau(y=target,
location=preds_location,
scale=preds_scale,
nu=preds_nu,
tau=preds_tau,
weights=weights)
hess[:, 3] = BCT.hessian_tau(tau=preds_tau,
weights=weights)
# Reshaping
grad = grad.flatten()
hess = hess.flatten()
return grad, hess
###
# Custom Evaluation Metric
###
def Dist_Metric(predt: np.ndarray, data: xgb.DMatrix):
"""A customized evaluation metric that evaluates the predictions using the negative log-likelihood.
"""
target = data.get_label()
# Using a custom objective function, the custom metric receives raw predictions which need to be transformed
# with the corresponding response function.
preds_location = BCT.param_dict()["location"](predt[:, 0])
preds_scale = BCT.param_dict()["scale"](predt[:, 1])
preds_nu = BCT.param_dict()["nu"](predt[:, 2])
preds_tau = BCT.param_dict()["tau"](predt[:, 3])
nll = -np.nansum(BCT.dBCT(y=target,
location=preds_location,
scale=preds_scale,
nu=preds_nu,
tau=preds_tau,
log=True)
)
return "NegLogLikelihood", nll
###
# Function for drawing random samples from predicted distribution
###
def pred_dist_rvs(pred_params: pd.DataFrame, n_samples: int, seed: int):
"""
Function that draws n_samples from a predicted response distribution.
pred_params: pd.DataFrame
Dataframe with predicted distributional parameters.
n_samples: int
            Number of samples to draw from the predicted response distribution.
seed: int
Manual seed.
Returns
-------
pd.DataFrame with n_samples drawn from predicted response distribution.
"""
pred_dist_list = []
for i in range(pred_params.shape[0]):
pred_dist_list.append(BCT.rBCT(n=n_samples,
location=np.array([pred_params.loc[i, "location"]]),
scale=np.array([pred_params.loc[i, "scale"]]),
nu=np.array([pred_params.loc[i, "nu"]]),
tau=np.array([pred_params.loc[i, "tau"]])
)
)
        pred_dist = pd.DataFrame(pred_dist_list)
        return pred_dist
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_transform.ipynb (unless otherwise specified).
__all__ = ['TBStep', 'noop_step', 'TBTransform', 'drop_features', 'remove_outlier', 'boxnwhisker_value', 'subset',
'app_cat', 'dummies', 'scale_vars', 'fill_na', 'select', 'apply_function', 'noop_transform']
# Cell
from .utils import *
import pandas as pd
import numpy as np
from pandas.api.types import is_string_dtype, is_numeric_dtype
from sklearn.preprocessing import StandardScaler
from sklearn_pandas import DataFrameMapper
import warnings
import sklearn
from sklearn.exceptions import DataConversionWarning
import pdb
# Cell
#todo: only apply for some features
class TBStep:
def __init__(self, **kargs): pass
def fit(self, *args,**kargs): pass
def transform(self, df, **kargs): pass
def fit_transform(self, df):
self.fit(df)
return self.transform(df)
class noop_step(TBStep):
def transform(self, df): return df
class TBTransform:
def __init__(self, steps):
self.steps = steps
self.features = None
def __repr__(self):
return '\n'.join([str(pos) + ' - '+ str(step) for pos, step in enumerate(self.steps)])
def fit(self, df):
for step in self.steps: step.fit(df)
def transform(self, df):
df = df.copy()
for step in self.steps: df = step.transform(df)
if self.features is None:
self.features = df.columns
self.cons = []; self.cats = []
for feature, value in df.items():
if np.array_equal(np.sort(value.unique()), np.array([0, 1])) or np.array_equal(np.sort(value.unique()), np.array([0])): self.cats.append(feature)
else: self.cons.append(feature)
return df
def append(self, steps): self.steps.append(steps)
def insert(self, index, steps): self.steps.insert(index, steps)
def pop(self, n_pop): self.steps.pop(n_pop)
noop_transform = TBTransform([noop_step()])
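# Illustrative sketch of chaining steps (the dataframe name is an assumption):
#   tfms = TBTransform([app_cat(max_n_cat=15), dummies(), scale_vars()])
#   tfms.fit(raw_df)
#   train_df = tfms.transform(raw_df)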
class drop_features(TBStep):
def __init__(self, features = None):
self.features = features
def __repr__(self):
print_features = ', '.join(to_iter(self.features))
return f'drop {print_features}'
    def fit(self, df, tfms_out=None):
        # TBTransform.fit passes only the dataframe, so tfms_out has to be optional here.
        if tfms_out is not None:
            tfms_out['features'] = [i for i in df.columns if i not in self.features]
            tfms_out['cons'] = [i for i in tfms_out['cons'] if i not in self.features]
def transform(self, df): return df.drop(self.features, axis=1)
class remove_outlier(TBStep):
def __init__(self, features = None):
self.features = features
def __repr__(self):
print_features = ', '.join(to_iter(self.features))
return f'remove outlier of {print_features}'
def fit(self, df):
self.bw_dict = {}
if self.features is None: self.features = df.columns
mask = np.full(df.shape[0], True)
for feature, value in df[self.features].items():
if is_numeric_dtype(value):
self.bw_dict[feature] = {}
Min, _, _, _, Max, _ = boxnwhisker_value(value)
inlier = np.logical_and(value >= Min, value <= Max)
mask = np.logical_and(mask, inlier)
self.mask = mask
def transform(self, df): return df[self.mask]
def boxnwhisker_value(values):
Median = np.median(values)
Q1, Q3 = np.percentile(values, [25,75])
IQR = Q3 - Q1
Min, Max = Q1 - IQR*1.5, Q3 + IQR*1.5
return max(Min, np.min(values)), Q1, Median, Q3, min(Max,np.max(values)), IQR
class subset(TBStep):
def __init__(self, n_sample = None, ratio = 0.3):
self.n_sample = n_sample
self.ratio = ratio
def __repr__(self): return f'select subset with {self.n_sample} samples'
def fit(self, df):
        if self.n_sample is None: self.n_sample = int(self.ratio*df.shape[0])
def transform(self, df): return df.sample(self.n_sample)
class app_cat(TBStep):
def __init__(self, max_n_cat=15, features = None):
self.max_n_cat = max_n_cat
self.features = features
def __repr__(self): return f'apply category with maximum number of distinct value is {self.max_n_cat}'
def fit(self, df):
if self.features is None: self.features = df.columns
self.app_cat_dict = {}
for feature, value in df[self.features].items():
            if is_numeric_dtype(value) and value.dtypes != np.bool_:
if value.nunique()<=self.max_n_cat:
if not np.array_equal(value.unique(), np.array([0, 1])):
self.app_cat_dict[feature] = self.as_category_as_order
else:
if value.nunique()>self.max_n_cat: self.app_cat_dict[feature] = self.as_category_as_codes
elif value.dtypes.name == 'object': self.app_cat_dict[feature] = self.as_category_as_order
elif value.dtypes.name == 'category': self.app_cat_dict[feature] = self.cat_as_order
@staticmethod
def cat_as_order(x): return x.cat.as_ordered()
@staticmethod
def as_category_as_codes(x): return x.astype('category').cat.codes+1
@staticmethod
def as_category_as_order(x): return x.astype('category').cat.as_ordered()
def transform(self, df):
df = df.copy()
for key in self.app_cat_dict.keys(): df[key] = self.app_cat_dict[key](df[key])
return df
class dummies(TBStep):
def __init__(self, dummy_na = True):
self.dummy_na = dummy_na
def __repr__(self): return 'get dummies'
    def transform(self, df):
        df = df.copy()
        df = pd.get_dummies(df, dummy_na=self.dummy_na)
        return df
class scale_vars(TBStep):
def __init__(self, features = None):
warnings.filterwarnings('ignore', category=sklearn.exceptions.DataConversionWarning)
self.features= features
def __repr__(self): return 'scale features'
def fit(self, df):
if self.features is None: self.features = df.columns
self.features = [i for i in self.features if is_numeric_dtype(df[i])]
map_f = [([n],StandardScaler()) for n in df[self.features].columns]
self.mapper = DataFrameMapper(map_f).fit(df[self.features].dropna(axis=0))
def transform(self, df):
df = df.copy()
df[self.mapper.transformed_names_] = self.mapper.transform(df[self.features])
return df
class fill_na(TBStep):
def __init__(self, features = None):
self.na_dict = {}
self.features = features
def __repr__(self):
return 'fill na'
def fit(self, df):
if self.features is None: self.features = df.columns
for feature in self.features:
if is_numeric_dtype(df[feature].values):
                if pd.isnull(df[feature]).sum():
                    self.na_dict[feature] = df[feature].median()
from bokeh.embed import components
from bokeh.resources import CDN
from collections import defaultdict
from datetime import datetime as dt
from datetime import timedelta as td
from highcharts import Highchart
from io import BytesIO
from numpy import exp, cos, linspace, random, array
from pandas_highcharts.core import serialize, json_encode
import base64
import bokeh.plotting as bplt
import json
import matplotlib.pyplot as plt
import mpld3
import os, re, time, glob
import pandas as pd
import string
def read_header(filename):
header = {}
skiprows = 0
with open(filename, 'r') as fh:
for line in fh:
skiprows += 1
if re.match(r'ddd_hh:mm:ss(.*)', line, re.M | re.I):
break
if ':' in line:
key, value = line.replace('#','').strip().split(':', 1)
header[key.replace('(', '').replace(
')', '')] = value.strip().replace('"', '')
if header['Output Filename'].startswith('power'):
header_rows = 2
if header['Output Filename'].startswith('data'):
header_rows = 3
header['skiprows'] = skiprows-header_rows
header['Ref_date'] = dt.strptime(header['Ref_date'], '%d-%B-%Y')
header['rows'] = header_rows
return header
def parse_dates(x, ref_date):
days, time = x.split('_')
hours, minutes, seconds = time.split(':')
delta = td(days = int(days), hours=int(hours),
minutes=int(minutes), seconds=int(seconds))
return ref_date + delta
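# e.g. parse_dates('012_13:45:30', ref_date) -> ref_date + 12 days, 13 hours, 45 minutes, 30 seconds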
def read_file(filename):
header = read_header(filename)
df = pd.read_csv(filename, skiprows=header['skiprows'],
header=list(range(0, header['rows'])), index_col=0)
df['Elapsed time'] = [parse_dates(x, header['Ref_date']) for x in df.index]
df = df.set_index(['Elapsed time'])
return df
def damped_vibrations(t, A, b, w):
return A*exp(-b*t)*cos(w*t)
def compute_png(A, b, w, T, resolution=500):
"""Return filename of plot of the damped_vibration function."""
t = linspace(0, T, resolution+1)
u = damped_vibrations(t, A, b, w)
plt.figure() # needed to avoid adding curves in plot
plt.plot(t, u)
plt.title('A=%g, b=%g, w=%g' % (A, b, w))
figfile = BytesIO()
plt.savefig(figfile, format='png')
figfile.seek(0) # rewind to beginning of file
figdata_png = base64.b64encode(figfile.getvalue())
return figdata_png
def compute_svg(A, b, w, T, resolution=500):
"""Return filename of plot of the damped_vibration function."""
t = linspace(0, T, resolution+1)
u = damped_vibrations(t, A, b, w)
plt.figure() # needed to avoid adding curves in plot
plt.plot(t, u)
plt.title('A=%g, b=%g, w=%g' % (A, b, w))
figfile = BytesIO()
plt.savefig(figfile, format='svg')
figfile.seek(0)
    figdata_svg = figfile.getvalue().decode('utf-8')
    figdata_svg = '<svg' + figdata_svg.split('<svg')[1]
    return figdata_svg
def compute_mpld3(A, b, w, T, resolution=500):
"""Return filename of plot of the damped_vibration function."""
t = linspace(0, T, resolution+1)
u = damped_vibrations(t, A, b, T)
fig, ax = plt.subplots()
ax.plot(t, u)
ax.set_title("A={}, b={}, w={}".format(A, b, w))
html_text = mpld3.fig_to_html(fig)
return html_text
def compute_bokeh(A, b, w, T, resolution=500):
"""Return filename of plot of the damped_vibration function."""
t = linspace(0, T, resolution+1)
u = damped_vibrations(t, A, b, w)
# create a new plot with a title and axis labels
TOOLS = "pan,wheel_zoom,hover,box_zoom,reset,save,box_select,lasso_select"
p = bplt.figure(title="simple line example", tools=TOOLS,
x_axis_label='t', y_axis_label='y', logo=None)
# add a line renderer with legend and line thickness
p.line(t, u, legend="u(t)", line_width=2)
script, div = components(p)
head = """
<link rel="stylesheet"
href="http://cdn.pydata.org/bokeh/release/bokeh-0.9.0.min.css"
type="text/css" />
<script type="text/javascript"
src="http://cdn.pydata.org/bokeh/release/bokeh-0.9.0.min.js">
</script>
<script type="text/javascript">
Bokeh.set_log_level("info");
</script>
"""
return head, script, div
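# Sketch of how the returned pieces could be used in a Flask view (the template and variable
# names are assumptions, not part of this module):
#   from flask import render_template
#   head, script, div = compute_bokeh(A, b, w, T)
#   return render_template('view.html', head=head, script=script, div=div)
# with the template emitting {{ head|safe }}, {{ script|safe }} and {{ div|safe }}.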
def compute_highcharts_simple(A, b, w, T, resolution=500):
"""Return filename of plot of the damped_vibration function."""
t = linspace(0, T, resolution+1)
u = damped_vibrations(t, A, b, T)
d = {'t': t, 'u': u}
df = pd.DataFrame(d)
df.set_index('t', inplace=True)
chart = serialize(df, chart_type='stock', render_to='my-chart',
output_type='json' )
return chart
def compute_highcharts(A, b, w, T, resolution=10000):
"""Return filename of plot of the damped_vibration function."""
t = linspace(0, T, resolution+1)
u = damped_vibrations(t, A, b, T)
d = {'t': t, 'u': u}
df = pd.DataFrame(d)
df.set_index('t', inplace=True)
data = serialize(df, output_type='dict', chart_type='stock',
render_to='my-chart',
)
data['chart']['type'] = 'line'
data['chart']['zoomType'] = 'x'
data['chart']['panning'] = True
data['chart']['panKey'] = 'shift'
data['chart']['plotBackgroundColor'] = '#FCFFC5'
data["plotOptions"] = {
"spline": {
"color": "#FF0000",
"lineWidth": 1,
"states": { "hover": { "lineWidth": 1 } },
"marker": {"enabled": True }
}
}
chart = 'new Highcharts.Chart({});'.format(json_encode(data))
return chart
def timeline_pandas_highcharts():
"""Return filename of plot of the damped_vibration function."""
data = defaultdict(lambda: defaultdict(dict))
    timeline_data = pd.read_csv('timeline.txt')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems
This module is developed by:
<NAME> <<EMAIL>>
This module is under the University of Illinois/NCSA Open Source License.
Please refer to https://github.com/QSD-Group/QSDsan/blob/master/LICENSE.txt
for license details.
'''
# %%
import math
import numpy as np
import pandas as pd
from collections.abc import Iterable
from . import ImpactItem, WasteStream
from ._units_of_measure import auom
from .utils.formatting import format_number as f_num
items = ImpactItem._items
isinstance = isinstance
iter = iter
callable = callable
__all__ = ('LCA',)
class LCA:
'''
For life cycle assessment (LCA) of a System.
Parameters
----------
system : :class:`biosteam.System`
System for which this LCA is conducted for.
lifetime : float
Lifetime of the LCA.
lifetime_unit : str
Unit of lifetime.
uptime_ratio : float
Fraction of time that the plant is operating.
item_quantities : kwargs, :class:`ImpactItem` or str = float/callable or (float/callable, unit)
Other :class:`ImpactItem` objects (e.g., electricity) and their quantities.
Note that callable functions are used so that quantity of items can be updated.
'''
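    # Illustrative sketch (the system object and the "Electricity" item ID are assumptions):
    #   lca = LCA(system=sys, lifetime=10, lifetime_unit='yr', uptime_ratio=0.9,
    #             Electricity=(lambda: annual_kWh*10, 'kWh'))
    #   lca.show()
    #   totals = lca.get_total_impacts()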
__slots__ = ('_system', '_lifetime', '_uptime_ratio',
'_construction_units', '_transportation_units',
'_lca_streams', '_impact_indicators',
'_other_items', '_other_items_f')
def __init__(self, system, lifetime, lifetime_unit='yr', uptime_ratio=1,
**item_quantities):
system.simulate()
self._construction_units = set()
self._transportation_units = set()
self._lca_streams = set()
self._update_system(system)
self._update_lifetime(lifetime, lifetime_unit)
self.uptime_ratio = uptime_ratio
self._other_items = {}
self._other_items_f = {}
for item, val in item_quantities.items():
try:
f_quantity, unit = val # unit provided for the quantity
except:
f_quantity = val
unit = ''
self.add_other_item(item, f_quantity, unit)
def _update_system(self, system):
for u in system.units:
if u.construction:
self._construction_units.add(u)
if u.transportation:
self._transportation_units.add(u)
self._construction_units = sorted(self._construction_units,
key=lambda u: u.ID)
self._transportation_units = sorted(self._transportation_units,
key=lambda u: u.ID)
for s in (i for i in system.feeds+system.products):
if s.impact_item:
self._lca_streams.add(s)
self._lca_streams = sorted(self._lca_streams, key=lambda s: s.ID)
self._system = system
def _update_lifetime(self, lifetime=0., unit='yr'):
if not unit or unit == 'yr':
self._lifetime = float(lifetime)
else:
converted = auom(unit).convert(float(lifetime), 'yr')
self._lifetime = converted
def add_other_item(self, item, f_quantity, unit=''):
'''Add other :class:`ImpactItem` in LCA.'''
if isinstance(item, str):
item = items[item]
fu = item.functional_unit
if not callable(f_quantity):
f = lambda: f_quantity
else:
f = f_quantity
quantity = f()
if unit and unit != fu:
try:
quantity = auom(unit).convert(quantity, fu)
except:
raise ValueError(f'Conversion of the given unit {unit} to '
f'item functional unit {fu} is not supported.')
self._other_items_f[item.ID] = {'item':item, 'f_quantity':f, 'unit':unit}
self.other_items[item.ID] = {'item':item, 'quantity':quantity}
def refresh_other_items(self):
'''Refresh quantities of other items using the given functions.'''
for item_ID, record in self._other_items_f.items():
item, f_quantity, unit = record.values()
self.other_items[item_ID]['quantity'] = f_quantity()
def __repr__(self):
return f'<LCA: {self.system}>'
def show(self, lifetime_unit='yr'):
'''Show basic information of this :class:`LCA` object.'''
lifetime = auom('yr').convert(self.lifetime, lifetime_unit)
info = f'LCA: {self.system} (lifetime {f_num(lifetime)} {lifetime_unit})'
info += '\nImpacts:'
print(info)
if len(self.indicators) == 0:
print(' None')
else:
index = pd.Index((i.ID+' ('+i.unit+')' for i in self.indicators))
df = pd.DataFrame({
'Construction': tuple(self.total_construction_impacts.values()),
'Transportation': tuple(self.total_transportation_impacts.values()),
'WasteStream': tuple(self.total_stream_impacts.values()),
'Others': tuple(self.total_other_impacts.values()),
'Total': tuple(self.total_impacts.values())
},
index=index)
# print(' '*9+df.to_string().replace('\n', '\n'+' '*9))
print(df.to_string())
_ipython_display_ = show
def get_construction_impacts(self, units, time=None, time_unit='hr'):
'''
Return all construction-related impacts for the given unit,
normalized to a certain time frame.
'''
if not isinstance(units, Iterable):
units = (units,)
        if not time:
            ratio = 1
            time = self.lifetime_hr  # also used below when counting lifetime-based replacements
else:
converted = auom(time_unit).convert(float(time), 'hr')
ratio = converted/self.lifetime_hr
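        # Construction impacts are stored for the full lifetime; scale them to the requested
        # window, counting units with their own lifetime once per replacement.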
impacts = dict.fromkeys((i.ID for i in self.indicators), 0.)
for i in units:
for j in i.construction:
impact = j.impacts
if j.lifetime is not None:
factor = math.ceil(time/j.lifetime)
else:
factor = 1.
for m, n in impact.items():
impacts[m] += n*ratio*factor
return impacts
def get_transportation_impacts(self, units, time=None, time_unit='hr'):
'''
Return all transportation-related impacts for the given unit,
normalized to a certain time frame.
'''
if not (isinstance(units, tuple) or isinstance(units, list)
or isinstance(units, set)):
units = (units,)
if not time:
time = self.lifetime_hr
else:
time = auom(time_unit).convert(float(time), 'hr')
impacts = dict.fromkeys((i.ID for i in self.indicators), 0.)
for i in units:
for j in i.transportation:
impact = j.impacts
for m, n in impact.items():
impacts[m] += n*time/j.interval
return impacts
def get_stream_impacts(self, stream_items=None, exclude=None,
kind='all', time=None, time_unit='hr'):
'''
Return all stream-related impacts for the given streams,
normalized to a certain time frame.
'''
        if stream_items is None:
            stream_items = self.stream_inventory
        if not (isinstance(stream_items, tuple) or isinstance(stream_items, list)
                or isinstance(stream_items, set)):
            stream_items = (stream_items,)
        if not (isinstance(exclude, tuple) or isinstance(exclude, list)
                or isinstance(exclude, set)):
            exclude = (exclude,)
impacts = dict.fromkeys((i.ID for i in self.indicators), 0.)
if not time:
time = self.lifetime_hr
else:
time = auom(time_unit).convert(float(time), 'hr')
for j in stream_items:
# In case that ws instead of the item is given
if isinstance(j, WasteStream):
ws = j
if j.impact_item:
j = ws.impact_item
else: continue
else:
ws = j.linked_stream
if ws in exclude: continue
for m, n in j.CFs.items():
if kind == 'all':
pass
elif kind == 'direct_emission':
n = max(n, 0)
elif kind == 'offset':
n = min(n, 0)
else:
raise ValueError('kind can only be "all", "direct_emission", or "offset", '
f'not {kind}.')
impacts[m] += n*time*ws.F_mass
return impacts
def get_other_impacts(self):
'''
Return all additional impacts from "other" :class:`ImpactItems` objects,
based on defined quantity.
'''
self.refresh_other_items()
impacts = dict.fromkeys((i.ID for i in self.indicators), 0.)
other_dct = self.other_items
for i in other_dct.keys():
item = items[i]
for m, n in item.CFs.items():
impacts[m] += n*other_dct[i]['quantity']
return impacts
def get_total_impacts(self, exclude=None, time=None, time_unit='hr'):
'''Return total impacts, normalized to a certain time frame.'''
impacts = dict.fromkeys((i.ID for i in self.indicators), 0.)
ws_impacts = self.get_stream_impacts(stream_items=self.stream_inventory,
exclude=exclude, time=time, time_unit=time_unit)
for i in (self.total_construction_impacts,
self.total_transportation_impacts,
ws_impacts,
self.total_other_impacts):
for m, n in i.items():
impacts[m] += n
return impacts
def get_allocated_impacts(self, streams=(), allocate_by='mass'):
'''
Allocate total impacts to one or multiple streams.
Parameters
----------
streams : :class:`WasteStream` or sequence
One or a sequence of streams. Note that impacts of these streams will be
excluded in calculating the total impacts.
        allocate_by : str, sequence, or function to generate a sequence
            If provided as a str, can be "mass", "energy", or "value" to allocate
            the impacts accordingly.
            If provided as a sequence (no need to normalize it so that it sums to 1),
            will allocate impacts according to the sequence.
            If provided as a function, will call the function to generate a
            sequence and allocate the impacts accordingly.
.. note::
            Energy of the stream will be calculated as the sum of HHVs of all components
in the stream.
'''
if not (isinstance(streams, tuple) or isinstance(streams, list)
or isinstance(streams, set)):
streams = (streams,)
impact_dct = self.get_total_impacts(exclude=streams)
impact_vals = np.array([i for i in impact_dct.values()])
allocated = {}
if len(streams) == 1:
return impact_dct
if allocate_by == 'mass':
ratios = np.array([i.F_mass for i in streams])
elif allocate_by == 'energy':
ratios = np.array([i.HHV for i in streams])
elif allocate_by == 'value':
ratios = np.array([i.F_mass*i.price for i in streams])
        elif callable(allocate_by):
            ratios = np.asarray(allocate_by())
        elif isinstance(allocate_by, Iterable):
            ratios = np.asarray(allocate_by)
        else:
            raise ValueError('allocate_by can only be "mass", "energy", "value", '
                             'a sequence, or a function to generate a sequence.')
if ratios.sum() == 0:
raise ValueError('Calculated allocation ratios are all zero, cannot allocate.')
ratios = ratios/ratios.sum()
for n, ws in enumerate(streams):
if not ws in self.system.streams:
raise ValueError(f'`WasteStream` {ws} not in the system.')
            allocated[ws.ID] = dict(zip(impact_dct.keys(), ratios[n]*impact_vals))
return allocated
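    # Sketch (the stream names are assumptions): split the total impacts between two product
    # streams either by mass or with an explicit ratio sequence:
    #   lca.get_allocated_impacts(streams=(effluent, sludge), allocate_by='mass')
    #   lca.get_allocated_impacts(streams=(effluent, sludge), allocate_by=(0.8, 0.2))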
def get_unit_impacts(self, units, time=None, time_unit='hr',
exclude=None):
'''Return total impacts with certain units, normalized to a certain time frame. '''
if not (isinstance(units, tuple) or isinstance(units, list)
or isinstance(units, set)):
units = (units,)
constr = self.get_construction_impacts(units, time, time_unit)
trans = self.get_transportation_impacts(units, time, time_unit)
ws_items = set(i for i in
sum((tuple(unit.ins+unit.outs) for unit in units), ())
if i.impact_item)
ws = self.get_stream_impacts(stream_items=ws_items, exclude=exclude,
time=time, time_unit=time_unit)
other = self.get_other_impacts()
tot = constr.copy()
for m in tot.keys():
tot[m] += trans[m] + ws[m] + other[m]
return tot
def _append_cat_sum(self, cat_table, cat, tot):
num = len(cat_table)
cat_table.loc[num] = ''
for i in self.indicators:
cat_table[f'{i.ID} [{i.unit}]'][num] = tot[i.ID]
cat_table[f'Category {i.ID} Ratio'][num] = 1
if cat in ('construction', 'transportation'):
cat_table.rename(index={num: ('Sum', 'All')}, inplace=True)
cat_table.index = \
pd.MultiIndex.from_tuples(cat_table.index,
names=[cat.capitalize(), 'SanUnit'])
else:
cat_table.rename(index={num: 'Sum'}, inplace=True)
return cat_table
def get_impact_table(self, category=None, time=None, time_unit='hr'):
'''
Return a :class:`pandas.DataFrame` table for the given impact category,
normalized to a certain time frame.
'''
if not time:
time = self.lifetime_hr
else:
time = auom(time_unit).convert(float(time), 'hr')
        cat = category.lower()
        tot = getattr(self, f'total_{cat}_impacts')
        if category in ('Construction', 'Other'):
            time = time/self.lifetime_hr
        if category in ('Construction', 'Transportation'):
units = sorted(getattr(self, f'_{cat}_units'),
key=(lambda su: su.ID))
items = sorted(set(i.item for i in getattr(self, f'{cat}_inventory')),
key=(lambda item: item.ID))
if len(items) == 0:
return f'No {cat}-related impacts.'
# Note that item_dct = dict.fromkeys([item.ID for item in items], []) won't work
item_dct = dict.fromkeys([item.ID for item in items])
for item_ID in item_dct.keys():
item_dct[item_ID] = dict(SanUnit=[], Quantity=[])
for su in units:
for i in getattr(su, cat):
item_dct[i.item.ID]['SanUnit'].append(su.ID)
item_dct[i.item.ID]['Quantity'].append(i.quantity*time)
if cat == 'transportation':
item_dct[i.item.ID]['Quantity'][-1] /= i.interval
dfs = []
for item in items:
dct = item_dct[item.ID]
dct['SanUnit'].append('Total')
dct['Quantity'] = np.array(dct['Quantity'])
dct['Quantity'] = np.append(dct['Quantity'], dct['Quantity'].sum())
dct['Item Ratio'] = dct['Quantity']/dct['Quantity'].sum()*2
for i in self.indicators:
if i.ID in item.CFs:
dct[f'{i.ID} [{i.unit}]'] = impact = dct['Quantity']*item.CFs[i.ID]
dct[f'Category {i.ID} Ratio'] = impact/tot[i.ID]
else:
dct[f'{i.ID} [{i.unit}]'] = dct[f'Category {i.ID} Ratio'] = 0
df = pd.DataFrame.from_dict(dct)
index0 = f'{item.ID} [{item.functional_unit}]'
df.set_index([pd.MultiIndex.from_arrays(
[(index0,)*len(dct['SanUnit'])], names=(category,)),
'SanUnit'],
inplace=True)
dfs.append(df)
table = pd.concat(dfs)
return self._append_cat_sum(table, cat, tot)
ind_head = sum(([f'{i.ID} [{i.unit}]',
f'Category {i.ID} Ratio'] for i in self.indicators), [])
if category == 'Stream':
headings = ['Stream', 'Mass [kg]', *ind_head]
item_dct = dict.fromkeys(headings)
for key in item_dct.keys():
item_dct[key] = []
for ws_item in self.stream_inventory:
ws = ws_item.linked_stream
item_dct['Stream'].append(ws.ID)
mass = ws.F_mass * time
item_dct['Mass [kg]'].append(mass)
for ind in self.indicators:
if ind.ID in ws_item.CFs.keys():
impact = ws_item.CFs[ind.ID]*mass
item_dct[f'{ind.ID} [{ind.unit}]'].append(impact)
item_dct[f'Category {ind.ID} Ratio'].append(impact/tot[ind.ID])
else:
item_dct[f'{ind.ID} [{ind.unit}]'].append(0)
item_dct[f'Category {ind.ID} Ratio'].append(0)
table = pd.DataFrame.from_dict(item_dct)
table.set_index(['Stream'], inplace=True)
return self._append_cat_sum(table, cat, tot)
elif category == 'Other':
headings = ['Other', 'Quantity', *ind_head]
item_dct = dict.fromkeys(headings)
for key in item_dct.keys():
item_dct[key] = []
for other_ID in self.other_items.keys():
other = self.other_items[other_ID]['item']
item_dct['Other'].append(f'{other_ID} [{other.functional_unit}]')
quantity = self.other_items[other_ID]['quantity'] * time
item_dct['Quantity'].append(quantity)
for ind in self.indicators:
if ind.ID in other.CFs.keys():
impact = other.CFs[ind.ID]*quantity
item_dct[f'{ind.ID} [{ind.unit}]'].append(impact)
item_dct[f'Category {ind.ID} Ratio'].append(impact/tot[ind.ID])
else:
item_dct[f'{ind.ID} [{ind.unit}]'].append(0)
item_dct[f'Category {ind.ID} Ratio'].append(0)
            table = pd.DataFrame.from_dict(item_dct)
            table.set_index(['Other'], inplace=True)
            return self._append_cat_sum(table, cat, tot)
import numpy as np
import pandas as pd
from copy import deepcopy
def main(net):
'''
calculate pvalue of category closeness
'''
# calculate the distance between the data points within the same category and
# compare to null distribution
for inst_rc in ['row', 'col']:
inst_nodes = deepcopy(net.dat['nodes'][inst_rc])
inst_index = deepcopy(net.dat['node_info'][inst_rc]['clust'])
# reorder based on clustered order
inst_nodes = [ inst_nodes[i] for i in inst_index]
# make distance matrix dataframe
dm = dist_matrix_lattice(inst_nodes)
node_infos = list(net.dat['node_info'][inst_rc].keys())
all_cats = []
for inst_info in node_infos:
if 'dict_cat_' in inst_info:
all_cats.append(inst_info)
for cat_dict in all_cats:
tmp_dict = net.dat['node_info'][inst_rc][cat_dict]
pval_name = cat_dict.replace('dict_','pval_')
net.dat['node_info'][inst_rc][pval_name] = {}
for cat_name in tmp_dict:
subset = tmp_dict[cat_name]
inst_median = calc_median_dist_subset(dm, subset)
hist = calc_hist_distances(dm, subset, inst_nodes)
pval = 0
for i in range(len(hist['prob'])):
if i == 0:
pval = hist['prob'][i]
if i >= 1:
if inst_median >= hist['bins'][i]:
pval = pval + hist['prob'][i]
net.dat['node_info'][inst_rc][pval_name][cat_name] = pval
def dist_matrix_lattice(names):
from scipy.spatial.distance import pdist, squareform
lattice_size = len(names)
mat = np.zeros([lattice_size, 1])
mat[:,0] = list(range(lattice_size))
inst_dm = pdist(mat, metric='euclidean')
inst_dm[inst_dm < 0] = float(0)
inst_dm = squareform(inst_dm)
df = pd.DataFrame(data=inst_dm, columns=names, index=names)
return df
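# e.g. dist_matrix_lattice(['a', 'b', 'c']) returns the pairwise position distances:
#        a    b    c
#   a  0.0  1.0  2.0
#   b  1.0  0.0  1.0
#   c  2.0  1.0  0.0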
def calc_median_dist_subset(dm, subset):
    return np.median(dm[subset].loc[subset].values)
def calc_hist_distances(dm, subset, inst_nodes):
np.random.seed(100)
num_null = 1000
num_points = len(subset)
median_dist = []
for i in range(num_null):
tmp = np.random.choice(inst_nodes, num_points, replace=False)
        median_dist.append( np.median(dm[tmp].loc[tmp].values) )
tmp_dist = sorted(deepcopy(median_dist))
median_dist = np.asarray(median_dist)
    s1 = pd.Series(median_dist)
    # summarize the null distribution of medians as a normalized histogram
    counts, bins = np.histogram(s1, bins=30)
    hist = {'prob': counts / counts.sum(), 'bins': bins}
    return hist
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from tests.test_base import BaseTest
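# For reference, a rough sketch of the public API these tests exercise (not executed here;
# the arm values and policy parameters are arbitrary):
#   mab = MAB(arms=[1, 2], learning_policy=LearningPolicy.EpsilonGreedy(epsilon=0.1))
#   mab.fit(decisions=[1, 1, 2], rewards=[0, 1, 0])
#   mab.predict()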
class MABTest(BaseTest):
#################################################
# Test context free predict() method
################################################
def test_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
#################################################
# Test context free predict_expectation() method
################################################
def test_exp_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_context_history_series(self):
contexts = pd.DataFrame({'column1': [1, 2, 3], 'column2': [2, 3, 1]})
for lp in BaseTest.para_lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)
for cp in BaseTest.nps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
for cp in BaseTest.cps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1],
rewards=[0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=[[1]],
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
def test_context_series(self):
contexts = pd.DataFrame({'column1': [1, 2, 3, 3, 2, 1], 'column2': [2, 3, 1, 1, 2, 3]})
for lp in BaseTest.para_lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(mab._imp.arm_to_model[0].beta.shape[0], 1)
for cp in BaseTest.nps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
for cp in BaseTest.cps:
for lp in BaseTest.lps:
arm, mab = self.predict(arms=[0, 1],
decisions=[1, 1, 1, 1, 1, 1],
rewards=[0, 0, 0, 0, 0, 0],
learning_policy=lp,
neighborhood_policy=cp,
context_history=contexts['column1'],
contexts=pd.Series([1]),
seed=123456,
num_run=1,
is_predict=True)
self.assertEqual(np.ndim(mab._imp.contexts), 2)
#################################################
# Test contextual predict() method
################################################
def test_context_arm_list_int(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_arm_list_str(self):
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C", "D"],
decisions=["A", "A", "A", "B", "B", "C", "C", "C", "C", "C"],
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, -2, 2, 3, 11], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, -5, 2, 3, 10], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, -2, 4, 3, 9], [20, 19, 18, 17, 16], [1, 2, 1, 1, 3],
[17, 18, 17, 19, 18]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_decision_series(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=[0, 1, 1, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_reward_series(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.cps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=[1, 1, 1, 2, 2, 3, 3, 3, 3, 3],
rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
neighborhood_policy=cp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_context_decision_reward_series(self):
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
[0, 2, 1, 0, 0]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
for cp in MABTest.nps:
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3, 4],
decisions=pd.Series([1, 1, 1, 2, 2, 3, 3, 3, 3, 3]),
                             rewards=pd.Series([0, 1, 1, 0, 0, 0, 0, 1, 1, 1]),
                             learning_policy=lp,
                             neighborhood_policy=cp,
                             context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
                                              [0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
                                              [0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3],
                                              [0, 2, 1, 0, 0]],
                             contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
                             seed=123456,
                             num_run=1,
                             is_predict=True)
from os.path import exists, join
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import trange
from math import ceil
from traitlets import Dict, List
from ctapipe.core import Tool
from targetpipe.fitting.spe_sipm import sipm_spe_fit
from targetpipe.fitting.chec import CHECSSPEFitter, CHECSSPEMultiFitter
from targetpipe.plots.official import ThesisPlotter
from IPython import embed
def get_params(lambda_=1):
params = dict(
norm=1000,
eped=-0.6,
eped_sigma=0.4,
spe=1.4,
spe_sigma=0.2,
lambda_=lambda_,
opct=0.6,
pap=0.3,
dap=0.4
)
return params.copy()
def get_params_multi(params1, params2, params3):
params_multi = dict(
norm1=params1['norm'],
norm2=params2['norm'],
norm3=params3['norm'],
eped=params1['eped'],
eped_sigma=params1['eped_sigma'],
spe=params1['spe'],
spe_sigma=params1['spe_sigma'],
lambda_1=params1['lambda_'],
lambda_2=params2['lambda_'],
lambda_3=params3['lambda_'],
opct=params1['opct'],
pap=params1['pap'],
dap=params1['dap']
)
return params_multi.copy()
def get_initial(lambda_=1):
params = dict(
norm=None,
eped=-0.5,
eped_sigma=0.5,
spe=1,
spe_sigma=0.1,
lambda_=lambda_,
opct=0.5,
pap=0.5,
dap=0.5
)
return params.copy()
def get_initial_multi(initial1, initial2, initial3):
params_multi = dict(
norm1=initial1['norm'],
norm2=initial2['norm'],
norm3=initial3['norm'],
eped=initial1['eped'],
eped_sigma=initial1['eped_sigma'],
spe=initial1['spe'],
spe_sigma=initial1['spe_sigma'],
lambda_1=initial1['lambda_'],
lambda_2=initial2['lambda_'],
lambda_3=initial3['lambda_'],
opct=initial1['opct'],
pap=initial1['pap'],
dap=initial1['dap']
)
return params_multi.copy()
def sample_distribution(x, params, n=30000):
y = sipm_spe_fit(x, **params)
    samples = np.random.choice(x, n, p=y / y.sum())
return samples, y
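# e.g. (sketch): x = np.linspace(-3, 10, 1000); samples, y = sample_distribution(x, get_params(1.2))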
class FitPlotter(ThesisPlotter):
def __init__(self, config, tool, **kwargs):
super().__init__(config, tool, **kwargs)
self.figures = dict()
def plot(self):
# Create the fitters
range_ = [-3, 10]
nbins = 100
fitter1 = CHECSSPEFitter(self.config, self.parent)
fitter1.range = range_
fitter1.nbins = nbins
fitter1.initial = get_initial(1)
fitter2 = CHECSSPEFitter(self.config, self.parent)
fitter2.range = range_
fitter2.nbins = nbins
fitter2.initial = get_initial(2)
fitter3 = CHECSSPEFitter(self.config, self.parent)
fitter3.range = range_
fitter3.nbins = nbins
fitter3.initial = get_initial(3)
fitter_multi = CHECSSPEMultiFitter(self.config, self.parent)
fitter_multi.range = range_
fitter_multi.nbins = nbins
fitter_multi.initial = get_initial_multi(fitter1.initial, fitter2.initial, fitter3.initial)
# Generate the functions
found_good = False
found_bad = False
i = 0
while not found_good or not found_bad:
self.log.info("FitPlotter: Attempt {}".format(i))
i += 1
params1 = get_params(1.2)
params2 = get_params(1.7)
params3 = get_params(3.1)
x = np.linspace(-3, 10, 1000)
samples1, y1 = sample_distribution(x, params1)
samples2, y2 = sample_distribution(x, params2)
samples3, y3 = sample_distribution(x, params3)
params_multi = get_params_multi(params1, params2, params3)
fitter1.apply(samples1)
p1 = fitter1.p_value
fitter2.apply(samples2)
p2 = fitter2.p_value
fitter3.apply(samples3)
p3 = fitter3.p_value
fitter_multi.apply_multi(samples1, samples2, samples3)
pm = fitter_multi.p_value
print(pm, p1, p2, p3)
if (pm > p1) & (pm > p2) & (pm > p3) & (p1 < 0.0001):
if found_good:
continue
self.log.info("FitPlotter: Found good")
found_good = True
desc = "good"
elif (pm < 0.001) & (p3 > 0.001):
if found_bad:
continue
self.log.info("FitPlotter: Found bad")
found_bad = True
desc = "bad"
else:
continue
fig_individual = plt.figure(figsize=(13, 6))
fig_individual.suptitle("Individual Fit")
ax1 = plt.subplot2grid((3, 2), (0, 0))
ax1_t = plt.subplot2grid((3, 2), (0, 1))
ax2 = plt.subplot2grid((3, 2), (1, 0))
ax2_t = plt.subplot2grid((3, 2), (1, 1))
ax3 = plt.subplot2grid((3, 2), (2, 0))
ax3_t = plt.subplot2grid((3, 2), (2, 1))
self.individual_plot(x, y1, params1, samples1, fitter1, ax1, ax1_t, True)
self.individual_plot(x, y2, params2, samples2, fitter2, ax2, ax2_t)
self.individual_plot(x, y3, params3, samples3, fitter3, ax3, ax3_t)
name = "fit_" + desc + "_individual"
self.figures[name] = fig_individual
fig_multi = plt.figure(figsize=(13, 6))
fig_multi.suptitle("Multi Fit")
ax1 = plt.subplot2grid((3, 2), (0, 0))
ax2 = plt.subplot2grid((3, 2), (1, 0))
ax3 = plt.subplot2grid((3, 2), (2, 0))
ax_mt = plt.subplot2grid((3, 2), (0, 1), rowspan=3)
self.multi_plot(x, [y1, y2, y3], params_multi, [samples1, samples2, samples3], fitter_multi, [ax1, ax2, ax3], ax_mt)
name = "fit_" + desc + "_multi"
self.figures[name] = fig_multi
def save(self, output_path=None):
for name, fig in self.figures.items():
self.fig = fig
self.figure_name = name
super().save(output_path)
@staticmethod
def individual_plot(x, y, params, samples, fitter, ax_p, ax_t, legend=False):
hist = fitter.hist
edges = fitter.edges
between = fitter.between
coeff = fitter.coeff.copy()
coeffl = fitter.coeff_list.copy()
initial = fitter.initial.copy()
fit = fitter.fit_function(x, **coeff)
rc2 = fitter.reduced_chi2
pval = fitter.p_value
ax_p.plot(x, y, label="Base")
ax_p.hist(between, bins=edges, weights=hist, histtype='step', label="Hist")
ax_p.plot(x, fit, label="Fit")
td = [['%.3f' % params[i], initial[i], '%.3f' % coeff[i]] for i in coeffl]
td.append(["", "", '%.3g' % rc2])
td.append(["", "", '%.3g' % pval])
tr = coeffl
tr.append("Reduced Chi^2")
tr.append("P-Value")
tc = ['Base', 'Initial', 'Fit']
ax_t.axis('off')
table = ax_t.table(cellText=td, rowLabels=tr, colLabels=tc, loc='center')
table.set_fontsize(6)
table.scale(0.7, 0.7)
if legend:
ax_p.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
@staticmethod
def multi_plot(x, y_list, params, samples_list, fitter, ax_list, ax_t):
y1, y2, y3 = y_list
samples1, samples2, samples3 = samples_list
ax1, ax2, ax3 = ax_list
hist1, hist2, hist3 = fitter.hist
edges = fitter.edges
between = fitter.between
coeff = fitter.coeff.copy()
coeffl = fitter.coeff_list.copy()
initial = fitter.initial.copy()
fit1, fit2, fit3 = fitter.fit_function(x, **coeff)
rc2 = fitter.reduced_chi2
pval = fitter.p_value
ax1.plot(x, y1, label="Base")
ax1.hist(between, bins=edges, weights=hist1, histtype='step', label="Hist")
ax1.plot(x, fit1, label="Fit")
ax1.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
ax2.plot(x, y2, label="Base")
ax2.hist(between, bins=edges, weights=hist2, histtype='step', label="Hist")
ax2.plot(x, fit2, label="Fit")
ax3.plot(x, y3, label="Base")
ax3.hist(between, bins=edges, weights=hist3, histtype='step', label="Hist")
ax3.plot(x, fit3, label="Fit")
ax_t.axis('off')
td = [['%.3f' % params[i], initial[i], '%.3f' % coeff[i]] for i in coeffl]
td.append(["", "", '%.3g' % rc2])
td.append(["", "", '%.3g' % pval])
tr = coeffl
tr.append("Reduced Chi^2")
tr.append("P-Value")
tc = ['Base', 'Initial', 'Fit']
table = ax_t.table(cellText=td, rowLabels=tr, colLabels=tc, loc='center')
table.set_fontsize(6)
class NoInitialPlotter(ThesisPlotter):
def __init__(self, config, tool, **kwargs):
super().__init__(config, tool, **kwargs)
self.figures = dict()
self.dataset_path = self.output_path + "_data.h5"
self.initial1 = 1
self.initial2 = 1
self.initial3 = 1
self.figures = {}
def plot(self):
df = self.load_dataset()
        pcols = ['p1', 'p2', 'p3', 'pm']
        df[pcols] = df[pcols].where(df[pcols] > 0.01)
        df = df.groupby('x').count().reset_index()
x = df['x']
y1 = df['p1']
y2 = df['p2']
y3 = df['p3']
ym = df['pm']
x = ['%.3f\n%.3f\n%.3f\n' % (i[0], i[1], i[2]) for i in x]
self.fig, self.ax = self.create_figure()
self.add_points(x, y1, "Individual1")
self.add_points(x, y2, "Individual2")
self.add_points(x, y3, "Individual3")
self.add_points(x, ym, "Multi")
self.ax.set_xlabel("lambda")
self.ax.set_ylabel("Number of signficant p-values")
self.ax.legend(loc=1, frameon=True, fancybox=True, framealpha=0.7)
self.figures[self.figure_name + "_p"] = self.fig
def add_points(self, x, y, label, p='-'):
x_i = np.arange(len(x))
self.ax.plot(x_i, y, p, label=label)
self.ax.set_xticks(x_i)
self.ax.set_xticklabels(x)
def add_points_err(self, x, y, y_err, label):
x_i = np.arange(len(x))
(_, caps, _) = self.ax.errorbar(x_i, y, xerr=None, yerr=y_err, fmt='o',
mew=0.5, label=label,
markersize=3, capsize=3)
for cap in caps:
cap.set_markeredgewidth(1)
self.ax.set_xticks(x_i)
self.ax.set_xticklabels(x)
def save(self, output_path=None):
for name, fig in self.figures.items():
self.figure_name = name
self.fig = fig
super().save(output_path)
def load_dataset(self):
if exists(self.dataset_path):
store = pd.HDFStore(self.dataset_path)
df = store['df']
else:
df = self.create_dataset()
store = pd.HDFStore(self.dataset_path)
store['df'] = df
return df
def create_dataset(self):
df_list = []
# Create the fitters
range_ = [-3, 10]
nbins = 100
fitter1 = CHECSSPEFitter(self.config, self.parent)
fitter1.range = range_
fitter1.nbins = nbins
fitter1.initial = get_initial(1)
fitter2 = CHECSSPEFitter(self.config, self.parent)
fitter2.range = range_
fitter2.nbins = nbins
fitter2.initial = get_initial(1)
fitter3 = CHECSSPEFitter(self.config, self.parent)
fitter3.range = range_
fitter3.nbins = nbins
fitter3.initial = get_initial(1)
fitter_multi = CHECSSPEMultiFitter(self.config, self.parent)
fitter_multi.range = range_
fitter_multi.nbins = nbins
fitter_multi.initial = get_initial_multi(fitter1.initial, fitter2.initial, fitter3.initial)
lambda_1 = np.linspace(0.3, 1.5, 10)
lambda_2 = np.linspace(0.5, 3, 10)
lambda_3 = np.linspace(0.7, 4.5, 10)
for i in trange(10):
params1 = get_params(lambda_1[i])
params2 = get_params(lambda_2[i])
params3 = get_params(lambda_3[i])
params_multi = get_params_multi(params1, params2, params3)
x = np.linspace(-3, 10, 1000)
for j in trange(100):
samples1, y1 = sample_distribution(x, params1)
samples2, y2 = sample_distribution(x, params2)
samples3, y3 = sample_distribution(x, params3)
fitter1.apply(samples1)
p1 = fitter1.p_value
fitter2.apply(samples2)
p2 = fitter2.p_value
fitter3.apply(samples3)
p3 = fitter3.p_value
fitter_multi.apply_multi(samples1, samples2, samples3)
pm = fitter_multi.p_value
df_list.append(dict(x=(lambda_1[i], lambda_2[i], lambda_3[i]),
p1=p1, p2=p2, p3=p3, pm=pm))
df = pd.DataFrame(df_list)
return df
class WithInitialPlotter(NoInitialPlotter):
def create_dataset(self):
df_list = []
# Create the fitters
range_ = [-3, 10]
nbins = 100
fitter1 = CHECSSPEFitter(self.config, self.parent)
fitter1.range = range_
fitter1.nbins = nbins
fitter2 = CHECSSPEFitter(self.config, self.parent)
fitter2.range = range_
fitter2.nbins = nbins
fitter3 = CHECSSPEFitter(self.config, self.parent)
fitter3.range = range_
fitter3.nbins = nbins
fitter_multi = CHECSSPEMultiFitter(self.config, self.parent)
fitter_multi.range = range_
fitter_multi.nbins = nbins
lambda_1 = np.linspace(0.3, 1.5, 10)
lambda_2 = np.linspace(0.5, 3, 10)
lambda_3 = np.linspace(0.7, 4.5, 10)
for i in trange(10):
params1 = get_params(lambda_1[i])
params2 = get_params(lambda_2[i])
params3 = get_params(lambda_3[i])
fitter1.initial = get_initial(round(lambda_1[i]))
fitter2.initial = get_initial(round(lambda_2[i]))
fitter3.initial = get_initial(round(lambda_3[i]))
fitter_multi.initial = get_initial_multi(fitter1.initial,
fitter2.initial,
fitter3.initial)
params_multi = get_params_multi(params1, params2, params3)
x = np.linspace(-3, 10, 1000)
for j in trange(100):
samples1, y1 = sample_distribution(x, params1)
samples2, y2 = sample_distribution(x, params2)
samples3, y3 = sample_distribution(x, params3)
fitter1.apply(samples1)
p1 = fitter1.p_value
fitter2.apply(samples2)
p2 = fitter2.p_value
fitter3.apply(samples3)
p3 = fitter3.p_value
fitter_multi.apply_multi(samples1, samples2, samples3)
pm = fitter_multi.p_value
df_list.append(dict(x=(lambda_1[i], lambda_2[i], lambda_3[i]),
p1=p1, p2=p2, p3=p3, pm=pm))
df = | pd.DataFrame(df_list) | pandas.DataFrame |
#!/usr/bin/python3
import numpy as np
import pandas as pd

# Functions to handle Input
#############################################################################################
def read_csv(file):
# simple function to read data from a file
data = pd.read_csv(file, sep=';')
return data
# Functions to handle string/value conversion
#############################################################################################
# function converts format (DD-)HH:MM:SS to seconds
def ave2sec(x):
if ( '-' in x ):
vals = x.split('-')
times = vals[1].split(':')
sec = 24*3600*int(vals[0])+3600*int(times[0])+60*int(times[1])+int(times[2])
else:
times = x.split(':')
sec = 3600*int(times[0])+60*int(times[1])+int(times[2])
return (sec)
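# A minimal usage sketch, assuming ave2sec() behaves as defined above; the
# day-qualified form "1-02:03:04" is 1 day + 2 h + 3 min + 4 s:
#   24*3600*1 + 3600*2 + 60*3 + 4 = 93784 seconds.
# The checks only run when this script is executed directly.
if __name__ == '__main__':
    assert ave2sec('1-02:03:04') == 93784  # (DD-)HH:MM:SS form
    assert ave2sec('00:10:30') == 630      # plain HH:MM:SS form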
def scalingref(x):
# returns reference scaling factor for MPI jobs based on 1.5 factor:
    # doubling cores should make performance x1.5 (or better)
if int(x) == 1:
ref = 1
else:
ref = np.power(1/1.5,np.log2(int(x)))
return ref
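# A worked example of the scaling reference, under the 1.5x-per-doubling
# assumption above: an 8-core job has log2(8) = 3 doublings, so the reference
# factor is (1/1.5)**3, roughly 0.296 of the single-core value.
if __name__ == '__main__':
    import math
    assert scalingref(1) == 1
    assert math.isclose(scalingref(8), (1 / 1.5) ** 3)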
def rss2g(x):
return int(float(x[:-1]))/1024
def reqmem2g(x):
return int(float(x[:-2]))/1024
# Functions to handle DataFrames
#############################################################################################
def parse_df(data):
# convert multi-line DataFrame to more compact form for analysis
import datetime
from dateutil import parser
    data[['id','subid']] = data.JobID.str.split('_', n=1, expand=True)
data.drop(['subid'],axis=1, inplace=True)
df=pd.DataFrame()
    df=data[~data['JobID'].str.contains(r"\.")]
df.rename(columns={'State': 'Parentstate'}, inplace=True)
data2=data.shift(-1).dropna(subset=['JobID'])
    df2=data2[data2['JobID'].str.contains(r"\.batch")]
data2=data.shift(-2).dropna(subset=['JobID'])
    df3=data2[data2['JobID'].str.contains(r"\.0")]
df.update(df2.MaxRSS)
df.update(df3.MaxRSS)
df.update(df2.AveCPU)
df.update(df3.AveCPU)
df=df.join(df2[['State']])
df.update(df3.State)
    # drop columns that are all nan
df.dropna(axis=1, inplace=True, how='all')
# drop rows where any element is nan (errors in the data)
df.dropna(axis=0, inplace=True, how='any')
df.reset_index(inplace=True)
df.loc[:,'State']=df.State.apply(str)
# reduce data point, 5min accuracy instead of seconds just fine
df['Submit']=pd.to_datetime(df['Submit']).dt.round('5min')
df['Start']=pd.to_datetime(df['Start']).dt.round('5min')
df['End']= | pd.to_datetime(df['End']) | pandas.to_datetime |
from config import *
import pandas as pd
import numpy as np
import networkx as nx
import glob, os
import bct
from sklearn import preprocessing
def normalize(df):
'''
    scale dataframe columns to the [0, 1] range using min-max scaling
'''
min_max_scaler = preprocessing.MinMaxScaler()
x = df.values #returns a numpy array
x_scaled = min_max_scaler.fit_transform(x)
normalized = pd.DataFrame(x_scaled)
normalized.columns = df.columns
return normalized
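# A minimal usage sketch, assuming normalize() as defined above: each column is
# min-max scaled independently, so the column extremes map to 0 and 1.
if __name__ == '__main__':
    demo = pd.DataFrame({'degree': [0, 5, 10], 'closeness': [2.0, 2.5, 4.0]})
    print(normalize(demo))
    # expected: degree -> [0.0, 0.5, 1.0], closeness -> [0.0, 0.25, 1.0]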
def loc_mes_disconnected(G,M):
"""
    computes and saves local measures for a disconnected graph G;
    M is the corresponding binary adjacency matrix used for the bctpy measures
"""
m = {}
m['ID'] = G.nodes
m['degree'] = \
bct.degrees_und(M)
m['betweenness'] = \
np.fromiter(nx.betweenness_centrality(G).values(), dtype=float)
m['betweennessbct'] = \
bct.betweenness_bin(M)
m['eigenvector'] = \
np.fromiter(nx.eigenvector_centrality(G, max_iter=500).values(), dtype=float)
m['eigenvectorbct'] = \
bct.eigenvector_centrality_und(M)
m['katz'] = \
np.fromiter(nx.katz_centrality_numpy(G).values(), dtype=float)
m['closeness'] = \
np.fromiter(nx.closeness_centrality(G).values(), dtype=float)
m['load'] = \
np.fromiter(nx.load_centrality(G).values(), dtype=float)
m['clustering_coef'] = \
np.fromiter(nx.clustering(G).values(), dtype=float)
m['clustering_coefbct'] = \
bct.clustering_coef_bu(M)
m['pagerank'] = \
np.fromiter(nx.pagerank(G).values(), dtype=float)
m['pagerank_d85bct'] = \
bct.pagerank_centrality(M, d = .85)
m['subgraph'] = \
np.fromiter(nx.subgraph_centrality(G).values(), dtype=float)
m['subgraphbct'] = \
bct.subgraph_centrality(M)
m['harmonic'] = \
np.fromiter(nx.harmonic_centrality(G).values(), dtype=float)
return pd.DataFrame.from_dict(m)
def glob_mes_disconnected(G):
"""
computes and saves global measures for a disconnected graph G
"""
m = {}
# transitivity: the fraction of all possible triangles present in G.
m['transitivity'] = [nx.transitivity(G)]
# average_clustering: Compute the average clustering coefficient
m['average_clustering'] = [nx.average_clustering(G)]
m['local_efficiency'] = [nx.local_efficiency(G)]
m['global_efficiency'] = [nx.global_efficiency(G)]
m['pearson_correlation'] = nx.degree_pearson_correlation_coefficient(G)
return pd.DataFrame.from_dict(m)
def loc_mes_connected(G,ct):
"""
    computes and saves local measures for a connected graph G;
    ct is the correlation type (the pagerank measure is skipped when ct == 'glasso')
"""
#ordered list of degrees
lss=[]
for i in list(G.degree(nx.nodes(G))):
lss.append(i[1])
k=list(set(lss))
m = {}
m['ID'] = G.nodes
m['degree'] = lss
m['eccentricity'] = \
np.fromiter(nx.eccentricity(G).values(), dtype=float)
m['betweenness'] = \
np.fromiter(nx.betweenness_centrality(G).values(), dtype=float)
m['com_betweenness'] = \
np.fromiter(nx.communicability_betweenness_centrality(G).values(), dtype=float)
m['eigenvector'] = \
np.fromiter(nx.eigenvector_centrality(G, max_iter=500).values(), dtype=float)
m['katz'] = \
np.fromiter(nx.katz_centrality_numpy(G).values(), dtype=float)
m['closeness'] = \
np.fromiter(nx.closeness_centrality(G).values(), dtype=float)
m['current_flow_closeness'] = \
np.fromiter(nx.current_flow_closeness_centrality(G).values(), dtype=float)
m['load'] = \
np.fromiter(nx.load_centrality(G).values(), dtype=float)
m['clustering_coef'] = \
np.fromiter(nx.clustering(G).values(), dtype=float)
if ct!='glasso':
m['pagerank'] = \
np.fromiter(nx.pagerank(G,max_iter=500).values(), dtype=float)
m['subgraph'] = \
np.fromiter(nx.subgraph_centrality(G).values(), dtype=float)
m['harmonic'] = \
np.fromiter(nx.harmonic_centrality(G).values(), dtype=float)
return pd.DataFrame.from_dict(m)
def glob_mes_connected(G):
"""
computes and saves global measures for a connected graph G
"""
m = {}
m['density'] = nx.density(G)
m['average_shortest_path_length'] = nx.average_shortest_path_length(G)
# transitivity: the fraction of all possible triangles present in G.
m['transitivity'] = nx.transitivity(G)
# average_clustering: Compute the average clustering coefficient
m['average_clustering'] = nx.average_clustering(G)
m['center'] = [nx.center(G)]
m['diameter'] = nx.diameter(G)
m['radius'] = nx.radius(G)
m['periphery'] = [nx.periphery(G)]
m['local_efficiency'] = nx.local_efficiency(G)
m['global_efficiency'] = nx.global_efficiency(G)
m['pearson_correlation'] = nx.degree_pearson_correlation_coefficient(G)
# The small-world coefficient defined as: sigma = C/Cr / L/Lr
#m['sigma'] = nx.sigma(G)
# The small-world coefficient defined as: omega = Lr/L - C/Cl
#m['omega'] = nx.omega(G)
return pd.DataFrame.from_dict(m)
def compute_measures(subjects,
denoising_strategies,
correlation_types,
thresholding_methods,
thresholding_values,
negative_corr = False):
global rootdir
for sub in subjects:
for ds in denoising_strategies:
for ct in correlation_types:
for tm in thresholding_methods:
for tv in thresholding_values:
if tm=='userdefined':
tm = '%s-%.3f'%(tm,tv)
# read data
corrGdir = glob.glob(rootdir + "/data/04_correlations/corr-%s/ds-%s/*%s*.gexf"
%(ct,ds,sub))
corrMdir = glob.glob(rootdir + "/data/04_correlations/corr-%s/ds-%s/*%s*.npy"
%(ct,ds,sub))
if not negative_corr:
adjGdir = glob.glob(rootdir + "/data/05_adjacency_matrices/positive/tm-*%s*/corr-%s/ds-%s/*%s*.gexf"%(tm,ct,ds,sub))
adjMdir = glob.glob(rootdir + "/data/05_adjacency_matrices/positive/tm-*%s*/corr-%s/ds-%s/*%s*.npy"%(tm,ct,ds,sub))
else:
adjGdir = glob.glob(rootdir + "/data/05_adjacency_matrices/negative/tm-*%s*/corr-%s/ds-%s/*%s*.gexf"%(tm,ct,ds,sub))
adjMdir = glob.glob(rootdir + "/data/05_adjacency_matrices/negative/tm-*%s*/corr-%s/ds-%s/*%s*.npy"%(tm,ct,ds,sub))
corrG = nx.read_gexf(corrGdir[0])
corrM = np.load(corrMdir[0])
adjG = nx.read_gexf(adjGdir[0])
adjM = np.load(adjMdir[0])
# compute the giant component
adjG_gc = [adjG.subgraph(c).copy() for c in nx.connected_components(adjG)]
adjG_gc = adjG_gc[0]
# compute measures
loc_mes_adj = loc_mes_disconnected(adjG,adjM)
glob_mes_adj = glob_mes_disconnected(adjG)
                        gc_loc_mes_adj = loc_mes_connected(adjG_gc, ct)
gc_glob_mes_adj = glob_mes_connected(adjG_gc)
# compute normalized local measures
loc_mes_adj_norm = normalize(loc_mes_adj)
loc_mes_adj_norm.ID = loc_mes_adj.ID
gc_loc_mes_adj_norm = normalize(gc_loc_mes_adj)
gc_loc_mes_adj_norm.ID = gc_loc_mes_adj.ID
# save measures
dirc = rootdir + '/data/06_network_measures/positive/tm-%s/corr-%s/ds-%s/sub-%s'%(tm,ct,ds,sub)
if negative_corr:
dirc = rootdir + '/data/06_network_measures/negative/tm-%s/corr-%s/ds-%s/sub-%s'%(tm,ct,ds,sub)
os.system('mkdir -p %s'%dirc)
loc_mes_adj.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_local_measures.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
glob_mes_adj.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_global_measures.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
gc_loc_mes_adj.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_local_measures_giant_component.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
gc_glob_mes_adj.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_global_measures_giant_component.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
loc_mes_adj_norm.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_local_measures_norm.csv'
%(dirc,sub,ds,ct,tm),sep='\t')
gc_loc_mes_adj_norm.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_local_measures_giant_component_norm.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
                        print('✓ subject: %s, Denoising Strategy: %s, Correlation Type: %s, '
                              'Thresholding Methods: %s' % (sub, ds, ct, tm))
def add_measure(gl,c,bw,
mes,mes_name,
subjects,
denoising_strategies,
                thresholding_methods,
negative_corr = False):
'''
adding new measures to the existing measure files for all subjects
inputs:
gl = It could be 'global' or 'local'.
c = It could be 'connected' or 'disconnected'.
bw = It could be 'binary' or 'weighted'
mes = a function for computing the measure values
for local measures the output would be a scaler
for global measures the output is a dictionary
with node IDs as key and the corresponding meacure as value.
subjects = list of subject IDs.
denoising_strategies = list of denoising strategies that you want to include.
thresholding_methods = list of thresholding methods that you want to include.
'''
global rootdir
for sub in subjects:
for ds in denoising_strategies:
for ct in correlation_types:
for tm in thresholding_methods:
for tv in thresholding_values:
if tm=='userdefined':
tm = '%s-%.3f'%(tm,tv)
print(sub,ds,tm,ct)
corrGdir = glob.glob(rootdir + "/data/04_correlations/corr-%s/ds-%s/*%s*.gexf"
%(ct,ds,sub))
if not negative_corr:
adjGdir = glob.glob(rootdir + "/data/05_adjacency_matrices/positive/tm-*%s*/corr-%s/ds-%s/*%s*.gexf"%(tm,ct,ds,sub))
if negative_corr:
adjGdir = glob.glob(rootdir + "/data/05_adjacency_matrices/negative/tm-*%s*/corr-%s/ds-%s/*%s*.gexf"%(tm,ct,ds,sub))
corrG = nx.read_gexf(corrGdir[0])
adjG = nx.read_gexf(adjGdir[0])
# compute the giant component
adjG_gc = [adjG.subgraph(c).copy() for c in nx.connected_components(adjG)]
adjG_gc = adjG_gc[0]
corrG_gc = corrG.copy()
not_connected = set(corrG.nodes) - set(adjG_gc.nodes)
corrG_gc.remove_nodes_from(not_connected)
dirc = '%s/data/06_network_measures/positive/tm-%s/corr-%s/ds-%s/sub-%s'%(rootdir,tm,ct,ds,sub)
if negative_corr:
                            dirc = rootdir + '/data/06_network_measures/negative/tm-%s/corr-%s/ds-%s/sub-%s'%(tm,ct,ds,sub)
if gl=='local':
if c=='disconnected':
gc_local_dir = glob.glob("%s/*local_measures_giant_component.csv"
%(dirc))
# Local Measures of the giant component:
gc_loc_mes_adj = pd.read_csv(gc_local_dir[0], sep='\t')
local_dir = glob.glob("%s/*local_measures.csv"%(dirc))
# Local measures of the whole graph:
loc_mes_adj = pd.read_csv(local_dir[0], sep='\t')
if mes_name in loc_mes_adj:
print('measure already computed for subject %s network or its giant component with %s DS, %s CT and %s TM.'%(sub,ds,ct,tm))
continue
del loc_mes_adj['Unnamed: 0']
del gc_loc_mes_adj['Unnamed: 0']
if bw == 'binary':
loc_new_mes = mes(adjG)
gc_loc_new_mes = mes(adjG_gc)
if bw == 'weighted':
loc_new_mes = mes(corrG)
gc_loc_new_mes = mes(corrG_gc)
loc_mes_adj[mes_name] = loc_new_mes.values()
gc_loc_mes_adj[mes_name] = gc_loc_new_mes.values()
loc_mes_adj_norm = normalize(loc_mes_adj)
loc_mes_adj_norm.ID = loc_mes_adj.ID
gc_loc_mes_adj_norm = normalize(gc_loc_mes_adj)
gc_loc_mes_adj_norm.ID = gc_loc_mes_adj.ID
loc_mes_adj.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_local_measures.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
gc_loc_mes_adj.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_local_measures_giant_component.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
loc_mes_adj_norm.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_local_measures_norm.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
gc_loc_mes_adj_norm.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_local_measures_giant_component_norm.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
if c=='connected':
gc_local_dir = glob.glob("%s/*local_measures_giant_component.csv"
%(dirc))
# Local Measures of the giant component:
gc_loc_mes_adj = pd.read_csv(gc_local_dir[0], sep='\t')
if mes_name in gc_loc_mes_adj:
print('measure already computed for subject %s with %s DS, %s CT and %s TM'
%(sub,ds,ct,tm))
continue
del gc_loc_mes_adj['Unnamed: 0']
if bw == 'binary':
gc_loc_new_mes = mes(adjG_gc)
if bw == 'weighted':
gc_loc_new_mes = mes(corrG_gc)
gc_loc_mes_adj[mes_name] = gc_loc_new_mes.values()
gc_loc_mes_adj_norm = normalize(gc_loc_mes_adj)
gc_loc_mes_adj_norm.ID = gc_loc_mes_adj.ID
gc_loc_mes_adj.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_local_measures_giant_component.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
gc_loc_mes_adj_norm.to_csv \
('%s/sub-%s_ds-%s_corr-%s_tm-%s_local_measures_giant_component_norm.csv'
%(dirc,sub,ds,ct,tm), sep='\t')
elif gl=='global':
if c=='disconnected':
gc_global_dir = glob.glob("%s/*global_measures_giant_component.csv"
%(dirc))
global_dir = glob.glob("%s/*global_measures.csv"%(dirc))
# Global measures of the whole graph:
glob_mes_adj = pd.read_csv(global_dir[0], sep='\t')
# Global Measures of the giant component:
gc_glob_mes_adj = | pd.read_csv(gc_global_dir[0], sep='\t') | pandas.read_csv |
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from itertools import product
import calendar
class Features():
def __init__(self):
self.df = []
self.readFile()
def execute(self):
self.remove_ex_value()
self.remove_same_data()
self.augumentation()
self.create_test_col()
self.locate_feature()
self.encodeing()
self.time_feature()
self.history_saled_feature()
self.slide_window_feature()
self.three_month_buying_feature()
self.history_sum_feature()
self.another_feature()
self.save()
def readFile(self):
self.test = pd.read_csv('./data/test.csv')
self.sales = pd.read_csv('./data/sales_train.csv')
self.shops = pd.read_csv('./data/shops.csv')
self.items = pd.read_csv('./data/items.csv')
self.item_cats = pd.read_csv('./data/item_categories.csv')
def remove_ex_value(self):
self.train = self.sales[(self.sales.item_price < 100000) & (self.sales.item_price > 0)]
        self.train = self.train[self.train.item_cnt_day < 1001]
def remove_same_data(self):
self.train.loc[self.train.shop_id == 0, 'shop_id'] = 57
self.test.loc[self.test.shop_id == 0, 'shop_id'] = 57
self.train.loc[self.train.shop_id == 1, 'shop_id'] = 58
self.test.loc[self.test.shop_id == 1, 'shop_id'] = 58
self.train.loc[self.train.shop_id == 40, 'shop_id'] = 39
self.test.loc[self.test.shop_id == 40, 'shop_id'] = 39
def augumentation(self):
self.index_cols = ['shop_id', 'item_id', 'date_block_num']
for block_num in self.train['date_block_num'].unique():
            cur_shops = self.train.loc[self.train['date_block_num'] == block_num, 'shop_id'].unique()
            cur_items = self.train.loc[self.train['date_block_num'] == block_num, 'item_id'].unique()
self.df.append(np.array(list(product(*[cur_shops, cur_items, [block_num]])),dtype='int32'))
self.df = pd.DataFrame(np.vstack(self.df), columns = self.index_cols,dtype=np.int32)
self.group = self.train.groupby(['date_block_num','shop_id','item_id']).agg({'item_cnt_day': ['sum']})
self.group.columns = ['item_cnt_month']
self.group.reset_index(inplace=True)
self.df = pd.merge(self.df, self.group, on=self.index_cols, how='left')
self.df['item_cnt_month'] = (self.df['item_cnt_month']
.fillna(0)
.clip(0,20)
.astype(np.float16))
def create_test_col(self):
self.test['date_block_num'] = 34
self.test['date_block_num'] = self.test['date_block_num'].astype(np.int8)
self.test['shop_id'] = self.test['shop_id'].astype(np.int8)
self.test['item_id'] = self.test['item_id'].astype(np.int16)
        self.df = pd.concat([self.df, self.test], ignore_index=True, sort=False, keys=self.index_cols)
        self.df.fillna(0, inplace=True)
def locate_feature(self):
self.shops['city'] = self.shops['shop_name'].apply(lambda x: x.split()[0].lower())
self.shops.loc[self.shops.city == '!якутск', 'city'] = 'якутск'
self.shops['city_code'] = LabelEncoder().fit_transform(self.shops['city'])
coords = dict()
coords['якутск'] = (62.028098, 129.732555, 4)
coords['адыгея'] = (44.609764, 40.100516, 3)
coords['балашиха'] = (55.8094500, 37.9580600, 1)
coords['волжский'] = (53.4305800, 50.1190000, 3)
coords['вологда'] = (59.2239000, 39.8839800, 2)
coords['воронеж'] = (51.6720400, 39.1843000, 3)
coords['выездная'] = (0, 0, 0)
coords['жуковский'] = (55.5952800, 38.1202800, 1)
coords['интернет-магазин'] = (0, 0, 0)
coords['казань'] = (55.7887400, 49.1221400, 4)
coords['калуга'] = (54.5293000, 36.2754200, 4)
coords['коломна'] = (55.0794400, 38.7783300, 4)
coords['красноярск'] = (56.0183900, 92.8671700, 4)
coords['курск'] = (51.7373300, 36.1873500, 3)
coords['москва'] = (55.7522200, 37.6155600, 1)
coords['мытищи'] = (55.9116300, 37.7307600, 1)
coords['н.новгород'] = (56.3286700, 44.0020500, 4)
coords['новосибирск'] = (55.0415000, 82.9346000, 4)
coords['омск'] = (54.9924400, 73.3685900, 4)
coords['ростовнадону'] = (47.2313500, 39.7232800, 3)
coords['спб'] = (59.9386300, 30.3141300, 2)
coords['самара'] = (53.2000700, 50.1500000, 4)
coords['сергиев'] = (56.3000000, 38.1333300, 4)
coords['сургут'] = (61.2500000, 73.4166700, 4)
coords['томск'] = (56.4977100, 84.9743700, 4)
coords['тюмень'] = (57.1522200, 65.5272200, 4)
coords['уфа'] = (54.7430600, 55.9677900, 4)
coords['химки'] = (55.8970400, 37.4296900, 1)
coords['цифровой'] = (0, 0, 0)
coords['чехов'] = (55.1477000, 37.4772800, 4)
coords['ярославль'] = (57.6298700, 39.8736800, 2)
self.shops['city_coord_1'] = self.shops['city'].apply(lambda x: coords[x][0])
self.shops['city_coord_2'] = self.shops['city'].apply(lambda x: coords[x][1])
self.shops['country_part'] = self.shops['city'].apply(lambda x: coords[x][2])
self.shops = self.shops[['shop_id', 'city_code', 'city_coord_1', 'city_coord_2', 'country_part']]
self.df = pd.merge(self.df, self.shops, on=['shop_id'], how='left')
def encodeing(self):
map_dict = {
'Чистые носители (штучные)': 'Чистые носители',
'Чистые носители (шпиль)' : 'Чистые носители',
'PC ': 'Аксессуары',
'Служебные': 'Служебные '
}
self.items = pd.merge(self.items, self.item_cats, on='item_category_id')
self.items['item_category'] = self.items['item_category_name'].apply(lambda x: x.split('-')[0])
self.items['item_category'] = self.items['item_category'].apply(lambda x: map_dict[x] if x in map_dict.keys() else x)
self.items['item_category_common'] = LabelEncoder().fit_transform(self.items['item_category'])
self.items['item_category_code'] = LabelEncoder().fit_transform(self.items['item_category_name'])
self.items = self.items[['item_id', 'item_category_common', 'item_category_code']]
self.df = | pd.merge(self.df, self.items, on=['item_id'], how='left') | pandas.merge |
import os
import argparse
import pandas as pd
import numpy as np
from pandas.api.types import CategoricalDtype
import matplotlib
from scipy.interpolate import interp1d
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style='whitegrid')
sns.set_palette(sns.xkcd_palette(
['greyish', 'pale red', 'amber', 'dark grey']))
# Grade categories [3, 3+, ..., 9c+]
gradeType = CategoricalDtype(
[grade + plus for grade in
[str(number) for number in range(3, 6)] +
[str(number) + sub for number in range(6, 10) for sub in 'abc']
for plus in ['', '+']], ordered=True
)
# Ascent style categories
styleType = CategoricalDtype(
['Top rope', 'Red point', 'Flash', 'On-sight'], ordered=True
)
# Style for figure titles
titleSpec = {'fontsize': 14, 'color': 'C0', 'weight': 'bold'}
def crawl(username):
"""Crawl 27 Crags for the user's tick list
Args:
username (str): Name of user
"""
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
process = CrawlerProcess(get_project_settings())
process.crawl('27crags', user=username)
process.start()
def create_plot(data, xticks, ylim, **kwargs):
"""Create a scatter and yearly top10 avg. plot for the ticks in data,
along with a marginal histogram.
Args:
data (DataFrame): A pandas DataFrame containing the ticks
xticks (DatetimeIndex): Locations of xticks
ylim (tuple): Limits for the y-axis (grade cat codes)
**kwargs: Not used but needed for calls from FacetGrid
"""
ax = plt.gca()
# plot datapoints
sns.stripplot(x='date_ordinal', y='grade', data=data,
alpha=.3, hue='ascent_style', jitter=True, ax=ax)
# plot marginal histogram
divider = make_axes_locatable(ax)
marg = divider.append_axes('right', '15%', pad='3%', sharey=ax)
a = data.groupby(['ascent_style', 'grade'])['route'].count().reset_index()
left = np.zeros(len(gradeType.categories),)
for name, df in a.groupby('ascent_style'):
marg.barh(df.grade.cat.codes, df.route, left=left[df.grade.cat.codes],
linewidth=0)
left[df.grade.cat.codes] += df.route
for grade in np.unique(data.grade.cat.codes):
marg.text(left[grade], grade, ' %d' % left[grade], color='C0',
verticalalignment='center', horizontalalignment='left',
fontsize=8)
marg.axis('off')
# get yearly top10 average
top10 = data.groupby(['ascent_style', 'year'], as_index=False).apply(
lambda x: x.nlargest(min(10, len(x)), 'grade_ordinal'))
top10 = top10.groupby(['ascent_style', 'year']).agg(
{'grade_ordinal': np.mean, 'date_ordinal': np.mean}).rename(
columns={'grade_ordinal': 'top10'})
# plot interpolation of yearly top10
for name, df in top10.groupby('ascent_style'):
df.dropna(inplace=True)
if len(df) < 2:
continue
if len(df) == 2:
kind = 'linear'
elif len(df) == 3:
kind = 'quadratic'
else:
kind = 'cubic'
new_index = np.arange(df.date_ordinal.iloc[0],
df.date_ordinal.iloc[-1])
f = interp1d(df.date_ordinal.values, df.top10, kind=kind)
df = | pd.DataFrame() | pandas.DataFrame |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities functions to manipulate the data in the colab."""
import datetime
import itertools
import operator
from typing import List, Optional
import dataclasses
import numpy as np
import pandas as pd
import pandas.io.formats.style as style
from scipy import stats
from trimmed_match.design import common_classes
TimeWindow = common_classes.TimeWindow
FormatOptions = common_classes.FormatOptions
_operator_functions = {'>': operator.gt,
'<': operator.lt,
'<=': operator.le,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne}
_inverse_op = {'<': '>', '<=': '>=', '>': '<', '>=': '<=', '=': '!='}
@dataclasses.dataclass
class CalculateMinDetectableIroas:
"""Class for the calculation of the minimum detectable iROAS.
Hypothesis testing for H0: iROAS=0 vs H1: iROAS>=min_detectable_iroas based
on one sample X which follows a normal distribution with mean iROAS (unknown)
and standard deviation rmse (known).
Typical usage example:
calc_min_detectable_iroas = CalculateMinDetectableIroas(0.1, 0.9)
min_detectable_iroas = calc_min_detectable_iroas.at(2.0)
"""
# chance of rejecting H0 incorrectly when H0 holds.
significance_level: float = 0.1
# chance of rejecting H0 correctly when H1 holds.
power_level: float = 0.9
# minimum detectable iroas at rmse=1.
rmse_multiplier: float = dataclasses.field(init=False)
def __post_init__(self):
"""Calculates rmse_multiplier.
Raises:
ValueError: if significance_level or power_level is not in (0, 1).
"""
if self.significance_level <= 0 or self.significance_level >= 1.0:
raise ValueError('significance_level must be in (0, 1), but got '
f'{self.significance_level}.')
if self.power_level <= 0 or self.power_level >= 1.0:
raise ValueError('power_level must be in (0, 1), but got '
f'{self.power_level}.')
self.rmse_multiplier = (
stats.norm.ppf(self.power_level) +
stats.norm.ppf(1 - self.significance_level))
def at(self, rmse: float) -> float:
"""Calculates min_detectable_iroas at the specified rmse."""
return rmse * self.rmse_multiplier
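# A minimal numeric sketch of the formula above, assuming the default
# significance_level=0.1 and power_level=0.9: rmse_multiplier is
# norm.ppf(0.9) + norm.ppf(0.9) ~= 1.2816 + 1.2816 = 2.5631, so rmse=2.0 gives
# a minimum detectable iROAS of roughly 5.13.
if __name__ == '__main__':
  _calc = CalculateMinDetectableIroas(significance_level=0.1, power_level=0.9)
  print(round(_calc.at(2.0), 2))  # expected: 5.13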
def find_days_to_exclude(
dates_to_exclude: List[str]) -> List[TimeWindow]:
"""Returns a list of time windows to exclude from a list of days and weeks.
Args:
dates_to_exclude: a List of strings with format indicating a single day as
'2020/01/01' (YYYY/MM/DD) or an entire time period as
'2020/01/01 - 2020/02/01' (indicating start and end date of the time period)
Returns:
days_exclude: a List of TimeWindows obtained from the list in input.
"""
days_exclude = []
for x in dates_to_exclude:
tmp = x.split('-')
if len(tmp) == 1:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[0])))
except ValueError:
raise ValueError(f'Cannot convert the string {tmp[0]} to a valid date.')
elif len(tmp) == 2:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), pd.Timestamp(tmp[1])))
except ValueError:
raise ValueError(
f'Cannot convert the strings in {tmp} to a valid date.')
else:
raise ValueError(f'The input {tmp} cannot be interpreted as a single' +
' day or a time window')
return days_exclude
def expand_time_windows(periods: List[TimeWindow]) -> List[pd.Timestamp]:
"""Return a list of days to exclude from a list of TimeWindows.
Args:
periods: List of time windows (first day, last day).
Returns:
days_exclude: a List of obtained by expanding the list in input.
"""
days_exclude = []
for window in periods:
days_exclude += pd.date_range(window.first_day, window.last_day, freq='D')
return list(set(days_exclude))
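# A minimal usage sketch combining the two helpers above: one single day plus a
# three-day window expand to four distinct pd.Timestamp objects.
if __name__ == '__main__':
  _periods = find_days_to_exclude(['2020/01/01', '2020/01/10 - 2020/01/12'])
  print(sorted(expand_time_windows(_periods)))
  # expected: 2020-01-01, 2020-01-10, 2020-01-11, 2020-01-12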
def overlap_percent(dates_left: List['datetime.datetime'],
dates_right: List['datetime.datetime']) -> float:
"""Find the size of the intersections of two arrays, relative to the first array.
Args:
dates_left: List of datetime.datetime
dates_right: List of datetime.datetime
Returns:
percentage: the percentage of elements of dates_right that also appear in
dates_left
"""
intersection = np.intersect1d(dates_left, dates_right)
percentage = 100 * len(intersection) / len(dates_right)
return percentage
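# A small example of the convention above: the percentage is taken relative to
# the second argument, dates_right. Three of the four dates below also appear
# in dates_left, so the result is 75.0.
if __name__ == '__main__':
  _left = list(pd.date_range('2020-01-01', '2020-01-10'))
  _right = list(pd.date_range('2020-01-08', '2020-01-11'))
  print(overlap_percent(_left, _right))  # expected: 75.0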
def check_time_periods(geox_data: pd.DataFrame,
start_date_eval: pd.Timestamp,
start_date_aa_test: pd.Timestamp,
experiment_duration_weeks: int,
frequency: str) -> bool:
"""Checks that the geox_data contains the data for the two periods.
Check that the geox_data contains all observations during the evaluation and
AA test periods to guarantee that the experiment lasts exactly a certain
number of days/weeks, depending on the frequency of the data (daily/weekly).
Args:
geox_data: pd.Dataframe with at least the columns (date, geo).
start_date_eval: start date of the evaluation period.
start_date_aa_test: start date of the aa test period.
experiment_duration_weeks: int, length of the experiment in weeks.
frequency: str indicating the frequency of the time series. It should be one
of 'infer', 'D', 'W'.
Returns:
bool: a bool, True if the time periods specified pass all the checks
Raises:
ValueError: if part of the evaluation or AA test period are shorter than
experiment_duration (either weeks or days).
"""
if frequency not in ['infer', 'D', 'W']:
raise ValueError(
f'frequency should be one of ["infer", "D", "W"], got {frequency}')
if frequency == 'infer':
tmp = geox_data.copy().set_index(['date', 'geo'])
frequency = infer_frequency(tmp, 'date', 'geo')
if frequency == 'W':
frequency = '7D'
number_of_observations = experiment_duration_weeks
else:
number_of_observations = 7 * experiment_duration_weeks
freq_str = 'weeks' if frequency == '7D' else 'days'
missing_eval = find_missing_dates(geox_data, start_date_eval,
experiment_duration_weeks,
number_of_observations, frequency)
if missing_eval:
raise ValueError(
(f'The evaluation period contains the following {freq_str} ' +
f'{missing_eval} for which we do not have data.'))
missing_aa_test = find_missing_dates(geox_data, start_date_aa_test,
experiment_duration_weeks,
number_of_observations, frequency)
if missing_aa_test:
raise ValueError((f'The AA test period contains the following {freq_str} ' +
f'{missing_aa_test} for which we do not have data.'))
return True
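# A minimal usage sketch, assuming a toy daily two-geo frame that fully covers
# both a 4-week evaluation period and a 4-week AA-test period; the check should
# then return True.
if __name__ == '__main__':
  _dates = pd.date_range('2020-01-01', '2020-03-31', freq='D')
  _geox = pd.DataFrame({'date': _dates.repeat(2), 'geo': [1, 2] * len(_dates)})
  print(check_time_periods(_geox,
                           start_date_eval=pd.Timestamp('2020-01-01'),
                           start_date_aa_test=pd.Timestamp('2020-02-01'),
                           experiment_duration_weeks=4,
                           frequency='D'))  # expected: True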
def find_missing_dates(geox_data: pd.DataFrame, start_date: pd.Timestamp,
period_duration_weeks: int,
number_of_observations: int,
frequency: str) -> List[str]:
"""Find missing observations in a time period.
Args:
geox_data: pd.Dataframe with at least the columns (date, geo).
start_date: start date of the evaluation period.
period_duration_weeks: int, length of the period in weeks.
number_of_observations: expected number of time points.
frequency: str or pd.DateOffset indicating the frequency of the time series.
Returns:
missing: a list of strings, containing the dates for which data are missing
in geox_data.
"""
days = datetime.timedelta(days=7 * period_duration_weeks - 1)
period_dates = ((geox_data['date'] >= start_date) &
(geox_data['date'] <= start_date + days))
days_in_period = geox_data.loc[
period_dates, 'date'].drop_duplicates().dt.strftime('%Y-%m-%d').to_list()
missing = np.array([])
if len(days_in_period) != number_of_observations:
expected_observations = list(
pd.date_range(start_date, start_date + days,
freq=frequency).strftime('%Y-%m-%d'))
missing = set(expected_observations) - set(days_in_period)
return sorted(missing)
def infer_frequency(data: pd.DataFrame, date_index: str,
series_index: str) -> str:
"""Infers frequency of data from pd.DataFrame with multiple indices.
Infers frequency of data from pd.DataFrame with two indices, one for the slice
name and one for the date-time.
Example:
df = pd.Dataframe{'date': [2020-10-10, 2020-10-11], 'geo': [1, 1],
'response': [10, 20]}
df.set_index(['geo', 'date'], inplace=True)
infer_frequency(df, 'date', 'geo')
Args:
data: a pd.DataFrame for which frequency needs to be inferred.
date_index: string containing the name of the time index.
series_index: string containing the name of the series index.
Returns:
A str, either 'D' or 'W' indicating the most likely frequency inferred
from the data.
Raises:
ValueError: if it is not possible to infer frequency of sampling from the
provided pd.DataFrame.
"""
data = data.sort_values(by=[date_index, series_index])
# Infer most likely frequence for each series_index
series_names = data.index.get_level_values(series_index).unique().tolist()
series_frequencies = []
for series in series_names:
observed_times = data.iloc[data.index.get_level_values(series_index) ==
series].index.get_level_values(date_index)
n_steps = len(observed_times)
if n_steps > 1:
time_diffs = (
observed_times[1:n_steps] -
observed_times[0:(n_steps - 1)]).astype('timedelta64[D]').values
modal_frequency, _ = np.unique(time_diffs, return_counts=True)
series_frequencies.append(modal_frequency[0])
if not series_frequencies:
raise ValueError(
'At least one series with more than one observation must be provided.')
if series_frequencies.count(series_frequencies[0]) != len(series_frequencies):
raise ValueError(
'The provided time series seem to have irregular frequencies.')
try:
frequency = {
1: 'D',
7: 'W'
}[series_frequencies[0]]
except KeyError:
raise ValueError('Frequency could not be identified. Got %d days.' %
series_frequencies[0])
return frequency
def human_readable_number(number: float) -> str:
"""Print a large number in a readable format.
  Return a readable format for a number, e.g. 123 million becomes 123M.
Args:
number: a float to be printed in human readable format.
Returns:
readable_number: a string containing the formatted number.
"""
number = float('{:.3g}'.format(number))
magnitude = 0
while abs(number) >= 1000 and magnitude < 4:
magnitude += 1
number /= 1000.0
readable_number = '{}{}'.format('{:f}'.format(number).rstrip('0').rstrip('.'),
['', 'K', 'M', 'B', 'tn'][magnitude])
return readable_number
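# A few illustrative calls, assuming the behaviour defined above (three
# significant figures, K/M/B/tn suffixes):
if __name__ == '__main__':
  print(human_readable_number(950))       # expected: '950'
  print(human_readable_number(1234000))   # expected: '1.23M'
  print(human_readable_number(2.5e12))    # expected: '2.5tn'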
def change_background_row(df: pd.DataFrame, value: float, operation: str,
column: str):
"""Colors a row of a table based on the expression in input.
Color a row in:
- orange if the value of the column satisfies the expression in input
- beige if the value of the column satisfies the inverse expression in input
- green otherwise
For example, if the column has values [1, 2, 3] and we pass 'value' equal to
2, and operation '>', then
- 1 is marked in beige (1 < 2, which is the inverse expression)
- 2 is marked in green (it's not > and it's not <)
  - 3 is marked in orange (3 > 2, which is the expression)
Args:
df: the table of which we want to change the background color.
value: term of comparison to be used in the expression.
operation: a string to define which operator to use, e.g. '>' or '='. For a
full list check _operator_functions.
column: name of the column to be used for the comparison
Returns:
pd.Series
"""
if _operator_functions[operation](float(df[column]), value):
return | pd.Series('background-color: orange', df.index) | pandas.Series |
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Datetime,
Double,
Integer,
)
from evalml.pipelines import DelayedFeatureTransformer
@pytest.fixture
def delayed_features_data():
X = pd.DataFrame({"feature": range(1, 32)})
y = pd.Series(range(1, 32))
return X, y
def test_delayed_features_transformer_init():
delayed_features = DelayedFeatureTransformer(
max_delay=4,
delay_features=True,
delay_target=False,
date_index="Date",
random_seed=1,
)
assert delayed_features.parameters == {
"max_delay": 4,
"delay_features": True,
"delay_target": False,
"gap": 0,
"forecast_horizon": 1,
"date_index": "Date",
}
def encode_y_as_string(y):
y = y.astype("category")
y_answer = y.astype(int) - 1
y = y.map(lambda val: str(val).zfill(2))
return y, y_answer
def encode_X_as_string(X):
X_answer = X.astype(int) - 1
# So that the encoder encodes the values in ascending order. This makes it easier to
# specify the answer for each unit test
X.feature = pd.Categorical(X.feature.map(lambda val: str(val).zfill(2)))
return X, X_answer
def encode_X_y_as_strings(X, y, encode_X_as_str, encode_y_as_str):
y_answer = y
if encode_y_as_str:
y, y_answer = encode_y_as_string(y)
X_answer = X
if encode_X_as_str:
X, X_answer = encode_X_as_string(X)
return X, X_answer, y, y_answer
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
def test_delayed_feature_extractor_maxdelay3_forecasthorizon1_gap0(
encode_X_as_str, encode_y_as_str, delayed_features_data
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
answer = pd.DataFrame(
{
"feature_delay_1": X_answer.feature.shift(1),
"feature_delay_2": X_answer.feature.shift(2),
"feature_delay_3": X_answer.feature.shift(3),
"feature_delay_4": X_answer.feature.shift(4),
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
}
)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=3, gap=0, forecast_horizon=1).fit_transform(
X=X, y=y
),
)
answer_only_y = pd.DataFrame(
{
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
}
)
assert_frame_equal(
answer_only_y,
DelayedFeatureTransformer(max_delay=3, gap=0, forecast_horizon=1).fit_transform(
X=None, y=y
),
)
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
def test_delayed_feature_extractor_maxdelay5_forecasthorizon1_gap0(
encode_X_as_str, encode_y_as_str, delayed_features_data
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
answer = pd.DataFrame(
{
"feature_delay_1": X_answer.feature.shift(1),
"feature_delay_2": X_answer.feature.shift(2),
"feature_delay_3": X_answer.feature.shift(3),
"feature_delay_4": X_answer.feature.shift(4),
"feature_delay_5": X_answer.feature.shift(5),
"feature_delay_6": X_answer.feature.shift(6),
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
"target_delay_5": y_answer.shift(5),
"target_delay_6": y_answer.shift(6),
}
)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=5, gap=0, forecast_horizon=1).fit_transform(
X, y
),
)
answer_only_y = pd.DataFrame(
{
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
"target_delay_5": y_answer.shift(5),
"target_delay_6": y_answer.shift(6),
}
)
assert_frame_equal(
answer_only_y,
DelayedFeatureTransformer(max_delay=5, gap=0, forecast_horizon=1).fit_transform(
X=None, y=y
),
)
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
def test_delayed_feature_extractor_maxdelay3_forecasthorizon7_gap1(
encode_X_as_str, encode_y_as_str, delayed_features_data
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
answer = pd.DataFrame(
{
"feature_delay_8": X_answer.feature.shift(8),
"feature_delay_9": X_answer.feature.shift(9),
"feature_delay_10": X_answer.feature.shift(10),
"feature_delay_11": X_answer.feature.shift(11),
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
"target_delay_11": y_answer.shift(11),
}
)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7, gap=1).fit_transform(
X, y
),
)
answer_only_y = pd.DataFrame(
{
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
"target_delay_11": y_answer.shift(11),
}
)
assert_frame_equal(
answer_only_y,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7, gap=1).fit_transform(
X=None, y=y
),
)
def test_delayed_feature_extractor_numpy(delayed_features_data):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(X, y, False, False)
X_np = X.values
y_np = y.values
answer = pd.DataFrame(
{
"0_delay_8": X_answer.feature.shift(8),
"0_delay_9": X_answer.feature.shift(9),
"0_delay_10": X_answer.feature.shift(10),
"0_delay_11": X_answer.feature.shift(11),
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
"target_delay_11": y_answer.shift(11),
}
)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7, gap=1).fit_transform(
X_np, y_np
),
)
answer_only_y = pd.DataFrame(
{
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
"target_delay_11": y_answer.shift(11),
}
)
assert_frame_equal(
answer_only_y,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7, gap=1).fit_transform(
X=None, y=y_np
),
)
@pytest.mark.parametrize(
"delay_features,delay_target", [(False, True), (True, False), (False, False)]
)
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
def test_lagged_feature_extractor_delay_features_delay_target(
encode_y_as_str,
encode_X_as_str,
delay_features,
delay_target,
delayed_features_data,
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
all_delays = pd.DataFrame(
{
"feature_delay_1": X_answer.feature.shift(1),
"feature_delay_2": X_answer.feature.shift(2),
"feature_delay_3": X_answer.feature.shift(3),
"feature_delay_4": X_answer.feature.shift(4),
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
}
)
if not delay_features:
all_delays = all_delays.drop(
columns=[c for c in all_delays.columns if "feature_" in c]
)
if not delay_target:
all_delays = all_delays.drop(
columns=[c for c in all_delays.columns if "target" in c]
)
transformer = DelayedFeatureTransformer(
max_delay=3,
forecast_horizon=1,
delay_features=delay_features,
delay_target=delay_target,
)
assert_frame_equal(all_delays, transformer.fit_transform(X, y))
@pytest.mark.parametrize(
"delay_features,delay_target", [(False, True), (True, False), (False, False)]
)
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
def test_lagged_feature_extractor_delay_target(
encode_y_as_str,
encode_X_as_str,
delay_features,
delay_target,
delayed_features_data,
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
answer = pd.DataFrame()
if delay_target:
answer = pd.DataFrame(
{
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
}
)
transformer = DelayedFeatureTransformer(
max_delay=3,
forecast_horizon=1,
delay_features=delay_features,
delay_target=delay_target,
)
assert_frame_equal(answer, transformer.fit_transform(None, y))
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
@pytest.mark.parametrize("data_type", ["ww", "pd"])
def test_delay_feature_transformer_supports_custom_index(
encode_X_as_str, encode_y_as_str, data_type, make_data_type, delayed_features_data
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
X.index = pd.RangeIndex(50, 81)
X_answer.index = pd.RangeIndex(50, 81)
y.index = pd.RangeIndex(50, 81)
y_answer.index = pd.RangeIndex(50, 81)
answer = pd.DataFrame(
{
"feature_delay_7": X_answer.feature.shift(7),
"feature_delay_8": X_answer.feature.shift(8),
"feature_delay_9": X_answer.feature.shift(9),
"feature_delay_10": X_answer.feature.shift(10),
"target_delay_7": y_answer.shift(7),
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
},
index=pd.RangeIndex(50, 81),
)
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7).fit_transform(X, y),
)
answer_only_y = pd.DataFrame(
{
"target_delay_7": y_answer.shift(7),
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
},
index=pd.RangeIndex(50, 81),
)
assert_frame_equal(
answer_only_y,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7).fit_transform(
X=None, y=y
),
)
def test_delay_feature_transformer_multiple_categorical_columns(delayed_features_data):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(X, y, True, True)
X["feature_2"] = pd.Categorical(["a"] * 10 + ["aa"] * 10 + ["aaa"] * 10 + ["aaaa"])
X_answer["feature_2"] = pd.Series([0] * 10 + [1] * 10 + [2] * 10 + [3])
answer = pd.DataFrame(
{
"feature_delay_11": X_answer.feature.shift(11),
"feature_delay_12": X_answer.feature.shift(12),
"feature_2_delay_11": X_answer.feature_2.shift(11),
"feature_2_delay_12": X_answer.feature_2.shift(12),
"target_delay_11": y_answer.shift(11),
"target_delay_12": y_answer.shift(12),
}
)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=1, forecast_horizon=9, gap=2).fit_transform(
X, y
),
)
def test_delay_feature_transformer_y_is_none(delayed_features_data):
X, _ = delayed_features_data
answer = pd.DataFrame(
{
"feature_delay_11": X.feature.shift(11),
"feature_delay_12": X.feature.shift(12),
}
)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=1, forecast_horizon=11).fit_transform(
X, y=None
),
)
def test_delayed_feature_transformer_does_not_modify_input_data(delayed_features_data):
X, _ = delayed_features_data
expected = X.copy()
_ = DelayedFeatureTransformer(max_delay=1, forecast_horizon=11).fit_transform(
X, y=None
)
assert_frame_equal(X, expected)
@pytest.mark.parametrize(
"X_df",
[
pd.DataFrame(
pd.to_datetime(["20190902", "20200519", "20190607"] * 5, format="%Y%m%d")
),
pd.DataFrame(pd.Series([0, 0, 3, 1] * 5, dtype="int64")),
pd.DataFrame( | pd.Series([0, 0, 3.0, 2] * 5, dtype="float") | pandas.Series |
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = OrderedDict
df = DataFrame(
odict(
[
("a", list("abc")),
("b", list(range(1, 4))),
("c", np.arange(3, 6).astype("u1")),
("d", np.arange(4.0, 7.0, dtype="float64")),
("e", [True, False, True]),
("f", pd.date_range("now", periods=3).values),
]
)
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
dict(
A=Timestamp("20130102", tz="US/Eastern"),
B=Timestamp("20130603", tz="CET"),
),
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
"""
BUILD_FEATURES Module
---------------------
@author : Stratoshad
This module contains function
that relate to data pre-processing
or aggregation.
"""
from tqdm import tqdm
import pandas as pd
import numpy as np
from datetime import datetime
import warnings
def process_cancellations(df, limit_rows=None):
"""
Takes in the dataframe of transactions
and identifies all cancellations. It
then runs through the following logic
to identify matches for those cancellations.
For each cancellation it identifies all transactions
that have the same CustomerID and StockCode, happened
in the past and have the same or a smaller Quantity. It
excludes cancellations with no CustomerID.
For cancellations with no matches it just takes
a note of the index. For single matches it adds
the canceled quantity to the original dataframe.
For multi-matches it either picks up the transaction
with an exact match on Quantity or keeps eliminating
transactions until it covers all cancellation
quantities.
Parameters:
-----------
df : dataframe
A dataframe of transactions that
has the "Cancelled" column
limit_rows : int (default : None)
Limits the numbers of cancellations to
look through. This is useful for testing. If
None looks through all of them.
Returns:
--------
df_clean : dataframe
A dataframe with all canceled transactions
dropped and the paired ones marked down.
match_dict : dictionary
A dictionary of all indices of the
cancellation transactions split by their
matched category
"""
# Create the main dataframes
df_clean = df.copy()
df_cancel = df_clean.loc[
(df_clean["Cancelled"] == 1) & (df_clean["CustomerID"] != "00000")
]
incomplete_cancelations = []
# Initialize the dictionary and the columns
match_dict = {"no_match": [], "one_match": [], "mult_match": []}
df_clean["Quantity_Canc"] = 0
df_clean["Cancel_Date"] = np.nan
if limit_rows is not None:
df_cancel = df_cancel.iloc[:limit_rows]
for index, row in tqdm(df_cancel.iterrows(), total=df_cancel.shape[0]):
# for index, row in df_cancel.iterrows():
# Extract all useful information
customer_id = row["CustomerID"]
stock_code = row["StockCode"]
canc_quantity = row["Quantity"]
canc_date = row["InvoiceDate"]
# Get all transactions that have the
# same customerID and Stock Code but
# happened earlier than the cancellation
df_tmp = df_clean.loc[
(df_clean["CustomerID"] == customer_id)
& (df_clean["StockCode"] == stock_code)
& (df_clean["InvoiceDate"] <= canc_date)
& (df_clean["Cancelled"] != 1)
]
# If we have no matches, just record
# that cancellation as unmatched
if df_tmp.shape[0] == 0:
match_dict["no_match"].append(index)
# If we have only one match, take that
# as its match. Ensure we take the minimum between
# the matched quantity and the available cancellations
elif df_tmp.shape[0] == 1:
matched = df_tmp.iloc[0]
quantity_bought = matched["Quantity"]
already_canc = matched["Quantity_Canc"]
# If we don't find enough purchases to match
# the cancellations, keep track of them
if quantity_bought < (canc_quantity * -1):
incomplete_cancelations.append(index)
if (quantity_bought - already_canc) >= (canc_quantity * -1):
match_dict["one_match"].append(index)
# Take the minimum between remainder and total bought
actual_cancel = min(quantity_bought, (canc_quantity * -1))
# Update the original dataframe
df_clean.loc[matched.name, "Quantity_Canc"] += actual_cancel
df_clean.loc[matched.name, "Cancel_Date"] = canc_date
# print()
# print(index)
# display(df_cancel.loc[index:index, :])
# display(df_tmp)
# print()
# print(f"{matched.name} was chosen with {actual_cancel} taken out of it.")
# display(df_clean.loc[matched.name:matched.name, :])
# print()
else:
match_dict["no_match"].append(index)
# In the case that we have more than one match, the following
# rules apply. If there is an exact match on the quantity, take the
# most recent one. Otherwise keep taking recent transactions until
# the total cancelled quantity is covered.
elif df_tmp.shape[0] > 1:
match_dict["mult_match"].append(index)
# print()
# print(index)
# display(df_cancel.loc[index:index, :])
# display(df_tmp)
# print()
# Check if there are any exact matches or greater matches of Quantity
exact_matches = df_tmp.loc[
(df_tmp["Quantity"] == (canc_quantity * -1))
& (
df_tmp["Quantity"]
>= (df_tmp["Quantity_Canc"] + (canc_quantity * -1))
)
]
if len(exact_matches) == 0:
# Loop through the array from bottom up
# and only mark transactions until you
# match the total quantity canceled
cum_quant = 0
for idx, r in df_tmp[::-1].iterrows():
quantity_bought = r["Quantity"] - r["Quantity_Canc"]
quantity_left = quantity_bought - r["Quantity_Canc"]
if quantity_left <= (canc_quantity * -1):
continue
elif cum_quant < (canc_quantity * -1):
# Ensure we are only assigning as much
# quantity as available
remainder = (canc_quantity * -1) - cum_quant
actual_cancel = min(quantity_bought, remainder)
cum_quant += actual_cancel
# print(f"Cancelled {actual_cancel} / {quantity_bought} of order {idx}")
# print(f"Added transaction {idx} and cum_quant is now: {cum_quant} / {canc_quantity * -1}")
# Update the original dataframe
df_clean.loc[idx, "Quantity_Canc"] += actual_cancel
df_clean.loc[idx, "Cancel_Date"] = canc_date
# Take the latest exact match as
# the correct transaction
else:
matched = exact_matches.iloc[-1]
idx = matched.name
actual_cancel = canc_quantity * -1
# Update the original dataframe
df_clean.loc[idx, "Quantity_Canc"] += actual_cancel
df_clean.loc[idx, "Cancel_Date"] = canc_date
# print(f"{idx} was chosen.")
# display(df_clean.loc[idx:idx, :])
# print()
# Print the summary
print(f"Total Cancelation Summary")
print(f"Total Cancelations: {df_cancel.shape[0]}")
print(
f"No-Matches: {len(match_dict['no_match'])} ({round((len(match_dict['no_match']) / df_cancel.shape[0] * 100), 1)}%)"
)
print(
f"Single-Matches: {len(match_dict['one_match'])} ({round((len(match_dict['one_match']) / df_cancel.shape[0] * 100), 1)}%)"
)
print(
f"Multi-Matches: {len(match_dict['mult_match'])} ({round((len(match_dict['mult_match']) / df_cancel.shape[0] * 100), 1)}%)"
)
# At the end ensure that we don't have any canceled quantities above
# the actual quantity except for Discounts
df_test = df_clean[
(df_clean["Cancelled"] != 1) & (df_clean["StockCode"] != "D")
].copy()
assert (
df_test["Quantity"] < df_test["Quantity_Canc"]
).sum() == 0, "There are transactions with canceled quantities > bought quantities"
return df_clean, match_dict
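# A minimal usage sketch for process_cancellations (illustrative only; the
# dataframe name `df_tx` is an assumption). Any transactions dataframe that
# carries the columns used above ("Cancelled", "CustomerID", "StockCode",
# "Quantity", "InvoiceDate") will do, and `limit_rows` caps the number of
# cancellations scanned, which is handy while testing the matching logic:
#
#     df_clean, match_dict = process_cancellations(df_tx, limit_rows=100)
#     unmatched = df_clean.loc[match_dict["no_match"]]
#     print(df_clean[["Quantity", "Quantity_Canc", "Cancel_Date"]].head())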
def get_df_date_features(date_df, date_column):
"""
Takes in a dataframe and the corresponding
date_column. From that it extracts the following
information:
- Month
- Month Name
- Day
- Week Num
- Season
- Year
- Is_Weekend
Parameters
----------
date_df: dataframe
A timeseries dataset that contains
a date column where features can be
extracted from.
date_column: str
Name of the column in the dataframe
that contains the dates
Returns
-------
edited_df: dataframe
Dataframe with the features mentioned
above added as columns
"""
# Copy the dataframe
df_edited = date_df.copy()
df_edited[date_column] = pd.to_datetime(df_edited[date_column])
#!/usr/bin/env python3
import requests
import json
import pandas as pd
import tweepy
import os
import config as cfg
from datetime import datetime, timedelta
from pytz import timezone
def main():
# get data
nys_data = get_nys_data()
nys = get_nys_appt(nys_data, cfg.config["nys_sites"])
alb = get_nys_appt(nys_data, cfg.config["alb_sites"])
cvs = get_cvs_data()
pc = get_pc_data()
wal = get_walgreens_data()
# book urls
nys_url = 'https://am-i-eligible.covid19vaccine.health.ny.gov/'
cvs_url = 'https://www.cvs.com/immunizations/covid-19-vaccine'
wal_url = 'https://www.walgreens.com/findcare/vaccination/covid-19/location-screening'
pc_url = 'https://www.pricechopper.com/covidvaccine/new-york/'
# img urls
nys_img = '<img alt="" src="https://favicons.githubusercontent.com/am-i-eligible.covid19vaccine.health.ny.gov" height="13">'
cvs_img = '<img alt="" src="https://favicons.githubusercontent.com/www.cvs.com" height="13">'
wal_img = '<img alt="" src="https://favicons.githubusercontent.com/www.walgreens.com" height="13">'
pc_img = '<img alt="" src="https://favicons.githubusercontent.com/www.pricechopper.com" height="13">'
tz = timezone('EST')
date = str(datetime.now(tz).strftime('%Y-%m-%d %H:%M:%S'))
sites = ['SUNY Albany','Albany Armory','Price Chopper','CVS','Walgreens']
appointments = [ nys, alb, pc, cvs, wal ]
df_long = pd.DataFrame({'date': date, 'appointments': appointments, 'sites': sites})
# being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if isinstance(colors, compat.string_types):
import matplotlib.colors
conv = matplotlib.colors.ColorConverter()
def _maybe_valid_colors(colors):
try:
[conv.to_rgba(c) for c in colors]
return True
except ValueError:
return False
# check whether the string can be convertable to single color
maybe_single_color = _maybe_valid_colors([colors])
# check whether each character can be convertable to colors
maybe_color_cycle = _maybe_valid_colors(list(colors))
if maybe_single_color and maybe_color_cycle and len(colors) > 1:
msg = ("'{0}' can be parsed as both single color and "
"color cycle. Specify each color using a list "
"like ['{0}'] or {1}")
raise ValueError(msg.format(colors, list(colors)))
elif maybe_single_color:
colors = [colors]
else:
# ``colors`` is regarded as color cycle.
# mpl will raise error any of them is invalid
pass
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
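# Illustrative sketch (not part of the original module): _get_standard_colors
# resolves the color cycle used by the plot classes further down. Asking for
# five colors from a named matplotlib colormap returns five RGBA tuples,
# while color_type='random' derives a seeded random color per column:
#
#     colors = _get_standard_colors(num_colors=5, colormap='Blues')
#     assert len(colors) == 5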
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to breakdown into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
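# A short usage sketch for the option store defined above (nothing assumed
# beyond the class itself). Keys can be addressed via their aliases, and
# `use` restores the previous value when the with-block exits:
#
#     plot_params['x_compat'] = True        # alias for 'xaxis.compat'
#     with plot_params.use('x_compat', False):
#         pass                              # temporarily False inside the block
#     # back to True here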
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_+ rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
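# Hedged usage sketch for radviz; `iris_df` is an assumed dataframe of numeric
# feature columns plus a 'Name' class column. Each class gets its own color
# and every row is projected into the unit circle spanned by the columns:
#
#     ax = radviz(iris_df, class_column='Name')
#     ax.get_figure().savefig('radviz.png')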
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
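# Hedged usage sketch for andrews_curves; `iris_df` is again an assumed
# dataframe of numeric features plus a 'Name' class column. Each row becomes
# one curve built from the Fourier-style series defined in `function` above:
#
#     ax = andrews_curves(iris_df, class_column='Name', samples=100)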
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
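# Hedged usage sketch for bootstrap_plot; `s` is an assumed numeric Series.
# The figure shows `samples` resamples of `size` points and the resulting
# sampling distributions of the mean, median and midrange:
#
#     fig = bootstrap_plot(s, size=50, samples=500, alpha=0.5)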
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
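# Hedged usage sketch for lag_plot; `ts` is an assumed time series. A strong
# diagonal pattern in the scatter of y(t) against y(t + lag) points to
# autocorrelation at that lag:
#
#     ax = lag_plot(ts, lag=7)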
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
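# Hedged usage sketch for autocorrelation_plot; `ts` is an assumed time
# series. The grey bands drawn above are the 95% (solid) and 99% (dashed)
# confidence bands around zero, i.e. +/- z / sqrt(n):
#
#     ax = autocorrelation_plot(ts)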
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if com.is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
# if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
# if it has left_ax proparty, ``ax`` must be right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, Series):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
numeric_data = data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic(self):
pass
def _adorn_subplots(self):
to_adorn = self.axes
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
labels = [com.pprint_thing(key) for key in self.data.index]
labels = dict(zip(range(len(self.data.index)), labels))
for ax in self.axes:
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
stringified = map(com.pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.order())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
def _is_datetype(self):
index = self.data.index
return (isinstance(index, (PeriodIndex, DatetimeIndex)) or
index.inferred_type in ('datetime', 'date', 'datetime64',
'time'))
def _get_plot_function(self):
'''
Returns the matplotlib plotting function (plot or errorbar) based on
the presence of errorbar keywords.
'''
errorbar = any(e is not None for e in self.errors.values())
def plotf(ax, x, y, style=None, **kwds):
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if errorbar:
return self.plt.Axes.errorbar(ax, x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is unsupported
if style is not None:
args = (ax, x, y, style)
else:
args = (ax, x, y)
return self.plt.Axes.plot(*args, **kwds)
return plotf
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = com.pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _get_style(self, i, col_name):
style = ''
if self.subplots:
style = 'k'
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[i]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(col_name, style)
else:
style = self.style
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _maybe_add_color(self, colors, kwds, style, i):
has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _parse_errorbars(self, label, err):
'''
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
'''
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex_axis(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif com.is_list_like(err):
if com.is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif com.is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
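# Illustrative note on the error-bar inputs documented above (a sketch only,
# assuming a dataframe `df` with numeric columns 'a', 'b' and 'err'):
#
#     df.plot(yerr='err')                 # column name: errors taken from df['err']
#     df.plot(yerr=df[['a', 'b']] * 0.1)  # key-matched DataFrame of errors
#     df.plot(yerr=0.5)                   # scalar broadcast to every series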
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_axes(self):
return self.axes[0].get_figure().get_axes()
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class ScatterPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, c=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('scatter requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.x = x
self.y = y
self.c = c
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib as mpl
mpl_ge_1_3_1 = str(mpl.__version__) >= LooseVersion('1.3.1')
import matplotlib.pyplot as plt
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = com.is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if mpl_ge_1_3_1:
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class HexBinPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('hexbin requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.x = x
self.y = y
self.C = C
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib.pyplot as plt
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class LinePlot(MPLPlot):
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _index_freq(self):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
freq = getattr(self.data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(self.data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _is_dynamic_freq(self, freq):
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq is not None and self._no_base(freq)
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(self.data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = self.data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _use_dynamic_x(self):
freq = self._index_freq()
ax = self._get_ax(0)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
return (freq is not None) and self._is_dynamic_freq(freq)
def _is_ts_plot(self):
# slightly deceptive name: True means the frame is plotted with tsplot against a period-converted index
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _make_plot(self):
self._initialize_prior(len(self.data))
if self._is_ts_plot():
data = self._maybe_convert_index(self.data)
x = data.index # dummy, not used
plotf = self._get_ts_plot_function()
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._get_plot_function()
it = self._iter_data()
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i, **kwds)
self._add_legend_handle(newlines[0], label, index=i)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
def _get_stacked_values(self, y, label):
if self.stacked:
if (y >= 0).all():
return self._pos_prior + y
elif (y <= 0).all():
return self._neg_prior + y
else:
raise ValueError('When stacked is True, each column must be either all positive or all negative. '
'{0} contains both positive and negative values'.format(label))
else:
return y
def _get_plot_function(self):
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
# column_num is used to get the target column from plotf in line and area plots
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
self._update_prior(y)
return lines
return plotf
def _get_ts_plot_function(self):
from pandas.tseries.plotting import tsplot
plotf = self._get_plot_function()
def _plot(ax, x, data, style=None, **kwds):
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
lines = tsplot(data, plotf, ax=ax, style=style, **kwds)
return lines
return _plot
def _initialize_prior(self, n):
self._pos_prior = np.zeros(n)
self._neg_prior = np.zeros(n)
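# these running totals are the stacking baselines; _update_prior below adds each plotted series to them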
def _update_prior(self, y):
if self.stacked and not self.subplots:
# tsplot resampling may change the data length
if len(self._pos_prior) != len(y):
self._initialize_prior(len(y))
if (y >= 0).all():
self._pos_prior += y
elif (y <= 0).all():
self._neg_prior += y
def _maybe_convert_index(self, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
ax = self._get_ax(0)
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data.index = data.index.to_period(freq=freq)
return data
def _post_plot_logic(self):
df = self.data
condition = (not self._use_dynamic_x()
and df.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex))
index_name = self._get_index_name()
for ax in self.axes:
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
def _get_plot_function(self):
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
else:
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
if (y >= 0).all():
start = self._pos_prior
elif (y <= 0).all():
start = self._neg_prior
else:
start = np.zeros(len(y))
if 'color' not in kwds:
kwds['color'] = lines[0].get_color()
self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds)
self._update_prior(y)
return lines
return plotf
def _add_legend_handle(self, handle, label, index=None):
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self):
LinePlot._post_plot_logic(self)
if self.ylim is None:
if (self.data >= 0).all().all():
for ax in self.axes:
ax.set_ylim(0, None)
elif (self.data <= 0).all().all():
for ax in self.axes:
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
self.log = kwargs.pop('log', False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
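# ax_pos is the base x position of each tick's group of bars once the tick offset is applied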
def _args_adjust(self):
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if com.is_list_like(self.left):
self.left = np.array(self.left)
def _get_plot_function(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
start = start + self.bottom
return ax.bar(x, y, w, bottom=start, log=self.log, **kwds)
elif self.kind == 'barh':
def f(ax, x, y, w, start=None, log=self.log, **kwds):
start = start + self.left
return ax.barh(x, y, w, left=start, log=self.log, **kwds)
else:
raise ValueError("BarPlot kind must be either 'bar' or 'barh'")
return f
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
bar_f = self._get_plot_function()
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
if self.subplots:
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self):
for ax in self.axes:
if self.use_index:
str_index = [com.pprint_thing(key) for key in self.data.index]
else:
str_index = [com.pprint_thing(key) for key in
range(self.data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
if self.kind == 'bar':
ax.set_xlim((s_edge, e_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(str_index)
if name is not None and self.use_index:
ax.set_xlabel(name)
elif self.kind == 'barh':
# horizontal bars
ax.set_ylim((s_edge, e_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(str_index)
if name is not None and self.use_index:
ax.set_ylabel(name)
else:
raise NotImplementedError(self.kind)
@property
def orientation(self):
if self.kind == 'bar':
return 'vertical'
elif self.kind == 'barh':
return 'horizontal'
else:
raise NotImplementedError(self.kind)
class HistPlot(LinePlot):
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if com.is_integer(self.bins):
# create common bin edge
values = self.data.convert_objects()._get_numeric_data()
values = np.ravel(values)
values = values[~com.isnull(values)]
hist, self.bins = np.histogram(values, bins=self.bins,
range=self.kwds.get('range', None),
weights=self.kwds.get('weights', None))
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _get_plot_function(self):
def plotf(ax, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.bins) - 1)
y = y[~com.isnull(y)]
bottom = self._pos_prior + self.bottom
# ignore style
n, bins, patches = self.plt.Axes.hist(ax, y, bins=self.bins,
bottom=bottom, **kwds)
self._update_prior(n)
return patches
return plotf
def _make_plot(self):
plotf = self._get_plot_function()
colors = self._get_colors()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
label = com.pprint_thing(label)
kwds = self.kwds.copy()
kwds['label'] = label
self._maybe_add_color(colors, kwds, style, i)
if style is not None:
kwds['style'] = style
artists = plotf(ax, y, column_num=i, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _post_plot_logic(self):
if self.orientation == 'horizontal':
for ax in self.axes:
ax.set_xlabel('Frequency')
else:
for ax in self.axes:
ax.set_ylabel('Frequency')
@property
def orientation(self):
if self.kwds.get('orientation', None) == 'horizontal':
return 'horizontal'
else:
return 'vertical'
class KdePlot(HistPlot):
orientation = 'vertical'
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
sample_range = max(y) - min(y)
ind = np.linspace(min(y) - 0.5 * sample_range,
max(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
def _get_plot_function(self):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
f = MPLPlot._get_plot_function(self)
def plotf(ax, y, style=None, column_num=None, **kwds):
y = remove_na(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=self.bw_method)
else:
gkde = gaussian_kde(y)
if self.bw_method is not None:
msg = ('bw_method was added in Scipy 0.11.0.' +
' Scipy version in use is %s.' % spv)
warnings.warn(msg)
ind = self._get_ind(y)
y = gkde.evaluate(ind)
lines = f(ax, ind, y, style=style, **kwds)
return lines
return plotf
def _post_plot_logic(self):
for ax in self.axes:
ax.set_ylabel('Density')
class PiePlot(MPLPlot):
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
self.kwds.setdefault('colors', self._get_colors(num_colors=len(self.data),
color_kwds='colors'))
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = com.pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ''
else:
return label
idx = [com.pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(label, value) for
label, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
_layout_type = 'horizontal'
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type=None, **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, all subplots show the last column label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
def _get_plot_function(self):
def plotf(ax, y, column_num=None, **kwds):
if y.ndim == 2:
y = [remove_na(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = | remove_na(y) | pandas.core.series.remove_na |
import copy
import os, sys
import numpy as np
import pandas as pd
import matplotlib
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats.mstats import zscore
# load my modules
from src.utils import load_pkl
from src.visualise import *
import joblib
import pickle
# Built-in modules #
import random
# Third party modules #
import numpy, scipy, matplotlib, pandas
from matplotlib import pyplot
import scipy.cluster.hierarchy as sch
import scipy.spatial.distance as dist
import names
model_path = './models/SCCA_Yeo7nodes_revision_4_0.80_0.50.pkl'
label_path = '~/yeo7_cluster/names.csv'
dat_path = './data/processed/dict_SCCA_data_prepro_revision1.pkl'
# load data
model = joblib.load(model_path)
dataset = load_pkl(dat_path)
df_label = pd.read_csv(label_path)
#df = pd.read_pickle(df_path)
u, v = model.u * [1, 1, -1, 1] , model.v * [1, 1, -1, 1]
n = model.n_components
# create labels for the nodes
seed_names = df_label.iloc[:, 0].apply(str) + '-' + df_label.iloc[:, -2] + '-' + df_label.iloc[:, -3] + ' ' + df_label.iloc[:, -1]
# unflatten the functional corr coeff
from src.utils import unflatten
mat = []
for i in range(4):
mat.append(unflatten(u[:, i]))
# plot size
MY_FIX_SIZE = (13, 10)
###############################################################################
# Create Custom Color Gradients #
red_black_sky = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.9), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0))}
red_black_blue = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0))}
red_black_green = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),
'green': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0))}
yellow_black_blue = {'red': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.1), (1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.8), (0.5, 0.1, 0.0), (1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 1.0), (0.5, 0.1, 0.0), (1.0, 0.0, 0.0))}
make_cmap = lambda x: matplotlib.colors.LinearSegmentedColormap('my_colormap', x, 256)
color_gradients = {'red_black_sky' : make_cmap(red_black_sky),
'red_black_blue' : make_cmap(red_black_blue),
'red_black_green' : make_cmap(red_black_green),
'yellow_black_blue' : make_cmap(yellow_black_blue),
'red_white_blue' : pyplot.cm.bwr,
'seismic' : pyplot.cm.seismic,
'green_white_purple' : pyplot.cm.PiYG_r,
'coolwarm' : pyplot.cm.coolwarm,}
###############################################################################
def plot_hierarchical_heatmap_core(frame):
"""A common use case for biologists analyzing their gene expression data is to cluster and visualize patterns of expression in the form of a heatmap and associated dendrogram."""
row_method = 'single' # Can be: linkage, single, complete, average, weighted, centroid, median, ward
column_method = 'single' # Can be: linkage, single, complete, average, weighted, centroid, median, ward
row_metric = 'braycurtis' # Can be: see scipy documentation
column_metric = 'braycurtis' # Can be: see scipy documentation
gradient_span = 'min_to_max_centered' # Can be: min_to_max, min_to_max_centered, only_max, only_min
color_gradient = 'coolwarm' # Can be: see color_gradients dictionary
fig_width = MY_FIX_SIZE[0]
fig_height = MY_FIX_SIZE[1]
# Names #
row_header = frame.index
column_header = frame.columns
# What color to use #
cmap = color_gradients[color_gradient]
# Scale the max and min colors #
value_min = frame.min().min()
value_max = frame.max().max()
if gradient_span == 'min_to_max_centered':
value_max = max([value_max, abs(value_min)])
value_min = value_max * -1
if gradient_span == 'only_max': value_min = 0
if gradient_span == 'only_min': value_max = 0
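# 'min_to_max_centered' makes the color scale symmetric about zero; 'only_max'/'only_min' clamp one end of the scale at zero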
norm = matplotlib.colors.Normalize(value_min, value_max)
# Scale the figure window size #
fig = pyplot.figure(figsize=(fig_width, fig_height))
# Calculate positions for all elements #
# ax1, placement of dendrogram 1, on the left of the heatmap
### The second value controls the position of the matrix relative to the bottom of the view
[ax1_x, ax1_y, ax1_w, ax1_h] = [0.05, 0.22, 0.2, 0.6]
width_between_ax1_axr = 0.004
### distance between the top color bar axis and the matrix
height_between_ax1_axc = 0.004
### Sufficient size to show
color_bar_w = 0.015
# axr, placement of row side colorbar #
### second to last controls the width of the side color bar - 0.015 when showing
[axr_x, axr_y, axr_w, axr_h] = [0.31, 0.1, color_bar_w, 0.6]
axr_x = ax1_x + ax1_w + width_between_ax1_axr
axr_y = ax1_y; axr_h = ax1_h
width_between_axr_axm = 0.004
# axc, placement of column side colorbar #
### last one controls the height of the top color bar - 0.015 when showing
[axc_x, axc_y, axc_w, axc_h] = [0.4, 0.63, 0.5, color_bar_w]
axc_x = axr_x + axr_w + width_between_axr_axm
axc_y = ax1_y + ax1_h + height_between_ax1_axc
height_between_axc_ax2 = 0.004
# axm, placement of heatmap for the data matrix #
[axm_x, axm_y, axm_w, axm_h] = [0.4, 0.9, 2.5, 0.5]
axm_x = axr_x + axr_w + width_between_axr_axm
axm_y = ax1_y; axm_h = ax1_h
axm_w = axc_w
# ax2, placement of dendrogram 2, on the top of the heatmap #
### last one controls the height of the dendrogram
[ax2_x, ax2_y, ax2_w, ax2_h] = [0.3, 0.72, 0.6, 0.15]
ax2_x = axr_x + axr_w + width_between_axr_axm
ax2_y = ax1_y + ax1_h + height_between_ax1_axc + axc_h + height_between_axc_ax2
ax2_w = axc_w
# axcb - placement of the color legend #
[axcb_x, axcb_y, axcb_w, axcb_h] = [0.07, 0.88, 0.18, 0.09]
# Compute and plot top dendrogram #
if column_method:
d2 = dist.pdist(frame.transpose())
D2 = dist.squareform(d2)
ax2 = fig.add_axes([ax2_x, ax2_y, ax2_w, ax2_h], frame_on=True)
Y2 = sch.linkage(D2, method=column_method, metric=column_metric)
Z2 = sch.dendrogram(Y2)
ind2 = sch.fcluster(Y2, 0.7*max(Y2[:,2]), 'distance')
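# cut the dendrogram at 70% of the maximum linkage distance (scipy's default color threshold) to obtain flat clusters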
ax2.set_xticks([])
ax2.set_yticks([])
### apply the clustering for the array-dendrograms to the actual matrix data
idx2 = Z2['leaves']
frame = frame.iloc[:,idx2]
### reorder the flat cluster to match the order of the leaves of the dendrogram
ind2 = ind2[idx2]
else: idx2 = range(frame.shape[1])
# Compute and plot left dendrogram #
if row_method:
d1 = dist.pdist(frame)
D1 = dist.squareform(d1)
ax1 = fig.add_axes([ax1_x, ax1_y, ax1_w, ax1_h], frame_on=True)
Y1 = sch.linkage(D1, method=row_method, metric=row_metric)
Z1 = sch.dendrogram(Y1, orientation='right')
ind1 = sch.fcluster(Y1, 0.7*max(Y1[:,2]), 'distance')
ax1.set_xticks([])
ax1.set_yticks([])
### apply the clustering for the array-dendrograms to the actual matrix data
idx1 = Z1['leaves']
frame = frame.iloc[idx1,:]
### reorder the flat cluster to match the order of the leaves of the dendrogram
ind1 = ind1[idx1]
else: idx1 = range(frame.shape[0])
# Plot distance matrix #
axm = fig.add_axes([axm_x, axm_y, axm_w, axm_h])
axm.matshow(frame, aspect='auto', origin='lower', cmap=cmap, norm=norm)
axm.set_xticks([])
axm.set_yticks([])
# Add text #
new_row_header = []
new_column_header = []
for i in range(frame.shape[0]):
axm.text(frame.shape[1]-0.5, i, ' ' + row_header[idx1[i]], verticalalignment="center")
new_row_header.append(row_header[idx1[i]] if row_method else row_header[i])
for i in range(frame.shape[1]):
axm.text(i, -0.9, ' '+column_header[idx2[i]], rotation=90, verticalalignment="top", horizontalalignment="center")
new_column_header.append(column_header[idx2[i]] if column_method else column_header[i])
# Plot column side colorbar #
if column_method:
axc = fig.add_axes([axc_x, axc_y, axc_w, axc_h])
cmap_c = matplotlib.colors.ListedColormap(['r', 'g', 'b', 'y', 'w', 'k', 'm'])
dc = numpy.array(ind2, dtype=int)
dc.shape = (1,len(ind2))
axc.matshow(dc, aspect='auto', origin='lower', cmap=cmap_c)
axc.set_xticks([])
axc.set_yticks([])
# Plot row side colorbar #
if row_method:
axr = fig.add_axes([axr_x, axr_y, axr_w, axr_h])
dr = numpy.array(ind1, dtype=int)
dr.shape = (len(ind1),1)
cmap_r = matplotlib.colors.ListedColormap(['r', 'g', 'b', 'y', 'w', 'k', 'm'])
axr.matshow(dr, aspect='auto', origin='lower', cmap=cmap_r)
axr.set_xticks([])
axr.set_yticks([])
# Plot color legend #
### axes for colorbar
axcb = fig.add_axes([axcb_x, axcb_y, axcb_w, axcb_h], frame_on=False)
cb = matplotlib.colorbar.ColorbarBase(axcb, cmap=cmap, norm=norm, orientation='horizontal')
axcb.set_title("colorkey")
max_cb_ticks = 5
axcb.xaxis.set_major_locator(pyplot.MaxNLocator(max_cb_ticks))
# Render the graphic #
if len(row_header)>50 or len(column_header)>50: pyplot.rcParams['font.size'] = 5
else: pyplot.rcParams['font.size'] = 8
# Return figure #
return fig, axm, axcb, cb
###############################################################################
def plot_hierarchical_heatmap(data_matrix, row_names, column_names):
df = | pandas.DataFrame(data_matrix, index=row_names, columns=column_names) | pandas.DataFrame |
#!/usr/bin/env python
"""
This script selects FLUXNET sites with the highest measurement ratios of Qle, Qh
and NEE. Sites are selected where all three ratios are above 0.9 or 0.8, and
additionally where only Qle and Qh are above 0.8 or 0.9. The maps produced by
this script include zoomed insets. Data generated by the method2.py script.
"""
__author__ = "<NAME>"
__version__ = "1.0 (25.10.2018)"
__email__ = "<EMAIL>"
# Import packages
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import pandas as pd
import matplotlib.patches as mpatches
import os
import matplotlib
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
def zoommap(vals, title, label):
fig = plt.figure(figsize=(12,8))
plt.subplots_adjust(left=0.05,right=0.95,top=0.90,bottom=0.05,
wspace=0.15,hspace=0.05)
ax = plt.subplot(211)
m = Basemap(projection = 'mill', llcrnrlat = -45, llcrnrlon = -160,
urcrnrlat= 82, urcrnrlon = 170, resolution = 'c')
m.drawcoastlines(linewidth = 0.5)
plt.subplots_adjust(left=0.05,right=0.95,top=0.90,bottom=0.05,
wspace=0.15,hspace=0.05)
# plot all sites in one vectorised call (looping over the dataframe is unnecessary)
x,y = m(vals.lon.values, vals.lat.values)
m.scatter(x,y, s = 20, color = vals.color.values)
ax.text(0.03,0.95, label, size = 12, horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
plt.title(title, fontsize = 12)
#Zoom Europe
axins_1 = zoomed_inset_axes(ax, 2, loc=2, bbox_to_anchor=(0.42, 0.48),
bbox_transform=ax.figure.transFigure)
axins_1.scatter(x, y, s = 20, c = vals.color.values)
m.drawcoastlines(linewidth = 0.5)
x2,y2 = m(-12,35)
x3,y3 = m(40,65)
axins_1.set_xlim(x2,x3)
axins_1.set_ylim(y2,y3)
axes = mark_inset(ax, axins_1, loc1=1, loc2=2, linewidth=1)
#Zoom Australia
axins_2 = zoomed_inset_axes(ax, 2.2, loc=3, bbox_to_anchor=(0.61, 0.255),
bbox_transform=ax.figure.transFigure)
axins_2.scatter(x, y, s = 20, c = vals.color.values)
m.drawcoastlines(linewidth = 0.5)
x2,y2 = m(110,-43)
x3,y3 = m(155,-10)
axins_2.set_xlim(x2,x3)
axins_2.set_ylim(y2,y3)
axes = mark_inset(ax, axins_2, loc1=1, loc2=2,linewidth=1)
#Zoom US
axins_3 = zoomed_inset_axes(ax, 1.6, loc=3, bbox_to_anchor=(0.21, 0.25),
bbox_transform=ax.figure.transFigure)
axins_3.scatter(x, y, s = 20, c = vals.color.values)
m.drawcoastlines(linewidth = 0.5)
x2,y2 = m(-130,22)
x3,y3 = m(-60,63)
axins_3.set_xlim(x2,x3)
axins_3.set_ylim(y2,y3)
axes = mark_inset(ax, axins_3, loc1=1, loc2=2, linewidth=1)
return(fig, axes)
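# Example usage (hypothetical dataframe with 'lat', 'lon' and 'color' columns):
# fig, axes = zoommap(df_sites, "Sites with all ratios above 0.9", "(a)")
# fig.savefig("map_high_ratio_sites.png", dpi=300)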
with open("../data/processed/results_LH.csv", newline='') as myFile:
df_results_LH = pd.read_csv(myFile)
for row in df_results_LH:
print(row)
with open("../data/processed/results_SH.csv", newline='') as myFile:
df_results_SH = pd.read_csv(myFile)
for row in df_results_SH:
print(row)
with open("../data/processed/results_NEE.csv", newline='') as myFile:
df_results_NEE = | pd.read_csv(myFile) | pandas.read_csv |
import pandas as pd
sample1 = pd.read_table('MUT-1_2.annotate.csv', sep='\t', index_col=0)["score"]
sample2 = pd.read_table('MUT-2_2.annotate.csv', sep='\t', index_col=0)["score"]
sample3 = pd.read_table('MUT-4_2.annotate.csv', sep='\t', index_col=0)["score"]
sample4 = pd.read_table('MUT-5_2.annotate.csv', sep='\t', index_col=0)["score"]
sample5 = pd.read_table('MUT-6_2.annotate.csv', sep='\t', index_col=0)["score"]
sample6 = pd.read_table('WT-1_2.annotate.csv', sep='\t', index_col=0)["score"]
sample7 = pd.read_table('WT-2_2.annotate.csv', sep='\t', index_col=0)["score"]
sample8 = pd.read_table('WT-3_2.annotate.csv', sep='\t', index_col=0)["score"]
sample9 = pd.read_table('WT-4_2.annotate.csv', sep='\t', index_col=0)["score"]
sample10 = pd.read_table('WT-5_2.annotate.csv', sep='\t', index_col=0)["score"]
#
meta1 = | pd.read_table('MUT-1_2.annotate.csv', sep='\t', index_col=0) | pandas.read_table |
'''
This method uses these features
['dow', 'year', 'month', 'day_of_week', 'holiday_flg', 'min_visitors', 'mean_visitors', 'median_visitors', 'max_visitors', 'count_observations', 'air_genre_name', 'air_area_name', 'latitude', 'longitude', 'rs1_x', 'rv1_x', 'rs2_x', 'rv2_x', 'rs1_y', 'rv1_y', 'rs2_y', 'rv2_y', 'total_reserv_sum', 'total_reserv_mean', 'total_reserv_dt_diff_mean']
RMSE GradientBoostingRegressor: 0.501477019571
RMSE KNeighborsRegressor: 0.421517079307
'''
import glob, re
import numpy as np
import pandas as pd
from sklearn import *
from datetime import datetime
def RMSLE(y, pred):
return metrics.mean_squared_error(y, pred)**0.5
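# Note: despite the name, this computes plain RMSE; it equals RMSLE only when y and pred are already log-transformed (e.g. np.log1p of the visitor counts).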
data = {
'tra': pd.read_csv('./data/air_visit_data.csv'),
'as': pd.read_csv('./data/air_store_info.csv'),
'hs': pd.read_csv('./data/hpg_store_info.csv'),
'ar': pd.read_csv('./data/air_reserve.csv'),
'hr': pd.read_csv('./data/hpg_reserve.csv'),
'id': pd.read_csv('./data/store_id_relation.csv'),
'tes': pd.read_csv('./data/sample_submission.csv'),
'hol': pd.read_csv('./data/date_info.csv').rename(columns={'calendar_date':'visit_date'})
}
# add 'air_store_id' to data['hr'] as its last column (join on hpg_store_id)
data['hr'] = pd.merge(data['hr'], data['id'], how='inner', on=['hpg_store_id'])
for df in ['ar', 'hr']:
# get year, month, day, get rid of time
data[df]['visit_datetime'] = pd.to_datetime(data[df]['visit_datetime'])
data[df]['visit_datetime'] = data[df]['visit_datetime'].dt.date
data[df]['reserve_datetime'] = pd.to_datetime(data[df]['reserve_datetime'])
data[df]['reserve_datetime'] = data[df]['reserve_datetime'].dt.date
data[df]['reserve_datetime_diff'] = data[df].apply(lambda r: (r['visit_datetime'] - r['reserve_datetime']).days,
axis=1)
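# reserve_datetime_diff: number of days between making the reservation and the visit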
tmp1 = data[df].groupby(['air_store_id', 'visit_datetime'], as_index=False)[
['reserve_datetime_diff', 'reserve_visitors']].sum().rename(
columns={'visit_datetime': 'visit_date', 'reserve_datetime_diff': 'rs1', 'reserve_visitors': 'rv1'})
tmp2 = data[df].groupby(['air_store_id', 'visit_datetime'], as_index=False)[
['reserve_datetime_diff', 'reserve_visitors']].mean().rename(
columns={'visit_datetime': 'visit_date', 'reserve_datetime_diff': 'rs2', 'reserve_visitors': 'rv2'})
data[df] = pd.merge(tmp1, tmp2, how='inner', on=['air_store_id', 'visit_date'])
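# rs1/rv1: per-store, per-visit-date sums of reservation lead time and reserved visitors; rs2/rv2: the corresponding means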
data['tra']['visit_date'] = pd.to_datetime(data['tra']['visit_date'])
data['tra']['dow'] = data['tra']['visit_date'].dt.dayofweek
data['tra']['year'] = data['tra']['visit_date'].dt.year
data['tra']['month'] = data['tra']['visit_date'].dt.month
data['tra']['visit_date'] = data['tra']['visit_date'].dt.date
data['tes']['visit_date'] = data['tes']['id'].map(lambda x: str(x).split('_')[2])
data['tes']['air_store_id'] = data['tes']['id'].map(lambda x: '_'.join(x.split('_')[:2]))
data['tes']['visit_date'] = pd.to_datetime(data['tes']['visit_date'])
data['tes']['dow'] = data['tes']['visit_date'].dt.dayofweek
data['tes']['year'] = data['tes']['visit_date'].dt.year
data['tes']['month'] = data['tes']['visit_date'].dt.month
data['tes']['visit_date'] = data['tes']['visit_date'].dt.date
unique_stores = data['tes']['air_store_id'].unique()
# per-store, per-day-of-week visitor statistics (min/mean/median/max)
stores = pd.concat([pd.DataFrame({'air_store_id': unique_stores, 'dow': [i]*len(unique_stores)}) for i in range(7)], axis=0, ignore_index=True).reset_index(drop=True)
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].min().rename(columns={'visitors':'week_min_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].mean().rename(columns={'visitors':'week_mean_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].median().rename(columns={'visitors':'week_median_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].max().rename(columns={'visitors':'week_max_visitors'})
stores = | pd.merge(stores, tmp, how='left', on=['air_store_id','dow']) | pandas.merge |
import numpy as np
from pandas import DataFrame, Series
from scipy import stats
from Common.Measures.Portfolio.AbstractPortfolioMeasure import AbstractPortfolioMeasure
from Common.StockMarketIndex.AbstractStockMarketIndex import AbstractStockMarketIndex
class PortfolioLinearReg(AbstractPortfolioMeasure):
_index: AbstractStockMarketIndex
_alpha: float = -1.1
_beta: float = -1.1
_r_val: float = -1.1
_p_val: float = -1.1
_std_err: float = -1.1
def __init__(self, an_index: AbstractStockMarketIndex, portfolio_df_returns: DataFrame = | DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # ReEDS Scenarios on PV ICE Tool STATES
# To explore different scenarios for furture installation projections of PV (or any technology), ReEDS output data can be useful in providing standard scenarios. ReEDS installation projections are used in this journal as input data to the PV ICE tool.
#
# Current sections include:
#
# <ol>
# <li> ### Reading a standard ReEDS output file and saving it in a PV ICE input format </li>
# <li>### Reading scenarios of interest and running PV ICE tool </li>
# <li>###Plotting </li>
# <li>### GeoPlotting.</li>
# </ol>
# Notes:
#
# Scenarios of Interest:
# - Ref.Mod
# - 95-by-35.Adv
# - 95-by-35+Elec.Adv+DR
#
# In[1]:
import PV_ICE
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
from IPython.display import display
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
# In[2]:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP' / 'SF_States')
statedatafolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP' / 'STATEs')
print ("Your simulation will be stored in %s" % testfolder)
# In[3]:
PV_ICE.__version__
# ### Reading REEDS original file to get list of SCENARIOs, PCAs, and STATEs
# In[4]:
r"""
reedsFile = str(Path().resolve().parent.parent.parent / 'December Core Scenarios ReEDS Outputs Solar Futures v2a.xlsx')
print ("Input file is stored in %s" % reedsFile)
rawdf = pd.read_excel(reedsFile,
sheet_name="UPV Capacity (GW)")
#index_col=[0,2,3]) #this casts scenario, PCA and State as levels
#now set year as an index in place
#rawdf.drop(columns=['State'], inplace=True)
rawdf.drop(columns=['Tech'], inplace=True)
rawdf.set_index(['Scenario','Year','PCA', 'State'], inplace=True)
scenarios = list(rawdf.index.get_level_values('Scenario').unique())
PCAs = list(rawdf.index.get_level_values('PCA').unique())
STATEs = list(rawdf.index.get_level_values('State').unique())
simulationname = scenarios
simulationname = [w.replace('+', '_') for w in simulationname]
simulationname
SFscenarios = [simulationname[0], simulationname[4], simulationname[8]]
"""
# ### Reading GIS inputs
# In[5]:
r"""
GISfile = str(Path().resolve().parent.parent.parent.parent / 'gis_centroid_n.xlsx')
GIS = pd.read_excel(GISfile)
GIS = GIS.set_index('id')
GIS.head()
GIS.loc['p1'].long
"""
# ### Create Scenarios in PV_ICE
# #### Downselect to Solar Future scenarios of interest
#
# Scenarios of Interest:
# <li> Ref.Mod
# <li> 95-by-35.Adv
# <li> 95-by-35+Elec.Adv+DR
# In[6]:
SFscenarios = ['Reference.Mod', '95-by-35.Adv', '95-by-35_Elec.Adv_DR']
SFscenarios
# In[7]:
STATEs = ['WA', 'CA', 'VA', 'FL', 'MI', 'IN', 'KY', 'OH', 'PA', 'WV', 'NV', 'MD',
'DE', 'NJ', 'NY', 'VT', 'NH', 'MA', 'CT', 'RI', 'ME', 'ID', 'MT', 'WY', 'UT', 'AZ', 'NM',
'SD', 'CO', 'ND', 'NE', 'MN', 'IA', 'WI', 'TX', 'OK', 'OR', 'KS', 'MO', 'AR', 'LA', 'IL', 'MS',
'AL', 'TN', 'GA', 'SC', 'NC']
# ### Create the 3 Scenarios and assign Baselines
#
# Keeping track of each scenario as its own PV ICE Object.
# In[8]:
MATERIALS = ['glass', 'silicon', 'silver','copper','aluminium','backsheet','encapsulant']
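# module materials whose mass flows are tracked in the scenarios below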
# In[9]:
#for ii in range (0, 1): #len(scenarios):
i = 0
r1 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r1.createScenario(name=STATEs[jj], file=filetitle)
r1.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
i = 1
r2 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r2.createScenario(name=STATEs[jj], file=filetitle)
r2.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
i = 2
r3 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r3.createScenario(name=STATEs[jj], file=filetitle)
r3.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
# # Calculate Mass Flow
# In[10]:
r1.scenMod_noCircularity()
r2.scenMod_noCircularity()
r3.scenMod_noCircularity()
IRENA= False
PERFECTMFG = False
ELorRL = 'RL'
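# IRENA toggles IRENA-style lifetime assumptions ('RL' = regular-loss, 'EL' = early-loss); PERFECTMFG removes manufacturing losses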
if IRENA:
r1.scenMod_IRENIFY(ELorRL=ELorRL)
r2.scenMod_IRENIFY(ELorRL=ELorRL)
r3.scenMod_IRENIFY(ELorRL=ELorRL)
if PERFECTMFG:
r1.scenMod_PerfectManufacturing()
r2.scenMod_PerfectManufacturing()
r3.scenMod_PerfectManufacturing()
# In[11]:
r1.calculateMassFlow()
r2.calculateMassFlow()
r3.calculateMassFlow()
# In[12]:
print("STATEs:", r1.scenario.keys())
print("Module Keys:", r1.scenario[STATEs[jj]].data.keys())
print("Material Keys: ", r1.scenario[STATEs[jj]].material['glass'].materialdata.keys())
# # OPEN EI
# In[13]:
kk=0
SFScenarios = [r1, r2, r3]
SFScenarios[kk].name
# In[14]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
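# parallel lists: each position maps a PV_ICE output column to its OpenEI field name, unit, cumulative flag, aggregation level (material vs module) and unit-conversion scale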
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
for ii in range (0, len(materials)):
sentit = '@value|'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
for ii in range (0, len(materials)):
sentit = '@value|Cumulative'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]].cumsum()/keywscale[jj]
else:
sentit = '@value|'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI.csv', index=False)
print("Done")
# In[15]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = | pd.DataFrame() | pandas.DataFrame |
import os
import random
import numpy
import pandas as pd
import tensorflow as tf
import grpc
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import regression_pb2
from grpc.beta import implementations
from tensorflow_serving.apis import prediction_service_pb2_grpc
from flask import Flask,render_template, request
app = Flask(__name__)
app.config["UPLOAD_FOLDER"]=os.getcwd()
TF_MODEL_SERVER_HOST = os.getenv("TF_MODEL_SERVER_HOST", "127.0.0.1")
TF_MODEL_SERVER_PORT = int(os.getenv("TF_MODEL_SERVER_PORT", 9000))
#server="10.111.238.238:9000"
server = str(TF_MODEL_SERVER_HOST)+":"+str(TF_MODEL_SERVER_PORT)
channel = grpc.insecure_channel(server)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = classification_pb2.ClassificationRequest()
request.model_spec.name = 'Model_Blerssi'
request.model_spec.signature_name = 'serving_default'
try :
response = stub.Classify(request, 10.0)
except Exception as e:
pass
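# initial test request; any gRPC error here (e.g. the model server not being reachable yet) is swallowed so the app can still start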
server = str(TF_MODEL_SERVER_HOST)+":"+str(TF_MODEL_SERVER_PORT)
print(server)
#channel = grpc.insecure_channel(server)
#stub = helloworld_pb2_grpc.GreeterStub(channel)
#response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
#print(response)
label_dict = {'O02': 0, 'P01': 1, 'P02': 2, 'R01': 3, 'R02': 4, 'S01': 5, 'S02': 6, 'T01': 7, 'U02': 8, 'U01': 9, 'J03': 10,
'K03': 11, 'L03': 12, 'M03': 13, 'N03': 14, 'O03': 15, 'P03': 16, 'Q03': 17, 'R03': 18, 'S03': 19, 'T03': 20,
'U03': 21, 'U04': 22, 'T04': 23, 'S04': 24, 'R04': 25, 'Q04': 26, 'P04': 27, 'O04': 28, 'N04': 29, 'M04': 30,
'L04': 31, 'K04': 32, 'J04': 33, 'I04': 34, 'I05': 35, 'J05': 36, 'K05': 37, 'L05': 38, 'M05': 39, 'N05': 40,
'O05': 41, 'P05': 42, 'Q05': 43, 'R05': 44, 'S05': 45, 'T05': 46, 'U05': 47, 'S06': 48, 'R06': 49, 'Q06': 50,
'P06': 51, 'O06': 52, 'N06': 53, 'M06': 54, 'L06': 55, 'K06': 56, 'J06': 57, 'I06': 58, 'F08': 59, 'J02': 60,
'J07': 61, 'I07': 62, 'I10': 63, 'J10': 64, 'D15': 65, 'E15': 66, 'G15': 67, 'J15': 68, 'L15': 69, 'R15': 70,
'T15': 71, 'W15': 72, 'I08': 73, 'I03': 74, 'J08': 75, 'I01': 76, 'I02': 77, 'J01': 78, 'K01': 79, 'K02': 80,
'L01': 81, 'L02': 82, 'M01': 83, 'M02': 84, 'N01': 85, 'N02': 86, 'O01': 87, 'I09': 88, 'D14': 89, 'D13': 90,
'K07': 91, 'K08': 92, 'N15': 93, 'P15': 94, 'I15': 95, 'S15': 96, 'U15': 97, 'V15': 98, 'S07': 99, 'S08': 100,
'L09': 101, 'L08': 102, 'Q02': 103, 'Q01': 104}
def get_key(val):
for key, value in label_dict.items():
if val == value:
return key
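# reverse lookup of label_dict: map a predicted class index back to its location label, e.g. get_key(0) -> 'O02'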
@app.route('/')
def form():
return render_template("login.html")
@app.route('/transform', methods=['POST'])
def transform_view():
from flask import request
csv = request.files['file']
csv.save("iBeacon_RSSI_Unlabeled.csv")
BLE_RSSI_UL = | pd.read_csv("iBeacon_RSSI_Unlabeled.csv", encoding='utf8') | pandas.read_csv |
import sys
sys.path.append(r'C:\PhD_Chalmers\Python\Lib\site-packages') ## add local site-packages directory (raw string for the Windows path)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
today = datetime.date.today()
from m1_Class_Plants import pp_list,fuel_list,biomass
from m2_demand_slices import q0_hr_slice,q0t,availability_levels_solar,availability_levels_wind
from m2_slices import slice_hrs
from m3_1_availability import fun_availability
from m3_2_dispatch_order import fun_rank_dispatch
from m4_demand_supply import fun_demand_supply
from m5_utilization import fun_pp_utilization
from m7_plot_dispatch_slice import func_plot_dispatch_slice
from m7_plot_dispatch_yr import func_plot_dispatch_yr
from m10_switch_bio import func_switch_bio
from m11_func_switch_type import func_switch_type
pd.set_option('display.max_columns', 12)
pd.options.mode.chained_assignment = None
import pdb
# ~ pdb.set_trace() ##set breakpoint
# =============================================================================
##1.1import data: installed_capacity.
file_path1 = "output/tables/installed/"
file_name1 = "installed_capacity_2019-06-17.xlsx"
# ~ file_comp0 = "yr_installed_comp0_heter_2019-05-30.xlsx"
# ~ file_comp1 = "yr_installed_comp1_heter_2019-05-30.xlsx"
# ~ file_comp2 = "yr_installed_comp2_heter_2019-05-30.xlsx"
df_installed = pd.read_excel(file_path1+file_name1,header=0,index_col=0)
df_installed.fillna(value=0,inplace=True)##fill the NaN vaulue with 0.
# ~ print(df_installed)
n =len(df_installed.columns)#length of columns.
df_installed.set_axis(np.array(['capacity']*n), axis='columns', inplace=True) ##rename column name to 'capacity'.
##on individual agent level.
# ~ comp0_installed = pd.read_excel(file_path1+file_comp0,header=0,index_col=0)
# ~ comp1_installed = pd.read_excel(file_path1+file_comp1,header=0,index_col=0)
# ~ comp2_installed = pd.read_excel(file_path1+file_comp2,header=0,index_col=0)
# ~ comp0_installed.fillna(value=0,inplace=True)
# ~ comp1_installed.fillna(value=0,inplace=True)
# ~ comp2_installed.fillna(value=0,inplace=True)
# ~ comp0_installed.set_axis(np.array(['capacity']*n), axis='columns', inplace=True) ##rename column name to 'capacity'.
# ~ comp1_installed.set_axis(np.array(['capacity']*n), axis='columns', inplace=True) ##rename column name to 'capacity'.
# ~ comp2_installed.set_axis(np.array(['capacity']*n), axis='columns', inplace=True) ##rename column name to 'capacity'.
# =============================================================================
##1.2.concating attributes (running cost, emission intensity)##
df_attribute = pd.DataFrame({'running_cost': [pp.running_cost for pp in pp_list],
'emission_intensity': [pp.emission_intensity for pp in pp_list]},index=[pp.plant_type for pp in pp_list])
df_pp_pi = pd.concat([df_attribute,df_installed], axis=1, sort=True, copy=False )##concat two df.
# ~ comp0_installed = pd.concat([df_attribute,comp0_installed], axis=1, sort=True, copy=False )##concat two df.
# ~ comp1_installed = pd.concat([df_attribute,comp1_installed], axis=1, sort=True, copy=False )##concat two df.
# ~ comp2_installed = pd.concat([df_attribute,comp2_installed], axis=1, sort=True, copy=False )##concat two df.
df_pp_pi.reset_index(level=0, inplace=True)## reset index
# ~ comp0_installed.reset_index(level=0, inplace=True)## reset index
# ~ comp1_installed.reset_index(level=0, inplace=True)## reset index
# ~ comp2_installed.reset_index(level=0, inplace=True)## reset index
df_pp_pi.rename({'index':'plant_type'}, axis='columns',inplace=True) ##rename column.
# ~ comp0_installed.rename({'index':'plant_type'}, axis='columns',inplace=True) ##rename column.
# ~ comp1_installed.rename({'index':'plant_type'}, axis='columns',inplace=True) ##rename column.
# ~ comp2_installed.rename({'index':'plant_type'}, axis='columns',inplace=True) ##rename column.
# ~ print(df_pp_pi)
# =============================================================================
##1.3 set-up empty df for plottings.
yr_dispatched_series = pd.DataFrame(index=[pp.plant_type for pp in fuel_list])
slice_dispatched_series = pd.DataFrame(index=[pp.plant_type for pp in fuel_list])
# ~ pdb.set_trace() ##set breakpoint
##1.4 set-up list for plottings.
sliced_hours = np.reshape(q0_hr_slice, 64, order='C')
annual_price_average1 = np.array([]) ## for recording average electricity price of each year. ##average by production.
##1.5 set-up empty df for recording results(profit)
yr_revenue_series = pd.DataFrame(index=[pp.plant_type for pp in pp_list])
#system level
yr_operate_profit_series = pd.DataFrame(index=[pp.plant_type for pp in pp_list])
##individual agent level.
# ~ yr_operate_profit_comp0_series = pd.DataFrame(index=[pp.plant_type for pp in pp_list])
# ~ yr_operate_profit_comp1_series = pd.DataFrame(index=[pp.plant_type for pp in pp_list])
# ~ yr_operate_profit_comp2_series = pd.DataFrame(index=[pp.plant_type for pp in pp_list])
# ============================================================================
##2.calculating el. price, dispatch, revenue and ect.
for year in range (n):
print('\n' +'----- year: '+ str(year)+'----- ')
##2.1 slice out this year's capacity column (together with plant type, running cost and emission intensity).
df_pp = df_pp_pi.iloc[:,[0,1,2,year+3]]
# ~ df_comp0 = comp0_installed.iloc[:,[0,1,2,year+3]]
# ~ df_comp1 = comp1_installed.iloc[:,[0,1,2,year+3]]
# ~ df_comp2 = comp2_installed.iloc[:,[0,1,2,year+3]]
# ===================================================================
# 2.2 set carbon_tax
if year <= 10:
carbon_tax = 0 ## carbon tax is 0 before year 10.
elif 10 <= year <= 50:
carbon_tax = 250 * year - 2500 ## from year 10 to 50, carbon tax increases linearly to 10000 cent/ton.
else:
carbon_tax = 10000 ## after year 50, carbon tax stays at 10000 cent/ton.
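## e.g. at year 30 the tax is 250*30 - 2500 = 5000 cent/ton; the linear ramp reaches 10000 exactly at year 50.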
# =================================================================
# creat empty lists and initial values.
eq_price_slice_64 = np.array([])
dispatched_slice_64 = np.array([])
coal_dispatch_slice = np.array([])
slice_nr = 0
df_pp['dispatched_yr'] = 0
df_pp['revenue_yr'] = 0
df_pp['operate_profit_yr'] = 0
# ~ df_comp0 ['operate_profit_yr'] = 0
# ~ df_comp1 ['operate_profit_yr'] = 0
# ~ df_comp2 ['operate_profit_yr'] = 0
df_pp['marginal_cost'] = df_pp['running_cost'] + carbon_tax * df_pp['emission_intensity']
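## marginal cost = running cost plus the carbon-tax charge implied by each plant's emission intensity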
# ~ df_comp0['marginal_cost'] = df_comp0['running_cost'] + carbon_tax * df_comp0['emission_intensity']
# ~ df_comp1['marginal_cost'] = df_comp0['running_cost'] + carbon_tax * df_comp1['emission_intensity']
# ~ df_comp2['marginal_cost'] = df_comp0['running_cost'] + carbon_tax * df_comp2['emission_intensity']
if biomass in fuel_list: ##check whether biofuel is available.
##if natural gas is more expensive than biogas.
df_pp = func_switch_bio(df_pp,biomass.running_cost)
# ~ df_comp0 = func_switch_bio(df_comp0,biomass.running_cost)
# ~ df_comp1 = func_switch_bio(df_comp1,biomass.running_cost)
# ~ df_comp2 = func_switch_bio(df_comp2,biomass.running_cost)
# ~ print('yes, biomass.')
# ===============================================================
##start ex-post analysis.
for q0 in range(len(q0t)): ##length is 4, loop through.
demand_level = q0t[q0]
for solar_index in range(len(availability_levels_solar)):##4 levels of solar availability.
avail_solar = availability_levels_solar[solar_index]
for wind_index in range(len(availability_levels_wind)):
avail_wind = availability_levels_wind[wind_index]
allocated_hours = q0_hr_slice[q0][solar_index][wind_index] ## hours in current slice.
# =======================================================
df_pp['capacity_available_KW'] = df_pp.apply(lambda row: fun_availability(row.plant_type,avail_solar,avail_wind),axis=1) * df_pp.capacity
# ~ df_comp0['capacity_available_KW'] = df_comp0.apply(lambda row: fun_availability(row.plant_type,avail_solar,avail_wind),axis=1) * df_comp0.capacity
# ~ df_comp1['capacity_available_KW'] = df_comp1.apply(lambda row: fun_availability(row.plant_type,avail_solar,avail_wind),axis=1) * df_comp1.capacity
# ~ df_comp2['capacity_available_KW'] = df_comp2.apply(lambda row: fun_availability(row.plant_type,avail_solar,avail_wind),axis=1) * df_comp2.capacity
ranked_dispatch = fun_rank_dispatch(df_pp)
eq_production,eq_price,last_supply_type,last_supply_percent = fun_demand_supply(df_pp,ranked_dispatch,demand_level)
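## returns the market-clearing production and price, plus the marginal ('last') supplier and the share of it that is dispatched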
eq_price_slice_64 = np.append(eq_price_slice_64,[eq_price])
df_pp['utilization'] = df_pp.apply(lambda row: fun_pp_utilization(row.marginal_cost,last_supply_percent,eq_price),axis=1)
# ~ df_comp0['utilization'] = df_comp0.apply(lambda row: fun_pp_utilization(row.marginal_cost,last_supply_percent,eq_price),axis=1)
# ~ df_comp1['utilization'] = df_comp1.apply(lambda row: fun_pp_utilization(row.marginal_cost,last_supply_percent,eq_price),axis=1)
# ~ df_comp2['utilization'] = df_comp2.apply(lambda row: fun_pp_utilization(row.marginal_cost,last_supply_percent,eq_price),axis=1)
df_pp['slice_dispatched_KWH'] = df_pp.utilization * df_pp.capacity_available_KW ##dispatch per hour within this slice
df_pp['slice_dispatched_KWHs'] = df_pp.slice_dispatched_KWH *allocated_hours ##energy over the whole slice: hourly dispatch times allocated hours
# ~ df_comp0['slice_dispatched_KWHs'] = df_comp0.utilization * df_comp0.capacity_available_KW *allocated_hours ##one slice, multiplied with allocated_hours
# ~ df_comp1['slice_dispatched_KWHs'] = df_comp1.utilization * df_comp1.capacity_available_KW *allocated_hours ##one slice, multiplied with allocated_hours
# ~ df_comp2['slice_dispatched_KWHs'] = df_comp2.utilization * df_comp2.capacity_available_KW *allocated_hours ##one slice, multiplied with allocated_hours
df_pp['revenue_slice'] = df_pp.slice_dispatched_KWHs * eq_price
df_pp['operating_cost_slice'] = df_pp.slice_dispatched_KWHs * df_pp.marginal_cost
df_pp['operate_profit_slice'] = df_pp.revenue_slice - df_pp.operating_cost_slice
# ~ df_comp0['operate_profit_slice'] = df_comp0.slice_dispatched_KWHs * (eq_price - df_comp0.marginal_cost)
# ~ df_comp1['operate_profit_slice'] = df_comp1.slice_dispatched_KWHs * (eq_price - df_comp1.marginal_cost)
# ~ df_comp2['operate_profit_slice'] = df_comp2.slice_dispatched_KWHs * (eq_price - df_comp2.marginal_cost)
# ======================================================
# ======================================================
##recording slice results.
##1. dispatch of each slice.
slice_dispatch = df_pp[['plant_type','slice_dispatched_KWH']].set_index('plant_type')
slice_dispatched_series[str(slice_nr)] = pd.concat([slice_dispatch], axis=1, sort=True)
##2. sum-up production from all types of pp, for calcuating average price.
dispatched_slice_64 = np.append(dispatched_slice_64,[df_pp['slice_dispatched_KWHs'].sum()])
##3. accumulate slice results.
df_pp['dispatched_yr'] += df_pp.slice_dispatched_KWHs
df_pp['revenue_yr'] += df_pp.revenue_slice
df_pp['operate_profit_yr'] += df_pp.operate_profit_slice
# ~ df_comp0['operate_profit_yr'] += df_comp0.operate_profit_slice
# ~ df_comp1['operate_profit_yr'] += df_comp1.operate_profit_slice
# ~ df_comp2['operate_profit_yr'] += df_comp2.operate_profit_slice
# ~ print(df_pp['slice_dispatched_KWHs'])
# ~ print(dispatched_slice_64)
slice_nr +=1
continue
continue
continue
# =========================================================================#
##record yearly results.
#dispatch
yr_dispatch = df_pp[['plant_type','dispatched_yr']].set_index('plant_type')
yr_dispatched_series[str(year)] = pd.concat([yr_dispatch], axis=1, sort=True)
#revenue & profit
df_pp = func_switch_type(df_pp,'biomass','gas')
# ~ df_comp0 = func_switch_type(df_comp0,'biomass','gas')
# ~ df_comp1 = func_switch_type(df_comp1,'biomass','gas')
# ~ df_comp2 = func_switch_type(df_comp2,'biomass','gas')
# ~ index_biomass = df_pp.loc[df_pp.plant_type == 'biomass'].index
# ~ df_pp.at[index_biomass, 'plant_type'] = 'gas' ## switch name of biomass to gas.
##system level
yr_revenue = df_pp[['plant_type','revenue_yr']].set_index('plant_type')
yr_revenue_series[str(year)] = | pd.concat([yr_revenue], axis=1, sort=True) | pandas.concat |
import numpy as np
from pandas import Series, Period, PeriodIndex, DataFrame, date_range, period_range
class create_period_index_from_date_range(object):
goal_time = 0.2
def time_period_index(self):
# Simulate irregular PeriodIndex
PeriodIndex(date_range('1985', periods=1000).to_pydatetime(), freq='D')
class period_setitem(object):
goal_time = 0.2
def setup(self):
self.N = 100000
self.rng = date_range(start='1/1/2000', periods=self.N, freq='T')
if hasattr(Series, 'convert'):
Series.resample = Series.convert
self.ts = Series(np.random.randn(self.N), index=self.rng)
self.rng = period_range(start='1/1/1990', freq='S', periods=20000)
self.df = DataFrame(index=range(len(self.rng)))
def time_period_setitem(self):
self.df['col'] = self.rng
class period_algorithm(object):
goal_time = 0.2
def setup(self):
data = [ | Period('2011-01', freq='M') | pandas.Period |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 3 10:50:05 2020
@author: obazgir
"""
import csv
import numpy as np
import pandas as pd
import os
import scipy as sp
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
import cv2
import pickle
from Toolbox import NRMSE, Random_Image_Gen, two_d_norm, two_d_eq, Assign_features_to_pixels, MDS_Im_Gen, Bias_Calc, REFINED_Im_Gen
from sklearn.metrics import mean_absolute_error
##########################################
# #
# #
# Data Cleaning #
# #
##########################################
cell_lines = ["HCC_2998","MDA_MB_435", "SNB_78", "NCI_ADR_RES","DU_145", "786_0", "A498","A549_ATCC","ACHN","BT_549","CAKI_1","DLD_1","DMS_114","DMS_273","CCRF_CEM","COLO_205","EKVX"]
#cell_lines = ["HCC_2998"]
Results_Dic = {}
SAVE_PATH = "/home/obazgir/REFINED/Volumetric_REFINED/Geometric_REFINED/"
#%%
for SEL_CEL in cell_lines:
# Loading the drug responses and their IDs (NSC)
DF = | pd.read_csv("/home/obazgir/REFINED/NCI/NCI60_GI50_normalized_April.csv") | pandas.read_csv |
import sys
import time
sys.path.insert(0, 'src/Utilities')
import pandas as pd
import tools
import ml_utilities as mlu
import os
import re
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
def run_gender_cor(df, options, n, scoresdf, contrast_name, label):
classification = True
if label == 'gender':
df.drop(['label', 'age'], axis=1, inplace=True)
models = ["svm_kernel_default", "svm_kernel_tuned", "naive_bayes", "decision_tree", "rfc",
'logistic_regression']
elif label == 'age':
df.drop(['label', 'gender'], axis=1, inplace=True)
models = ['linear_reg', 'lasso', 'polynomial_reg']
models = ['svr_kernel_default', 'svr_kernel_tuned', 'gpr_default']
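# note: this second assignment overrides the linear/lasso/polynomial list above,
# so only the SVR and GPR regressors are actually run for the age target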
classification = False
df = df.rename(columns={label: 'label'})
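# the selected target column (gender or age) is renamed to 'label', which the mlu helpers below presumably key on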
for i in range(options.number_iterations):
train, test = mlu.train_test_split(df)
x_train, y_train = mlu.get_features_labels(train)
x_test, y_test = mlu.get_features_labels(test)
if classification:
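# standardize features for the classifiers only: fit the scaler on the training split
# and reuse it on the test split to avoid leakage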
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train, y_train)
x_test = scaler.transform(x_test)
if options.model == 'all':
for model_name in models:
train_score, train_balanced_score, trained_model, min_max_scaler = mlu.model_fitting(model_name,
x_train, y_train,
options.kFold,
options.normalize)
if options.normalize:
x_test_minmax = min_max_scaler.transform(x_test)
x_test = x_test_minmax
test_score = trained_model.score(x_test, y_test)
test_balanced_score = mlu.balanced_accuracy(trained_model.predict(x_test), y_test)
if not classification:
if model_name == "gpr_default":
pred, sigma = trained_model.predict(x_test, return_std=True)
else:
pred = trained_model.predict(x_test)
test_balanced_score = mean_squared_error(y_test, pred, multioutput='raw_values')
# print(model_name + " Train:"+ str(train_score) + " Test:" +str(test_score) +" Contrast:" +contrast_name)
scoresdf = scoresdf.append(
{'Score': train_score, 'Type': 'train', 'Model': model_name, 'Classifier': n,
'Contrast_name': contrast_name, 'Balanced_accuracy': train_balanced_score}, ignore_index=True)
scoresdf = scoresdf.append(
{'Score': test_score, 'Type': 'test', 'Model': model_name, 'Classifier': n,
'Contrast_name': contrast_name, 'Balanced_accuracy': test_balanced_score}, ignore_index=True)
else:
train_score, train_balanced_score, trained_model, min_max_scaler = mlu.model_fitting(options.model, x_train, y_train)
test_score = trained_model.score(x_test, y_test)
test_balanced_score = mlu.balanced_accuracy(trained_model.predict(x_test), y_test)
scoresdf = scoresdf.append(
{'Score': train_score, 'Type': 'train', 'Model': options.model, 'Classifier': n,
'Contrast_name': contrast_name, 'Balanced_accuracy': train_balanced_score}, ignore_index=True)
scoresdf = scoresdf.append(
{'Score': test_score, 'Type': 'test', 'Model': options.model, 'Classifier': n,
'Contrast_name': contrast_name, 'Balanced_accuracy': test_balanced_score}, ignore_index=True)
return scoresdf
def main():
options = tools.parse_options()
start = time.time()
if os.path.isfile(options.input):
scoresdf = | pd.read_csv(options.input) | pandas.read_csv |
import cv2 as cv
from datetime import datetime
import logging
import math
import numpy as np
import os
from os import path
import pandas as pd
import random
import requests
from timeit import default_timer as timer
import tkinter
def timeit(method):
"""
Decorator to measure the execution time of functions.
:param method:
:return:
"""
def timed(*args, **kw):
ts = timer()
result = method(*args, **kw)
te = timer()
logging.info('Time for method \'{}\': {} s'.format(method.__name__, te - ts))  # default_timer() measures seconds, not ms
return result
return timed
def warmup(method):
"""
Decorator to run a warm-up phase before any timing test.
:param method:
:return:
"""
def warm(*args, **kw):
print("warming...")
for _ in range(10000):
random.randint(0, 1000000)
result = method(*args, **kw)
return result
return warm
def load_scores(file_path):
"""
Load scores saved in a text file, where each line is one score.
:param file_path: path to the text file.
:return:
"""
f = open(file_path, "r")
for line in f:
dist = float(line.strip())
yield dist
f.close()
def date_string():
"""
Get a string with the current date in year-month-day_hour-minute-second format.
:return: string with the date.
"""
return datetime.now().strftime("%y-%m-%d_%H-%M-%S")
def decompose_seconds(seconds):
"""
Decompose a number of seconds into hours, minutes, seconds and milliseconds.
:param seconds:
:return:
"""
h = int(seconds // 3600)
m = int(seconds % 3600 // 60)
s = int(seconds % 60)
ms = int((seconds - int(seconds)) * 1000)
return h, m, s, ms
def time_string(seconds, with_ms=False, sep=':'):
"""
Return a string in HH:MM:SS format for the number of seconds passed as a parameter.
If with_ms=True the format is HH:MM:SS,MS.
:param seconds:
:param with_ms:
:param sep:
:return:
"""
h, m, s, ms = decompose_seconds(seconds)
if with_ms:
return f'{h:02d}{sep}{m:02d}{sep}{s:02d},{ms:03d}'
else:
return f'{h:02d}{sep}{m:02d}{sep}{s:02d}'
def write_xlsx(datas, output_file):
"""
Write a table of data to an xlsx file.
:param datas: list of data tables in the format {column: [values], ...}.
:param output_file: output file.
:return:
"""
file_dir, _ = path.split(output_file)
if file_dir != '' and not path.exists(file_dir):
os.makedirs(file_dir)
writer = pd.ExcelWriter(output_file, engine='xlsxwriter')
sheet_name = 'Sheet1'
row = 0
if isinstance(datas, list):
for data in datas:
df = | pd.DataFrame(data) | pandas.DataFrame |
__author__ = "<NAME>"
__date__ = "Dec 14, 2010"
import csv
import json
from pathlib import Path
from numpy import NaN, concatenate
from openpyxl import load_workbook
from pandas import DataFrame, ExcelWriter, read_excel
from corems.mass_spectrum.output.export import HighResMassSpecExport
from corems.molecular_id.calc.SpectralSimilarity import methods_name
from corems.encapsulation.constant import Atoms
from corems.encapsulation.output import parameter_to_dict
from corems.mass_spectrum.factory.MassSpectrumClasses import MassSpecfromFreq
from corems import __version__, corems_md5
import uuid
class LowResGCMSExport():
def __init__(self, out_file_path, gcms):
'''
out_file_path: str
    base path for the exported file (each export method sets its own suffix)
gcms:
    low-resolution GC-MS object whose peaks and candidate matches are exported
'''
self.output_file = Path(out_file_path)
self.gcms = gcms
self._init_columns()
def _init_columns(self):
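# single source of truth for the column order shared by the pandas/excel/csv exports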
columns = ['Sample name', 'Peak Index', 'Retention Time', 'Retention Time Ref', 'Peak Height',
'Peak Area', 'Retention index', 'Retention index Ref', 'Retention Index Score',
'Similarity Score',
'Spectral Similarity Score',
'Compound Name',
"Chebi ID", "Kegg Compound ID",
"Inchi", "Inchi Key",
"Smiles",
"Molecular Formula",
"IUPAC Name",
"Traditional Name",
"Common Name",
'Derivatization'
]
if self.gcms.molecular_search_settings.exploratory_mode:
columns.extend(['Weighted Cosine Correlation',
'Cosine Correlation',
'Stein Scott Similarity',
'Pearson Correlation',
'Spearman Correlation',
'Kendall Tau Correlation',
'Euclidean Distance',
'Manhattan Distance',
'Jaccard Distance',
'DWT Correlation',
'DFT Correlation'])
columns.extend(list(methods_name.values()))
return columns
def get_pandas_df(self, id_label="corems:"):
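# one row per (chromatographic peak, candidate compound) match, plus peak-only rows
# for unmatched peaks (see get_list_dict_data)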
columns = self._init_columns()
dict_data_list = self.get_list_dict_data(self.gcms)
df = DataFrame(dict_data_list, columns=columns)
df.name = self.gcms.sample_name
return df
def get_json(self, nan=False, id_label="corems:"):
import json
dict_data_list = self.get_list_dict_data(self.gcms)
return json.dumps(dict_data_list, sort_keys=False, indent=4, separators=(',', ': '))
def to_pandas(self, write_metadata=True, id_label="corems:"):
columns = self._init_columns()
dict_data_list = self.get_list_dict_data(self.gcms)
df = DataFrame(dict_data_list, columns=columns)
df.to_pickle(self.output_file.with_suffix('.pkl'))
if write_metadata:
self.write_settings(self.output_file.with_suffix('.pkl'), self.gcms, id_label="corems:")
def to_excel(self, write_mode='a', write_metadata=True, id_label="corems:"):
out_put_path = self.output_file.with_suffix('.xlsx')
columns = self._init_columns()
dict_data_list = self.get_list_dict_data(self.gcms)
df = DataFrame(dict_data_list, columns=columns)
if write_mode == 'a' and out_put_path.exists():
writer = ExcelWriter(out_put_path, engine='openpyxl')
# try to open an existing workbook
writer.book = load_workbook(out_put_path)
# copy existing sheets
writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets)
# read existing file
reader = read_excel(out_put_path)
# write out the new sheet
df.to_excel(writer, index=False, header=False, startrow=len(reader) + 1)
writer.close()
else:
df.to_excel(self.output_file.with_suffix('.xlsx'), index=False, engine='openpyxl')
if write_metadata:
self.write_settings(out_put_path, self.gcms, id_label=id_label)
def to_csv(self, separate_output=False, write_mode="w", write_metadata=True, id_label="corems:"):
if separate_output:
# set write mode to write
# this mode will overwrite the file without warning
write_mode = 'w'
else:
# set write mode to append
write_mode = 'a'
columns = self._init_columns()
dict_data_list = self.get_list_dict_data(self.gcms)
out_put_path = self.output_file.with_suffix('.csv')
write_header = not out_put_path.exists()
try:
with open(out_put_path, write_mode, newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=columns)
if write_header:
writer.writeheader()
for data in dict_data_list:
writer.writerow(data)
if write_metadata:
self.write_settings(out_put_path, self.gcms, id_label=id_label)
except IOError as ioerror:
print(ioerror)
def to_hdf(self, id_label="corems:"):
# save sample at a time
def add_compound(gc_peak, compound_obj):
modifier = compound_obj.classify if compound_obj.classify else ""
compound_group = peak_group.create_group(compound_obj.name.replace('/', '') + " " + modifier)
compound_group.attrs["retention_time"] = compound_obj.rt
compound_group.attrs["retention_index"] = compound_obj.ri
compound_group.attrs["retention_index_score"] = compound_obj.ri_score
compound_group.attrs["spectral_similarity_score"] = compound_obj.spectral_similarity_score
compound_group.attrs["similarity_score"] = compound_obj.similarity_score
compound_mz = compound_group.create_dataset('mz', data=np.array(compound_obj.mz), dtype="f8")
compound_abundance = compound_group.create_dataset('abundance', data=np.array(compound_obj.abundance), dtype="f8")
if self.gcms.molecular_search_settings.exploratory_mode:
compound_group.attrs['Spectral Similarities'] = json.dumps(compound_obj.spectral_similarity_scores,
sort_keys=False, indent=4, separators=(',',':'))
import h5py
import json
import numpy as np
from datetime import datetime, timezone
output_path = self.output_file.with_suffix('.hdf5')
with h5py.File(output_path, 'w') as hdf_handle:
timenow = str(datetime.now(timezone.utc).strftime("%d/%m/%Y %H:%M:%S %Z"))
hdf_handle.attrs['time_stamp'] = timenow
hdf_handle.attrs['data_structure'] = 'gcms'
hdf_handle.attrs['analyzer'] = self.gcms.analyzer
hdf_handle.attrs['instrument_label'] = self.gcms.instrument_label
hdf_handle.attrs['sample_id'] = "self.gcms.id"
hdf_handle.attrs['sample_name'] = self.gcms.sample_name
hdf_handle.attrs['input_data'] = str(self.gcms.file_location)
hdf_handle.attrs['output_data'] = str(output_path)
hdf_handle.attrs['output_data_id'] = id_label + uuid.uuid4().hex
hdf_handle.attrs['corems_version'] = __version__
hdf_handle.attrs["Stats"] = json.dumps(self.get_data_stats(self.gcms), sort_keys=False, indent=4, separators=(',', ': '))
hdf_handle.attrs["Calibration"] = json.dumps(self.get_calibration_stats(self.gcms, id_label), sort_keys=False, indent=4, separators=(',', ': '))
hdf_handle.attrs["Blank"] = json.dumps(self.get_blank_stats(self.gcms), sort_keys=False, indent=4, separators=(',', ': '))
corems_dict_setting = parameter_to_dict.get_dict_data_gcms(self.gcms)
hdf_handle.attrs["CoreMSParameters"] = json.dumps(corems_dict_setting, sort_keys=False, indent=4, separators=(',', ': '))
scans_dataset = hdf_handle.create_dataset('scans', data=np.array(self.gcms.scans_number), dtype="f8")
rt_dataset = hdf_handle.create_dataset('rt', data=np.array(self.gcms.retention_time), dtype="f8")
tic_dataset = hdf_handle.create_dataset('tic', data=np.array(self.gcms.tic), dtype="f8")
processed_tic_dataset = hdf_handle.create_dataset('processed_tic', data=np.array(self.gcms.processed_tic), dtype="f8")
output_score_method = self.gcms.molecular_search_settings.output_score_method
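# controls how many candidates are written per peak: only the best similarity score,
# only the best spectral similarity, or every candidate compound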
for gc_peak in self.gcms:
# print(gc_peak.rt)
# print(gc_peak.tic)
# check if there is a compound candidate
peak_group = hdf_handle.create_group(str(gc_peak.rt))
peak_group.attrs["deconvolution"] = int(self.gcms.chromatogram_settings.use_deconvolution)
peak_group.attrs["start_index"] = gc_peak.start_index
peak_group.attrs["index"] = gc_peak.index
peak_group.attrs["final_index"] = gc_peak.final_index
peak_group.attrs["retention_index"] = gc_peak.ri
peak_group.attrs["retention_time"] = gc_peak.rt
peak_group.attrs["area"] = gc_peak.area
mz = peak_group.create_dataset('mz', data=np.array(gc_peak.mass_spectrum.mz_exp), dtype="f8")
abundance = peak_group.create_dataset('abundance', data=np.array(gc_peak.mass_spectrum.abundance), dtype="f8")
if gc_peak:
if output_score_method == 'highest_sim_score':
compound_obj = gc_peak.highest_score_compound
add_compound(gc_peak, compound_obj)
elif output_score_method == 'highest_ss':
compound_obj = gc_peak.highest_ss_compound
add_compound(gc_peak, compound_obj)
else:
for compound_obj in gc_peak:
add_compound(gc_peak, compound_obj)
def get_data_stats(self, gcms):
matched_peaks = gcms.matched_peaks
no_matched_peaks = gcms.no_matched_peaks
unique_metabolites = gcms.unique_metabolites
peak_matchs_above_0p85 = 0
unique_peak_match_above_0p85 = 0
for match_peak in matched_peaks:
gc_peak_above_85 = 0
matches_above_85 = list(filter(lambda m: m.similarity_score >= 0.85, match_peak))
if matches_above_85:
peak_matchs_above_0p85 +=1
if len(matches_above_85) == 1:
unique_peak_match_above_0p85 += 1
data_stats = {}
data_stats['average_signal_noise'] = "ni"
data_stats['chromatogram_dynamic_range'] = gcms.dynamic_range
data_stats['total_number_peaks'] = len(gcms)
data_stats['total_peaks_matched'] = len(matched_peaks)
data_stats['total_peaks_without_matches'] = len(no_matched_peaks)
data_stats['total_matches_above_similarity_score_0.85'] = peak_matchs_above_0p85
data_stats['single_matches_above_similarity_score_0.85'] = unique_peak_match_above_0p85
data_stats['unique_metabolites'] = len(unique_metabolites)
return data_stats
def get_calibration_stats(self, gcms, id_label):
calibration_parameters = {}
calibration_parameters['calibration_rt_ri_pairs_ref'] = gcms.ri_pairs_ref
calibration_parameters['data_url'] = str(gcms.cal_file_path)
calibration_parameters['has_input'] = id_label + corems_md5(gcms.cal_file_path)
calibration_parameters['data_name'] = str(gcms.cal_file_path.stem)
calibration_parameters['calibration_method'] = ""
return calibration_parameters
def get_blank_stats(self, gcms):
blank_parameters = {}
blank_parameters['data_name'] = "ni"
blank_parameters['blank_id'] = "ni"
blank_parameters['data_url'] = "ni"
blank_parameters['has_input'] = "ni"
blank_parameters['common_features_to_blank'] = "ni"
return blank_parameters
def get_instrument_metadata(self, gcms):
instrument_metadata = {}
instrument_metadata['analyzer'] = gcms.analyzer
instrument_metadata['instrument_label'] = gcms.instrument_label
instrument_metadata['instrument_id'] = uuid.uuid4().hex
return instrument_metadata
def get_data_metadata(self, gcms, id_label, output_path):
if isinstance(output_path, str):
output_path = Path(output_path)
paramaters_path = output_path.with_suffix('.json')
if paramaters_path.exists():
with paramaters_path.open() as current_param:
metadata = json.load(current_param)
data_metadata = metadata.get('Data')
else:
data_metadata = {}
data_metadata['data_name'] = []
data_metadata['input_data_url'] = []
data_metadata['has_input'] = []
data_metadata['data_name'].append(gcms.sample_name)
data_metadata['input_data_url'].append(str(gcms.file_location))
data_metadata['has_input'].append(id_label + corems_md5(gcms.file_location))
data_metadata['output_data_name'] = str(output_path.stem)
data_metadata['output_data_url'] = str(output_path)
data_metadata['has_output'] = id_label + corems_md5(output_path)
return data_metadata
def get_parameters_json(self, gcms, id_label, output_path):
output_parameters_dict = {}
output_parameters_dict['Data'] = self.get_data_metadata(gcms, id_label, output_path)
output_parameters_dict["Stats"] = self.get_data_stats(gcms)
output_parameters_dict["Calibration"] = self.get_calibration_stats(gcms, id_label)
output_parameters_dict["Blank"] = self.get_blank_stats(gcms)
output_parameters_dict["Instrument"] = self.get_instrument_metadata(gcms)
corems_dict_setting = parameter_to_dict.get_dict_data_gcms(gcms)
corems_dict_setting['corems_version'] = __version__
output_parameters_dict["CoreMSParameters"] = corems_dict_setting
output_parameters_dict["has_metabolite"] = gcms.metabolites_data
output = json.dumps(output_parameters_dict, sort_keys=False, indent=4, separators=(',', ': '))
return output
def write_settings(self, output_path, gcms, id_label="emsl:"):
output = self.get_parameters_json(gcms, id_label, output_path)
with open(output_path.with_suffix('.json'), 'w', encoding='utf8', ) as outfile:
outfile.write(output)
def get_list_dict_data(self, gcms, include_no_match=True, no_match_inline=False):
output_score_method = gcms.molecular_search_settings.output_score_method
dict_data_list = []
def add_match_dict_data():
derivatization = "{}:{}:{}".format(compound_obj.classify, compound_obj.derivativenum, compound_obj.derivatization)
out_dict = {'Sample name': gcms.sample_name,
'Peak Index': gcpeak_index,
'Retention Time': gc_peak.rt,
'Retention Time Ref': compound_obj.rt,
'Peak Height': gc_peak.tic,
'Peak Area': gc_peak.area,
'Retention index': gc_peak.ri,
'Retention index Ref': compound_obj.ri,
'Retention Index Score': compound_obj.ri_score,
'Spectral Similarity Score': compound_obj.spectral_similarity_score,
'Similarity Score': compound_obj.similarity_score,
'Compound Name': compound_obj.name,
"Chebi ID": compound_obj.metadata.chebi,
"Kegg Compound ID": compound_obj.metadata.kegg,
"Inchi": compound_obj.metadata.inchi,
"Inchi Key": compound_obj.metadata.inchikey,
"Smiles": compound_obj.metadata.smiles,
"Molecular Formula": compound_obj.formula,
"IUPAC Name": compound_obj.metadata.iupac_name,
"Traditional Name": compound_obj.metadata.traditional_name,
"Common Name": compound_obj.metadata.common_name,
'Derivatization': derivatization,
}
if self.gcms.molecular_search_settings.exploratory_mode:
out_dict.update({
'Weighted Cosine Correlation': compound_obj.spectral_similarity_scores.get("weighted_cosine_correlation"),
'Cosine Correlation': compound_obj.spectral_similarity_scores.get("cosine_correlation"),
'Stein Scott Similarity': compound_obj.spectral_similarity_scores.get("stein_scott_similarity"),
'Pearson Correlation': compound_obj.spectral_similarity_scores.get("pearson_correlation"),
'Spearman Correlation': compound_obj.spectral_similarity_scores.get("spearman_correlation"),
'Kendall Tau Correlation': compound_obj.spectral_similarity_scores.get("kendall_tau_correlation"),
'DFT Correlation': compound_obj.spectral_similarity_scores.get("dft_correlation"),
'DWT Correlation': compound_obj.spectral_similarity_scores.get("dwt_correlation"),
'Euclidean Distance': compound_obj.spectral_similarity_scores.get("euclidean_distance"),
'Manhattan Distance': compound_obj.spectral_similarity_scores.get("manhattan_distance"),
'Jaccard Distance': compound_obj.spectral_similarity_scores.get("jaccard_distance")
})
for method in methods_name:
out_dict[methods_name.get(method)] = compound_obj.spectral_similarity_scores.get(method)
dict_data_list.append(out_dict)
def add_no_match_dict_data():
dict_data_list.append({'Sample name': gcms.sample_name,
'Peak Index': gcpeak_index,
'Retention Time': gc_peak.rt,
'Peak Height': gc_peak.tic,
'Peak Area': gc_peak.area,
'Retention index': gc_peak.ri,
})
for gcpeak_index, gc_peak in enumerate(gcms.sorted_gcpeaks):
# check if there is a compound candidate
if gc_peak:
if output_score_method == 'highest_sim_score':
compound_obj = gc_peak.highest_score_compound
add_match_dict_data()
elif output_score_method == 'highest_ss':
compound_obj = gc_peak.highest_ss_compound
add_match_dict_data()
else:
for compound_obj in gc_peak:
add_match_dict_data() # add monoisotopic peak
else:
# include not_match
if include_no_match and no_match_inline:
add_no_match_dict_data()
if include_no_match and not no_match_inline:
for gcpeak_index, gc_peak in enumerate(gcms.sorted_gcpeaks):
if not gc_peak:
add_no_match_dict_data()
return dict_data_list
class HighResMassSpectraExport(HighResMassSpecExport):
'''
Export helper for a collection of high-resolution mass spectra: builds one
table per spectrum, using the column layout from HighResMassSpecExport.
'''
def __init__(self, out_file_path, mass_spectra, output_type='excel'):
'''
output_type: str
'excel', 'csv', 'hdf5' or 'pandas'
'''
self.output_file = Path(out_file_path)
self.dir_loc = Path(out_file_path + ".corems")
self.dir_loc.mkdir(exist_ok=True)
# 'excel', 'csv' or 'pandas'
self._output_type = output_type
self.mass_spectra = mass_spectra
self._init_columns()
def get_pandas_df(self):
list_df = []
for mass_spectrum in self.mass_spectra:
columns = self.columns_label + self.get_all_used_atoms_in_order(mass_spectrum)
dict_data_list = self.get_list_dict_data(mass_spectrum)
df = DataFrame(dict_data_list, columns=columns)
scan_number = mass_spectrum.scan_number
df.name = str(self.output_file) + '_' + str(scan_number)
list_df.append(df)
return list_df
def to_pandas(self, write_metadata=True):
for mass_spectrum in self.mass_spectra:
columns = self.columns_label + self.get_all_used_atoms_in_order(mass_spectrum)
dict_data_list = self.get_list_dict_data(mass_spectrum)
df = | DataFrame(dict_data_list, columns=columns) | pandas.DataFrame |
# Return EHF from multiple simulation results of Operative Temperature
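# EHF here appears to be an exceedance (hot-hours) fraction: the share of occupied hours in which
# the zone operative temperature exceeds the monthly mean temperature + 3.5 K (see E_hot below)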
import argparse
import csv
import datetime
import glob
from multiprocessing import Pool
import os
import pandas as pd
FOLDER_STDRD = 'cluster'
LEN_FOLDER_NAME = len(FOLDER_STDRD) + 1
NUMBER_OF_DIGITS = 4
BASE_DIR = '/media/marcelo/OS/Cps/TPU'
MONTH_MEANS = pd.read_csv('/media/marcelo/OS/LabEEE_1-2/idf-creator/month_means_8760.csv')
MAX_THREADS = 10
def process_folder(folder):
line = 0
folder_name = folder[len(folder)-LEN_FOLDER_NAME:]
os.chdir(folder) # BASE_DIR+'/'+
epjson_files = sorted(glob.glob('*.epJSON'))
df_temp = {
'folder': [],
'file': [],
'zone': [],
'temp': [],
'ach': [],
'ehf': []
}
for file in epjson_files:
file_n = int(file[len(file)-7-NUMBER_OF_DIGITS:len(file)-7])
print(line,' ',file, end='\r')
line += 1
csv_file = file[:-7]+'out.csv'
df = pd.read_csv(csv_file)
n_zones = 6
for zn in range(n_zones):
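# per-zone means are taken over occupied hours only (rows where the occupancy schedule value is > 0)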
df_temp['file'].append(file[:-7])
df_temp['folder'].append(folder_name)
df_temp['zone'].append(zn)
df_temp['temp'].append((df['OFFICE_'+'{:02.0f}'.format(zn)+':Zone Operative Temperature [C](Hourly)'][df['SCH_OCUPACAO:Schedule Value [](Hourly)'] > 0]).mean())
df_temp['ach'].append((df['OFFICE_'+'{:02.0f}'.format(zn)+':AFN Zone Infiltration Air Change Rate [ach](Hourly)'][df['SCH_OCUPACAO:Schedule Value [](Hourly)'] > 0]).mean())
df['E_hot'] = -1
df['sup_lim'] = MONTH_MEANS['mean_temp'] + 3.5
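# upper limit = monthly mean temperature plus a fixed 3.5 K offset (an adaptive-comfort-style threshold)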
df.loc[df['OFFICE_'+'{:02.0f}'.format(zn)+':Zone Operative Temperature [C](Hourly)'] > df['sup_lim'], 'E_hot'] = 1
df.loc[df['OFFICE_'+'{:02.0f}'.format(zn)+':Zone Operative Temperature [C](Hourly)'] <= df['sup_lim'], 'E_hot'] = 0
df_temp['ehf'].append(df['E_hot'][df['SCH_OCUPACAO:Schedule Value [](Hourly)'] > 0].mean())
df_output = | pd.DataFrame(df_temp) | pandas.DataFrame |
import numpy as np
#import matplotlib
#import matplotlib.pyplot as plt
#import seaborn as sns
import networkx as nx
import pandas as pd
import random
import string
import scipy.stats
import network_prop
import sys
#import visJS2jupyter.visJS_module
#import visJS2jupyter.visualizations
def main(num_reps=10,int_name='GIANT_p3',rand_method = 'degree_ks_test',single_or_double='single'):
'''
Calculate z-scores for heat propagation
example:
python AWS_zscore_jamieson.py 1000 STRING degree_binning single
'''
print('number of randomizations = '+str(num_reps))
print('background interactome = ' + int_name)
print('randomization method = ' + rand_method)
print('single or double = ' + single_or_double)
num_reps = int(num_reps)
# load interactomes and select focal interactome
Gint = nx.Graph()
if int_name=='GIANT_p3':
G_giant = nx.read_gpickle('interactomes/G_giant_.3.gpickle')
Gint = G_giant
elif int_name=='GIANT_p25':
G_giant = nx.read_gpickle('interactomes/G_giant_.25.gpickle')
Gint = G_giant
elif int_name=='GIANT_p2':
G_giant = nx.read_gpickle('../interactomes/G_giant_.2.gpickle')
Gint = G_giant
elif int_name=='GIANT_p15':
G_giant = nx.read_gpickle('interactomes/G_giant_.15.gpickle')
Gint = G_giant
elif int_name=='GIANT_full_p2':
G_giant = nx.read_gpickle('interactomes/G_giant_full.2.gpickle')
Gint = G_giant
elif int_name=='GIANT_kidney_p2':
G_giant = nx.read_gpickle('interactomes/kidney_0.2.gpickle')
Gint = G_giant
elif int_name=='GIANT_heart_p2':
G_giant = nx.read_gpickle('interactomes/G_giant_heart.2.gpickle')
Gint = G_giant
elif int_name=='GIANT_neuron_p3':
G_giant = nx.read_gpickle('interactomes/G_giant_neuron.3.gpickle')
Gint = G_giant
elif int_name=='STRING':
G_str = nx.read_gpickle('../interactomes/G_str_181022.gpickle')
Gint = G_str
elif int_name=='Menche':
G_menche = nx.read_gpickle('interactomes/G_menche.gpickle')
Gint = G_menche
elif int_name=='inBIO':
G_inbio = nx.read_gpickle('interactomes/G_inbio.gpickle')
Gint = G_inbio
elif int_name=='PCnet':
G_pcnet = nx.read_gpickle('../interactomes/G_PCnet.gpickle')
Gint = G_pcnet
elif int_name=='PCnet_pruned':
G_pcnet = nx.read_gpickle('../interactomes/PCnet_pruned_5MB.gpickle')
Gint = G_pcnet
elif int_name=='G_coexp_preA_p8':
G_coexp = nx.read_gpickle('../interactomes/G_coexp_preA.8.gpickle')
Gint = G_coexp
elif int_name=='G_coexp_preU_p8':
G_coexp = nx.read_gpickle('../interactomes/G_coexp_preU.8.gpickle')
Gint = G_coexp
if 'None' in Gint.nodes():
Gint.remove_node('None')
# load HC genes
# AMLvsAN_genes = pd.read_csv('AMLvsAN_seed_genes_190417.tsv',sep='\t',index_col='Unnamed: 0')
# MFvsAN_genes = pd.read_csv('MFvsAN_seed_genes_190417.tsv',sep='\t',index_col='Unnamed: 0')
# AMLvsMF_genes = pd.read_csv('AMLvsMF_seed_genes_190417.tsv',sep='\t',index_col='Unnamed: 0')
# updated 5/9/19
AMLvsAN_genes = pd.read_csv('AMLvsAN_seed_genes_190509.tsv',sep='\t',index_col='Unnamed: 0')
MFvsAN_genes = pd.read_csv('MFvsAN_seed_genes_190509.tsv',sep='\t',index_col='Unnamed: 0')
AMLvsMF_genes = pd.read_csv('AMLvsMF_seed_genes_190509.tsv',sep='\t',index_col='Unnamed: 0')
seed_AMLvsAN = AMLvsAN_genes['seed_genes'].tolist()[0].translate(None,string.punctuation).split(' ')
print(len(seed_AMLvsAN))
seed_AMLvsAN = list(np.intersect1d(seed_AMLvsAN,Gint.nodes())) # only keep seed genes in the interactome
print(len(seed_AMLvsAN))
seed_MFvsAN = MFvsAN_genes['seed_genes'].tolist()[0].translate(None,string.punctuation).split(' ')
print(len(seed_MFvsAN))
seed_MFvsAN = list(np.intersect1d(seed_MFvsAN,Gint.nodes())) # only keep seed genes in the interactome
print(len(seed_MFvsAN))
seed_AMLvsMF = AMLvsMF_genes['seed_genes'].tolist()[0].translate(None,string.punctuation).split(' ')
print(len(seed_AMLvsMF))
seed_AMLvsMF = list(np.intersect1d(seed_AMLvsMF,Gint.nodes())) # only keep seed genes in the interactome
print(len(seed_AMLvsMF))
# calculate the z-score
# calc Wprime from Gint
Wprime = network_prop.normalized_adj_matrix(Gint,conserve_heat=True)
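# precomputed once and reused by every network_propagation call below; conserve_heat=True
# presumably makes the normalization heat-conserving across propagation steps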
if single_or_double=='single': # calculate z-scores from a single set of seed genes
print('calculating z-scores AMLvsAN')
z_seed_AMLvsAN,Fnew_rand_AMLvsAN = calc_zscore_heat(Gint,Wprime,seed_AMLvsAN,num_reps=num_reps,rand_method=rand_method)
z_seed_AMLvsAN.to_csv('z_seed_AMLvsAN'+str(num_reps)+'_reps'+int_name+'_'+rand_method+'.tsv',sep='\t')
print('calculating z-scores MFvsAN')
z_seed_MFvsAN,Fnew_rand_MFvsAN = calc_zscore_heat(Gint,Wprime,seed_MFvsAN,num_reps=num_reps,rand_method=rand_method)
z_seed_MFvsAN.to_csv('z_seed_MFvsAN'+str(num_reps)+'_reps'+int_name+'_'+rand_method+'.tsv',sep='\t')
print('calculating z-scores AMLvsMF')
z_seed_AMLvsMF,Fnew_rand_AMLvsMF = calc_zscore_heat(Gint,Wprime,seed_AMLvsMF,num_reps=num_reps,rand_method=rand_method)
z_seed_AMLvsMF.to_csv('z_seed_AMLvsMF'+str(num_reps)+'_reps'+int_name+'_'+rand_method+'.tsv',sep='\t')
# ----- this part is obsolete -----
# elif single_or_double=='double': # calculate z-scores from two sets of seed genes:
# --- CNV-DIFF_SPLICE and CNV-METH temorarily commented out ----
# print('calculating CNV-DIFF_SPLICE z-scores')
# z_CNV_DIFF_SPLICE,Fnew_rand_CNV_DIFF_SPLICE = calc_zscore_heat_double(Gint,Wprime,CNV_HC,DIFF_SPLICE_HC,num_reps=num_reps,rand_method = rand_method)
# z_CNV_DIFF_SPLICE.to_csv('z_CNV_DIFF_SPLICE'+str(num_reps)+'_reps'+int_name+'_'+rand_method+'.tsv',sep='\t')
#
# print('calculating CNV-METH z-scores')
# z_CNV_METH,Fnew_rand_CNV_METH = calc_zscore_heat_double(Gint,Wprime,CNV_HC,METH_HC,num_reps=num_reps,rand_method = rand_method)
# z_CNV_METH.to_csv('z_CNV_METH'+str(num_reps)+'_reps'+int_name+'_'+rand_method+'.tsv',sep='\t')
# print('calculating METH-DIFF_SPLICE z-scores')
# z_METH_DIFF_SPLICE,Fnew_rand_METH_DIFF_SPLICE = calc_zscore_heat_double(Gint,Wprime,METH_HC,DIFF_SPLICE_HC,num_reps=num_reps,rand_method = rand_method)
# z_METH_DIFF_SPLICE.to_csv('z_METH_DIFF_SPLICE'+str(num_reps)+'_reps'+int_name+'_'+rand_method+'.tsv',sep='\t')
def calc_zscore_heat(Gint,Wprime,genes_D1,num_reps=10,ks_sig = 0.3,rand_method = 'degree_ks_test'):
'''
Helper function to calculate the z-score of heat values from one input set of genes.
rand_method = 'degree_ks_test' or 'degree_binning'. Selects the type of randomization.
'''
seed_D1 = list(np.intersect1d(list(genes_D1),Gint.nodes()))
Fnew_D1 = network_prop.network_propagation(Gint,Wprime,seed_D1,alpha=.5,num_its=20)
Fnew_rand_D1 = np.zeros([num_reps,len(Fnew_D1)])
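# one row per randomization, one column per gene, filled with the heat propagated from each random seed set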
if rand_method == 'degree_ks_test':
for r in range(num_reps):
if (r%50)==0:
print(r)
# UPDATE 8/23/17 -- replace with randomly selecting seed nodes, checking for degree distribution equivalence
p=0
# resample until degree distributions are not significantly different
while p<ks_sig:
seed_D1_random = list(Gint.nodes())  # wrap in list() so the node collection can be shuffled in place
np.random.shuffle(seed_D1_random)
seed_D1_random = seed_D1_random[0:len(seed_D1)]
ks_stat, p = scipy.stats.ks_2samp(pd.Series(Gint.degree(seed_D1)), pd.Series(Gint.degree(seed_D1_random)))
Fnew_rand_tmp = network_prop.network_propagation(Gint,Wprime,seed_D1_random,alpha=.5,num_its=20)
Fnew_rand_tmp.loc[seed_D1_random]=np.nan # set seeds to nan so they don't bias results
Fnew_rand_D1[r] = Fnew_rand_tmp.loc[Fnew_D1.index.tolist()]
elif rand_method == 'degree_binning':
bins = get_degree_binning(Gint,10)
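# degree binning: nodes are grouped into degree bins so that random seed genes can be drawn
# with approximately the same degree distribution as the real seed genes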
min_degree, max_degree, genes_binned = zip(*bins)
bin_df = | pd.DataFrame({'min_degree':min_degree,'max_degree':max_degree,'genes_binned':genes_binned}) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
        obj1 = date_range("2012-01-01", periods=3, tz=tz)
import os
import pandas as pd
import argparse
mainAppRepo = os.path.dirname(os.path.abspath(__file__)) + '/'
# SITE NAME
def get_site_name_from_site_number(site_number):
sites = pd.read_csv(mainAppRepo + 'data/study_sites.txt',
sep=',', header=0, index_col=0) #\\s+
site_name = sites.index._data[site_number]
return site_name
# H indicator CSV file
def get_csv_file_with_indicator_for_a_context(site_number, chronicle, approx, folder):
indicator = "H"
site_name = get_site_name_from_site_number(site_number)
file_name = "Exps_" + indicator + "_Indicator_" + site_name + "_Chronicle"+ str(chronicle) + "_Approx" + str(approx) + ".csv"
indicator_file = folder + "/" + site_name + "/" + file_name
try:
dfp = pd.read_csv(indicator_file, sep=",")
except:
print("File does not exist")
dfp = pd.DataFrame()
return dfp
def get_csv_file_with_steady_features_for_a_context(site_number, chronicle, folder):
site_name = get_site_name_from_site_number(site_number)
model_name = "model_time_0_geo_0_thick_1_K_86.4_Sy_0.1_Step1_site" + str(site_number) + "_Chronicle" + str(chronicle) + "_SteadyState"
file_name = model_name + "_extracted_features.csv"
steady_file = folder + "/" + site_name + "/" + model_name + "/" + file_name
try:
df = pd.read_csv(steady_file, sep=";")
except:
print("File for site " + site_name + " (number : " + str(site_number) + " & chronicle " + str(chronicle) + ") does not exist")
        df = pd.DataFrame()
    return df
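# Illustrative usage of the helpers above (a sketch; the site number,
# chronicle/approx values and output folder are assumptions, not values
# taken from this script):
#   name = get_site_name_from_site_number(2)
#   h_df = get_csv_file_with_indicator_for_a_context(2, chronicle=0, approx=0, folder="outputs")
#   steady_df = get_csv_file_with_steady_features_for_a_context(2, chronicle=0, folder="outputs")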
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
self.assert_(self.store.root.a._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.b._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.df1._v_attrs.pandas_version == '0.10')
# write a file and wipe its versioning
self.store.remove('df2')
self.store.append('df2', df)
self.store.get_node('df2')._v_attrs.pandas_version = None
self.store.select('df2')
self.store.select('df2', [ Term('index','>',df.index[2]) ])
def test_meta(self):
raise nose.SkipTest('no meta')
meta = { 'foo' : [ 'I love pandas ' ] }
s = tm.makeTimeSeries()
s.meta = meta
self.store['a'] = s
self.assert_(self.store['a'].meta == meta)
df = tm.makeDataFrame()
df.meta = meta
self.store['b'] = df
self.assert_(self.store['b'].meta == meta)
        # this should work, but because slicing doesn't propagate meta it doesn't
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
results = self.store['df1']
#self.assert_(getattr(results,'meta',None) == meta)
# no meta
df = tm.makeDataFrame()
self.store['b'] = df
self.assert_(hasattr(self.store['b'],'meta') == False)
def test_reopen_handle(self):
self.store['a'] = tm.makeTimeSeries()
self.store.open('w', warn=False)
self.assert_(self.store.handle.isopen)
self.assertEquals(len(self.store), 0)
def test_flush(self):
self.store['a'] = tm.makeTimeSeries()
self.store.flush()
def test_get(self):
self.store['a'] = tm.makeTimeSeries()
left = self.store.get('a')
right = self.store['a']
tm.assert_series_equal(left, right)
left = self.store.get('/a')
right = self.store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, self.store.get, 'b')
def test_put(self):
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
self.store['a'] = ts
self.store['b'] = df[:10]
self.store['foo/bar/bah'] = df[:10]
self.store['foo'] = df[:10]
self.store['/foo'] = df[:10]
self.store.put('c', df[:10], table=True)
# not OK, not a table
self.assertRaises(ValueError, self.store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
self.assertRaises(ValueError, self.store.put, 'f', df[10:], append=True)
# OK
self.store.put('c', df[10:], append=True)
# overwrite table
self.store.put('c', df[:10], table=True, append=False)
tm.assert_frame_equal(df[:10], self.store['c'])
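    # The storage semantics exercised above, in short (an informal sketch of
    # this pre-0.11 API; the file name and keys are arbitrary):
    #   store = HDFStore('example.h5')
    #   store['b'] = df                               # fixed format, not appendable
    #   store.put('c', df[:10], table=True)           # table format
    #   store.put('c', df[10:], append=True)          # grows the existing table
    #   store.put('c', df[:10], table=True, append=False)  # overwrites it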
def test_put_string_index(self):
index = Index([ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(20), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + [ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(21), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
def test_put_compression(self):
df = tm.makeTimeDataFrame()
self.store.put('c', df, table=True, compression='zlib')
tm.assert_frame_equal(self.store['c'], df)
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='blosc')
self.store.put('c', df, table=True, compression='blosc')
tm.assert_frame_equal(self.store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_append(self):
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
tm.assert_frame_equal(self.store['df1'], df)
self.store.remove('df2')
self.store.put('df2', df[:10], table=True)
self.store.append('df2', df[10:])
tm.assert_frame_equal(self.store['df2'], df)
self.store.remove('df3')
self.store.append('/df3', df[:10])
self.store.append('/df3', df[10:])
tm.assert_frame_equal(self.store['df3'], df)
        # this is allowed but you almost always don't want to do it
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
self.store.remove('/df3 foo')
self.store.append('/df3 foo', df[:10])
self.store.append('/df3 foo', df[10:])
tm.assert_frame_equal(self.store['df3 foo'], df)
warnings.filterwarnings('always', category=tables.NaturalNameWarning)
# panel
wp = tm.makePanel()
self.store.remove('wp1')
self.store.append('wp1', wp.ix[:,:10,:])
self.store.append('wp1', wp.ix[:,10:,:])
tm.assert_panel_equal(self.store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:])
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using axis labels
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=['items','major_axis','minor_axis'])
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
        # test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
self.store.remove('p4d2')
self.store.append('p4d2', p4d2, axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d2'], p4d2)
        # test using different order of items on the non-index axes
self.store.remove('wp1')
wp_append1 = wp.ix[:,:10,:]
self.store.append('wp1', wp_append1)
wp_append2 = wp.ix[:,10:,:].reindex(items = wp.items[::-1])
self.store.append('wp1', wp_append2)
tm.assert_panel_equal(self.store['wp1'], wp)
def test_append_frame_column_oriented(self):
# column oriented
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df.ix[:,:2], axes = ['columns'])
self.store.append('df1', df.ix[:,2:])
tm.assert_frame_equal(self.store['df1'], df)
result = self.store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(Exception, self.store.select, 'df1', ('columns=A', Term('index','>',df.index[4])))
# selection on the non-indexable
result = self.store.select('df1', ('columns=A', Term('index','=',df.index[0:4])))
expected = df.reindex(columns=['A'],index=df.index[0:4])
tm.assert_frame_equal(expected, result)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i,idx in enumerate(indexers):
self.assert_(getattr(getattr(self.store.root,key).table.description,idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
        # same as above, but try to append with different axes
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['labels','items','major_axis'])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# pass incorrect number of axes
self.store.remove('p4d')
self.assertRaises(Exception, self.store.append, 'p4d', p4d.ix[:,:,:10,:], axes=['major_axis','minor_axis'])
# different than default indexables #1
indexers = ['labels','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# different than default indexables #2
indexers = ['major_axis','labels','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# partial selection
result = self.store.select('p4d',['labels=l1'])
expected = p4d.reindex(labels = ['l1'])
tm.assert_panel4d_equal(result, expected)
# partial selection2
result = self.store.select('p4d',[Term('labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = ['ItemA'], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
        # non-existent partial selection
result = self.store.select('p4d',[Term('labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = [], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
wp = tm.makePanel()
wp2 = wp.rename_axis(dict([ (x,"%s_extra" % x) for x in wp.minor_axis ]), axis = 2)
self.store.append('s1', wp, min_itemsize = 20)
self.store.append('s1', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s1'], expected)
# test dict format
self.store.append('s2', wp, min_itemsize = { 'minor_axis' : 20 })
self.store.append('s2', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s2'], expected)
# apply the wrong field (similar to #1)
self.store.append('s3', wp, min_itemsize = { 'major_axis' : 20 })
self.assertRaises(Exception, self.store.append, 's3')
# test truncation of bigger strings
self.store.append('s4', wp)
self.assertRaises(Exception, self.store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big',df, min_itemsize = { 'values' : 1024 })
tm.assert_frame_equal(self.store.select('df_big'), df)
# appending smaller string ok
df2 = DataFrame([[124,'asdqy'], [346,'dggnhefbdfb']])
self.store.append('df_big',df2)
expected = concat([ df, df2 ])
tm.assert_frame_equal(self.store.select('df_big'), expected)
# avoid truncation on elements
        df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big2',df, min_itemsize = { 'values' : 10 })
tm.assert_frame_equal(self.store.select('df_big2'), df)
# bigger string on next append
self.store.append('df_new',df, min_itemsize = { 'values' : 16 })
df_new = DataFrame([[124,'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(Exception, self.store.append, 'df_new',df_new)
def test_create_table_index(self):
wp = tm.makePanel()
self.store.append('p5', wp)
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.is_indexed == True)
assert(self.store.handle.root.p5.table.cols.minor_axis.is_indexed == False)
# default optlevels
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
# let's change the indexing scheme
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', optlevel=9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', kind='full')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'full')
self.store.create_table_index('p5', optlevel=1, kind='light')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 1)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'light')
df = tm.makeTimeDataFrame()
self.store.append('f', df[:10])
self.store.append('f', df[10:])
self.store.create_table_index('f')
# try to index a non-table
self.store.put('f2', df)
self.assertRaises(Exception, self.store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
pytables._table_supports_index = False
self.assertRaises(Exception, self.store.create_table_index, 'f')
# test out some versions
original = tables.__version__
for v in ['2.2','2.2b']:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.assertRaises(Exception, self.store.create_table_index, 'f')
for v in ['2.3.1','2.3.1b','2.4dev','2.4',original]:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.store.create_table_index('f')
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = original
def test_big_table(self):
raise nose.SkipTest('no big table')
# create and write a big table
wp = Panel(np.random.randn(20, 1000, 1000), items= [ 'Item%s' % i for i in xrange(20) ],
major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%s' % i for i in xrange(1000) ])
wp.ix[:,100:200,300:400] = np.nan
try:
store = HDFStore(self.scratchpath)
store._debug_memory = True
store.append('wp',wp)
recons = store.select('wp')
finally:
store.close()
os.remove(self.scratchpath)
def test_append_diff_item_order(self):
raise nose.SkipTest('append diff item order')
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
self.store.put('panel', wp1, table=True)
self.assertRaises(Exception, self.store.put, 'panel', wp2,
append=True)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
self.store.put('frame', df1, table=True)
self.assertRaises(Exception, self.store.put, 'frame', df2,
table=True, append=True)
def test_table_values_dtypes_roundtrip(self):
df1 = DataFrame({'a': [1, 2, 3]}, dtype = 'f8')
self.store.append('df1', df1)
assert df1.dtypes == self.store['df1'].dtypes
df2 = DataFrame({'a': [1, 2, 3]}, dtype = 'i8')
self.store.append('df2', df2)
assert df2.dtypes == self.store['df2'].dtypes
# incompatible dtype
self.assertRaises(Exception, self.store.append, 'df2', df1)
def test_table_mixed_dtypes(self):
# frame
def _make_one_df():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one_df()
self.store.append('df1_mixed', df1)
tm.assert_frame_equal(self.store.select('df1_mixed'), df1)
# panel
def _make_one_panel():
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p1 = _make_one_panel()
self.store.append('p1_mixed', p1)
tm.assert_panel_equal(self.store.select('p1_mixed'), p1)
# ndim
def _make_one_p4d():
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p4d = _make_one_p4d()
self.store.append('p4d_mixed', p4d)
tm.assert_panel4d_equal(self.store.select('p4d_mixed'), p4d)
def test_remove(self):
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
self.store['a'] = ts
self.store['b'] = df
self.store.remove('a')
self.assertEquals(len(self.store), 1)
tm.assert_frame_equal(df, self.store['b'])
self.store.remove('b')
self.assertEquals(len(self.store), 0)
# pathing
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('foo')
self.store.remove('b/foo')
self.assertEquals(len(self.store), 1)
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('b')
self.assertEquals(len(self.store), 1)
# __delitem__
self.store['a'] = ts
self.store['b'] = df
del self.store['a']
del self.store['b']
self.assertEquals(len(self.store), 0)
def test_remove_where(self):
        # non-existence
crit1 = Term('index','>','foo')
self.store.remove('a', where=[crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel()
self.store.put('wp', wp, table=True)
self.store.remove('wp', [('minor_axis', ['A', 'D'])])
rs = self.store.select('wp')
expected = wp.reindex(minor_axis = ['B','C'])
tm.assert_panel_equal(rs,expected)
# empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
        # number of rows deleted (entire table)
n = self.store.remove('wp', [])
assert(n == 120)
        # non-empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
self.assertRaises(Exception, self.store.remove,
'wp', ['foo'])
        # selecting non-table with a where
#self.store.put('wp2', wp, table=False)
#self.assertRaises(Exception, self.store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_crit(self):
wp = tm.makePanel()
# group row removal
date4 = wp.major_axis.take([ 0,1,2,4,5,6,8,9,10 ])
crit4 = Term('major_axis',date4)
self.store.put('wp3', wp, table=True)
n = self.store.remove('wp3', where=[crit4])
assert(n == 36)
result = self.store.select('wp3')
expected = wp.reindex(major_axis = wp.major_axis-date4)
tm.assert_panel_equal(result, expected)
# upper half
self.store.put('wp', wp, table=True)
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis','>',date)
crit2 = Term('minor_axis',['A', 'D'])
n = self.store.remove('wp', where=[crit1])
assert(n == 56)
n = self.store.remove('wp', where=[crit2])
assert(n == 32)
result = self.store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
tm.assert_panel_equal(result, expected)
# individual row elements
self.store.put('wp2', wp, table=True)
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis',date1)
self.store.remove('wp2', where=[crit1])
result = self.store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis-date1)
tm.assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis',date2)
self.store.remove('wp2', where=[crit2])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2]))
tm.assert_panel_equal(result, expected)
date3 = [wp.major_axis[7],wp.major_axis[9]]
crit3 = Term('major_axis',date3)
self.store.remove('wp2', where=[crit3])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2])-Index(date3))
tm.assert_panel_equal(result, expected)
# corners
self.store.put('wp4', wp, table=True)
n = self.store.remove('wp4', where=[Term('major_axis','>',wp.major_axis[-1])])
result = self.store.select('wp4')
tm.assert_panel_equal(result, wp)
def test_terms(self):
wp = tm.makePanel()
p4d = tm.makePanel4D()
self.store.put('wp', wp, table=True)
self.store.put('p4d', p4d, table=True)
# some invalid terms
terms = [
[ 'minor', ['A','B'] ],
[ 'index', ['20121114'] ],
[ 'index', ['20121114', '20121114'] ],
]
for t in terms:
self.assertRaises(Exception, self.store.select, 'wp', t)
self.assertRaises(Exception, Term.__init__)
self.assertRaises(Exception, Term.__init__, 'blah')
self.assertRaises(Exception, Term.__init__, 'index')
self.assertRaises(Exception, Term.__init__, 'index', '==')
self.assertRaises(Exception, Term.__init__, 'index', '>', 5)
# panel
result = self.store.select('wp',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']) ])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
result = self.store.select('p4d',[ Term('major_axis<20000108'), Term('minor_axis', '=', ['A','B']), Term('items', '=', ['ItemA','ItemB']) ])
expected = p4d.truncate(after='20000108').reindex(minor=['A', 'B'],items=['ItemA','ItemB'])
tm.assert_panel4d_equal(result, expected)
# valid terms
terms = [
dict(field = 'major_axis', op = '>', value = '20121114'),
('major_axis', '20121114'),
('major_axis', '>', '20121114'),
(('major_axis', ['20121114','20121114']),),
('major_axis', datetime(2012,11,14)),
'major_axis>20121114',
'major_axis>20121114',
'major_axis>20121114',
(('minor_axis', ['A','B']),),
(('minor_axis', ['A','B']),),
((('minor_axis', ['A','B']),),),
(('items', ['ItemA','ItemB']),),
('items=ItemA'),
]
for t in terms:
self.store.select('wp', t)
self.store.select('p4d', t)
# valid for p4d only
terms = [
(('labels', '=', ['l1','l2']),),
Term('labels', '=', ['l1','l2']),
]
for t in terms:
self.store.select('p4d', t)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, tm.assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, tm.assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, tm.assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0.,1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r : tm.assert_series_equal(l, r, True, True, True)
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.today(), 0])
self._check_roundtrip(ser, func)
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
from datetime import date
ser = Series(values, [date.today(), 'a'])
self._check_roundtrip(ser, func)
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime(2012, 1, 1), datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
            raise nose.SkipTest('known failure on some Windows platforms')
def test_frame(self):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal)
self._check_roundtrip(df, tm.assert_frame_equal)
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=True)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=True)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal)
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=True)
# not consolidated
df['foo'] = np.random.randn(len(df))
self.store['df'] = df
recons = self.store['df']
self.assert_(recons._data.is_consolidated())
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
        df1 = DataFrame(index=['a', 'b', 'c'])
from timeUtils import clock, elapsed
from listUtils import getFlatList
from ioUtils import getFile, saveFile
from pandas import Series, DataFrame
class masterDBMatchClass:
def __init__(self, maindb, mdbmaps):
self.maindb = maindb
self.mdbmaps = mdbmaps
self.finalArtistName = maindb.artistColumnName
print("Loading Artist Names")
self.artistData = {db: self.getArtistNameDB(db) for db in maindb.dbdata.keys()}
#self.matchData = {db: self.getDBMatchData(db) for db in maindb.dbdata.keys()}
self.clean()
self.knownCounter = None
self.initFuncs = {}
self.knownDBs = ['Discogs', 'AllMusic', 'MusicBrainz', 'RateYourMusic', 'Deezer', 'LastFM', 'Genius', 'AlbumOfTheYear','KWorbiTunes', 'KWorbSpotify']
self.knownDBs = ['Discogs', 'AllMusic', 'MusicBrainz']
def setKnownDBs(self, knownDBs=['Discogs', 'AllMusic', 'MusicBrainz']):
self.knownDBs = knownDBs
def getArtistNameDB(self, db):
return self.maindb.dbdata[db]["Disc"].getMasterDBArtistDataFrame()
def getArtistAlbumsDB(self, db):
return self.maindb.dbdata[db]["Disc"].getMasterDBArtistAlbumsDataFrame()
def clean(self):
self.matchData = {db: None for db in self.maindb.dbdata.keys()}
def setDBMatchData(self, dbName, matchData):
print(" Setting matchData for {0}".format(dbName))
self.matchData[dbName] = matchData
def getDF(self, dbName):
matchData = self.getDBMatchData(dbName, returnData=True)
df = DataFrame({primaryKey: {"Artist": artistData["ArtistName"], "Albums": len(artistData["ArtistAlbums"])} for primaryKey,artistData in matchData.items()}).T
return df
def getMasterDF(self, dbName):
df = self.getDF(dbName)
amDF = self.mdbmaps[dbName].getDF()
mergeDF = df.join(amDF).copy(deep=True)
mergeDF["DBMatches"][mergeDF["DBMatches"].isna()] = 0
mergeDF = mergeDF.sort_values("Albums", ascending=False)
return mergeDF
def getDBMatchData(self, dbName, returnData=True):
if self.matchData.get(dbName) is not None:
return self.matchData[dbName]
print("Loading Artist Albums")
try:
artistsDF = self.artistData[dbName]
albumsDF = self.getArtistAlbumsDB(dbName)
except:
raise ValueError("Could not get artist/albums for DB {0} from [{1}]".format(dbName, list(self.artistData.keys())))
dbArtistAlbums = artistsDF[[self.finalArtistName]].join(albumsDF)
dbArtistAlbums["Albums"] = dbArtistAlbums["Albums"].apply(lambda x: getFlatList([albums.values() for media,albums in x.items()]))
matchData = {self.mdbmaps[dbName].getPrimaryKey(artistName=dbArtistData[self.finalArtistName], artistID=dbArtistID): {"ArtistName": dbArtistData[self.finalArtistName], "ArtistAlbums": dbArtistData["Albums"]} for dbArtistID,dbArtistData in dbArtistAlbums.T.to_dict().items() if dbArtistID is not None and len(dbArtistData[self.finalArtistName]) > 0}
self.setDBMatchData(dbName, matchData)
if returnData:
return matchData
def getArtistNameFromID(self, db, dbID):
print("Do I call this also???")
1/0
df = self.artistData[db]
adf = df[df.index == dbID]
if adf.shape[0] == 1:
retval = list(adf[self.finalArtistName])[0]
return retval
else:
return None
def getDBPrimaryKeys(self, db):
print("Do I call this???")
1/0
if self.matchData.get(db) is not None:
matchData = self.matchData[db]
else:
matchData = self.getDBMatchData(db)
dbPrimaryKeys = {primKey[1]: (primKey[0],primKey[1]) for primKey in matchData}
return dbPrimaryKeys
#############################################################################
## Master Call To Get Data To Match
#############################################################################
def getKnownCounter(self):
return self.knownCounter
def printCuts(self, cuts, debug=False):
if debug is False:
return
for k,v in cuts.items():
print("{0: <20} -> {1}".format(k,v))
def initialzeData(self, db):
################################################################################################################
# Get Match Data
################################################################################################################
if self.matchData.get(db) is not None:
matchData = self.matchData[db]
else:
matchData = self.getDBMatchData(db)
self.initFuncs["initialzeData"] = False
return matchData
def initialzeMatchData(self, db):
################################################################################################################
# Initialize Artists/DBs To Get
################################################################################################################
        knownDBs = self.knownDBs
        matchData = self.getDBMatchData(db)
        initArtistsData = Series(dict(zip(knownDBs, [0]*len(knownDBs))))
retvalArtistData = {primaryKey: initArtistsData.copy(deep=True) for primaryKey,artistData in matchData.items()}
        retvalArtistData = DataFrame(retvalArtistData)
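# Illustrative wiring (a sketch; `maindb` and `mdbmaps` stand for the already
# constructed objects this class expects, and "Discogs" for one of its DBs):
#   matcher = masterDBMatchClass(maindb, mdbmaps)
#   discogs_df = matcher.getMasterDF("Discogs")  # artist name, album count, DB match count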
import copy
import gc
import os
import pickle
import sys
from functools import partial
from warnings import simplefilter
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from IPython.display import display
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.tree import DecisionTreeClassifier
from torch.utils.data import DataLoader
from tqdm.notebook import tqdm
from Evaluation import evaluate_df, correlation_vs_landmark, token_remotion_delta_performance
from FeatureContribution import FeatureContribution
from FeatureExtractor import FeatureExtractor
from Finetune import finetune_BERT
from Modelling import feature_importance
from Net import DatasetAccoppiate, NetAccoppiate, train_model
from WordEmbedding import WordEmbedding
from WordPairGenerator import WordPairGenerator, WordPairGeneratorEdit
class Routine:
@staticmethod
def plot_token_contribution(el_df, score_col='token_contribution', cut=0.0):
sns.set(rc={'figure.figsize': (10, 10)})
tmp_df = el_df.copy()
tmp_df = tmp_df.set_index(['left_word', 'right_word'])
tmp_df = tmp_df[tmp_df[score_col].abs() >= cut]
# colors = ['orange' ] * tmp_df.shape[0]
colors = np.where(tmp_df[score_col] >= 0, 'green', 'red')
g = tmp_df.plot(y=score_col, kind='barh', color=colors, alpha=.5, legend='')
# plt.xlim(-0.5, 0.5)
for p in g.patches:
offset = -10 if p.get_width() > 0 else 10
g.annotate(format(p.get_width(), '.3f'),
(p.get_width(), p.get_y()),
ha='right' if p.get_width() > 0 else 'left',
va='center',
xytext=(offset, 2.5),
textcoords='offset points')
plt.ylabel('')
g.axes.set_yticklabels(g.axes.get_yticklabels(), fontsize=14)
plt.tight_layout()
plt.show()
# g.get_figure().savefig(os.path.join(model_files_path,'examples',"microsoft.pdf"), dpi=400)
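    # Illustrative call (a sketch; `el_df` stands for any frame carrying
    # 'left_word', 'right_word' and the chosen score column):
    #   Routine.plot_token_contribution(el_df, score_col='token_contribution', cut=0.05)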
def __init__(self, dataset_name, dataset_path, project_path,
reset_files=False, model_name='BERT', device=None, reset_networks=False, clean_special_char=True,
col_to_drop=[], model_files_path=None,
softlab_path='./content/drive/Shareddrives/SoftLab/',
verbose=True, we_finetuned=False,
we_finetune_path=None, num_epochs=10,
sentence_embedding=True, we=None):
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
simplefilter(action='ignore', category=FutureWarning)
simplefilter(action='ignore')
pd.options.display.float_format = '{:.4f}'.format
pd.options.display.max_rows = 150
pd.options.display.max_columns = 150
pd.options.display.max_colwidth = 100
pd.options.display.precision = 15
pd.options.display.max_info_columns = 150
plt.rcParams["figure.figsize"] = (18, 6)
self.softlab_path = os.path.join(softlab_path)
self.reset_files = reset_files # @ param {type:"boolean"}
self.reset_networks = reset_networks # @ param {type:"boolean"}
self.dataset_name = dataset_name
self.model_name = model_name
self.feature_extractor = FeatureExtractor()
self.verbose = verbose
self.sentence_embedding_dict = None
self.word_pair_model = None
self.word_pairing_kind = None
self.additive_only = False
self.cos_sim = None
self.feature_kind = None
if dataset_path is None:
self.dataset_path = os.path.join(softlab_path, 'Dataset', 'Entity Matching', dataset_name)
else:
self.dataset_path = dataset_path
self.project_path = os.path.join(softlab_path, 'Projects', 'Concept level EM (exclusive-inclluse words)')
if model_files_path is None:
self.model_files_path = os.path.join(self.project_path, 'dataset_files', dataset_name, model_name)
else:
self.model_files_path = model_files_path
try:
os.makedirs(self.model_files_path)
except Exception as e:
print(e)
pass
try:
os.makedirs(os.path.join(self.model_files_path, 'results'))
except Exception as e:
print(e)
pass
self.experiments = {}
sys.path.append(os.path.join(project_path, 'common_functions'))
sys.path.append(os.path.join(project_path, 'src'))
pd.options.display.max_colwidth = 130
self.train = pd.read_csv(os.path.join(dataset_path, 'train_merged.csv'))
self.test = pd.read_csv(os.path.join(dataset_path, 'test_merged.csv'))
self.valid = pd.read_csv(os.path.join(dataset_path, 'valid_merged.csv'))
if not hasattr(self, 'table_A'):
self.table_A = pd.read_csv(os.path.join(dataset_path, 'tableA.csv')).drop(col_to_drop, 1)
if not hasattr(self, 'table_B'):
self.table_B = pd.read_csv(os.path.join(dataset_path, 'tableB.csv')).drop(col_to_drop, 1)
if dataset_name == 'BeerAdvo-RateBeer':
table_A = pd.read_csv(os.path.join(dataset_path, 'tableA.csv'))
table_B = pd.read_csv(os.path.join(dataset_path, 'tableB.csv'))
for col in table_A.columns:
if table_A.dtypes[col] == object:
for c, to_replace in [(' _ ™ ', '\''), ('äº _', '\xE4\xBA\xAC'),
('_ œ', ''), (' _', ''),
('Ã ', 'Ã'), ('»', ''), ('$', '¤'), (' _ ™ ', '\''),
('1/2', '½'), ('1/4', '¼'), ('3/4', '¾'),
('Ãa', '\xC3\xADa'), ('Ä `` ', '\xC4\xAB'), (r'Ã$', '\xC3\xADa'),
('\. \.', '\.'),
('Å ¡', '\xC5\xA1'),
('Ã\'\' ', '\xC3\xBB'), ('_', '\xC3\x80'), ('à œ', ''),
('à ¶ ', '\xC3\xB6'), ('û¶ ', '\xC3\xB6'),
(' \.', ''), ('( ', ''), (' \&\#41; ', ''),
]:
table_A[col] = table_A[col].str.replace(c, to_replace)
table_B[col] = table_B[col].str.replace(c, to_replace)
pat = r"(?P<one>Ã)(?P<two>. )"
repl = lambda m: f'Ã{m.group("two")[0]}'
table_A[col] = table_A[col].str.replace(pat, repl)
table_A['Brew_Factory_Name'] = table_A['Brew_Factory_Name'].str.encode('latin-1').str.decode('utf-8')
self.table_A = table_A
self.table_B = table_B
left_ids = []
right_ids = []
for df in [self.train, self.valid, self.test]:
left_ids.append(df.left_id.values)
right_ids.append(df.right_id.values)
left_ids = np.unique(np.concatenate(left_ids))
right_ids = np.unique(np.concatenate(right_ids))
self.table_A[~self.table_A.id.isin(left_ids)] = None
self.table_B[~self.table_B.id.isin(right_ids)] = None
self.cols = np.setdiff1d(self.table_A.columns, ['id'])
self.lp = 'left_'
self.rp = 'right_'
if clean_special_char:
spec_chars = ["!", '"', "#", "%", "&", "'", "(", ")",
"*", "+", ",", "-", "/", ":", ";", "<",
"=", ">", "?", "@", "[", "\\", "]", "^", "_",
"`", "{", "|", "}", "~", "–", "´"]
for col in np.setdiff1d(self.table_A.columns, ['id']):
self.table_A[col] = self.table_A[col].astype(str). \
str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode(
'utf-8') + ' '
self.table_B[col] = self.table_B[col].astype(str). \
str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode(
'utf-8') + ' '
for char in spec_chars:
self.table_A[col] = self.table_A[col].str.replace(' \\' + char + ' ', ' ')
self.table_B[col] = self.table_B[col].str.replace(' \\' + char + ' ', ' ')
for char in ['-', '/', '\\']:
self.table_A[col] = self.table_A[col].str.replace(char, ' ')
self.table_B[col] = self.table_B[col].str.replace(char, ' ')
self.table_A[col] = self.table_A[col].str.split().str.join(" ").str.lower()
self.table_B[col] = self.table_B[col].str.split().str.join(" ").str.lower()
self.table_A = self.table_A.replace('^None$', np.nan, regex=True).replace('^nan$', np.nan, regex=True)
self.table_B = self.table_B.replace('^None$', np.nan, regex=True).replace('^nan$', np.nan, regex=True)
self.words_divided = {}
tmp_path = os.path.join(self.model_files_path, 'words_maps.pickle')
try:
assert self.reset_files == False, 'Reset_files'
with open(tmp_path, 'rb') as file:
self.words_divided = pickle.load(file)
print('Loaded ' + tmp_path)
except Exception as e:
print(e)
for name, df in zip(['table_A', 'table_B'], [self.table_A, self.table_B]):
self.words_divided[name] = WordPairGenerator.map_word_to_attr(df, self.cols, verbose=self.verbose)
with open(tmp_path, 'wb') as file:
pickle.dump(self.words_divided, file)
tmp_cols = ['id', 'left_id', 'right_id', 'label']
self.train_merged = pd.merge(
pd.merge(self.train[tmp_cols], self.table_A.add_prefix('left_'), on='left_id'),
self.table_B.add_prefix('right_'), on='right_id').sort_values('id').reset_index(drop='True')
self.test_merged = pd.merge(
pd.merge(self.test[tmp_cols], self.table_A.add_prefix('left_'), on='left_id'),
self.table_B.add_prefix('right_'), on='right_id').sort_values('id').reset_index(drop='True')
self.valid_merged = pd.merge(
pd.merge(self.valid[tmp_cols], self.table_A.add_prefix('left_'), on='left_id'),
self.table_B.add_prefix('right_'), on='right_id').sort_values('id').reset_index(drop='True')
for col, type in zip(['id', 'label'], ['UInt32', 'UInt8']):
self.train_merged[col] = self.train_merged[col].astype(type)
self.valid_merged[col] = self.valid_merged[col].astype(type)
self.test_merged[col] = self.test_merged[col].astype(type)
self.train = self.train_merged
self.valid = self.valid_merged
self.test = self.test_merged
self.sentence_embedding = sentence_embedding
if we is not None:
self.we = we
elif we_finetuned:
if we_finetune_path is not None:
finetuned_path = we_finetune_path
elif we_finetuned == 'SBERT':
model_save_path = os.path.join(self.model_files_path, 'sBERT')
if reset_files or os.path.isdir(model_save_path) is False:
finetuned_path = finetune_BERT(self, num_epochs=num_epochs, model_save_path=model_save_path)
else:
finetuned_path = model_save_path
else:
finetuned_path = os.path.join(self.project_path, 'dataset_files', 'finetuned_models', dataset_name)
self.we = WordEmbedding(device=self.device, verbose=verbose, model_path=finetuned_path,
sentence_embedding=sentence_embedding)
else:
self.we = WordEmbedding(device=self.device, verbose=verbose, sentence_embedding=sentence_embedding)
# def __del__(self):
# try:
# for value, key in self.embeddings.items():
# value.cpu()
# except Exception as e:
# print(e)
# pass
# try:
# self.we.mode.to('cpu')
# except Exception as e:
# print(e)
# pass
# gc.collect()
# torch.cuda.empty_cache()
# return super(Routine, self).__del__()
def generate_df_embedding(self, chunk_size=100):
self.embeddings = {}
if self.sentence_embedding:
self.sentence_embedding_dict = {}
self.words = {}
try:
assert self.reset_files == False, 'Reset_files'
for df_name in ['table_A', 'table_B']:
tmp_path = os.path.join(self.model_files_path, 'emb_' + df_name + '.csv')
with open(tmp_path, 'rb') as file:
self.embeddings[df_name] = torch.load(file, map_location=torch.device(self.device))
tmp_path = os.path.join(self.model_files_path, 'words_list_' + df_name + '.csv')
with open(tmp_path, 'rb') as file:
self.words[df_name] = pickle.load(file)
if self.sentence_embedding:
tmp_path = os.path.join(self.model_files_path, 'sentence_emb_' + df_name + '.csv')
with open(tmp_path, 'rb') as file:
self.sentence_embedding_dict[df_name] = torch.load(file, map_location=torch.device(self.device))
print('Loaded embeddings.')
except Exception as e:
print(e)
self.we.verbose = self.verbose
we = self.we
for name, df in [('table_A', self.table_A), ('table_B', self.table_B)]:
gc.collect()
torch.cuda.empty_cache()
if self.sentence_embedding:
emb, words, sentence_emb = we.generate_embedding(df, chunk_size=chunk_size)
self.sentence_embedding_dict[name] = sentence_emb
tmp_path = os.path.join(self.model_files_path, 'sentence_emb_' + name + '.csv')
with open(tmp_path, 'wb') as file:
torch.save(sentence_emb, file)
else:
emb, words = we.generate_embedding(df, chunk_size=chunk_size)
self.embeddings[name] = emb
self.words[name] = words
tmp_path = os.path.join(self.model_files_path, 'emb_' + name + '.csv')
with open(tmp_path, 'wb') as file:
torch.save(emb, file)
tmp_path = os.path.join(self.model_files_path, 'words_list_' + name + '.csv')
with open(tmp_path, 'wb') as file:
pickle.dump(words, file)
if self.sentence_embedding:
            assert self.sentence_embedding_dict['table_A'][0].shape == torch.Size([768]), \
                f'Sentence emb has shape: {self.sentence_embedding_dict["table_A"][0].shape}. It must be [768]!'
def get_processed_data(self, df, chunk_size=500, verbose=False):
we = self.we
res = {}
for side in ['left', 'right']:
gc.collect()
torch.cuda.empty_cache()
if verbose:
print(f'Embedding {side} side')
prefix = self.lp if side == 'left' else self.rp
cols = [prefix + col for col in self.cols]
tmp_df = df.loc[:, cols]
res[side + '_word_map'] = WordPairGenerator.map_word_to_attr(tmp_df, self.cols, prefix=prefix,
verbose=self.verbose)
if self.sentence_embedding:
emb, words, sentence_emb = we.generate_embedding(tmp_df, chunk_size=chunk_size)
res[side + '_sentence_emb'] = sentence_emb
else:
emb, words = we.generate_embedding(tmp_df, chunk_size=chunk_size)
res[side + '_emb'] = emb
res[side + '_words'] = words
return res
def compute_word_pair(self, use_schema=True, **kwargs):
word_sim = self.word_pairing_kind == 'word_similarity'
words_pairs_dict, emb_pairs_dict = {}, {}
if self.sentence_embedding:
self.sentence_emb_pairs_dict = {}
try:
assert self.reset_files == False, 'Reset_files'
for df_name in ['train', 'valid', 'test']:
tmp_path = os.path.join(self.model_files_path, df_name + 'word_pairs.csv')
words_pairs_dict[df_name] = pd.read_csv(tmp_path, keep_default_na=False)
tmp_path = os.path.join(self.model_files_path, df_name + 'emb_pairs.csv')
with open(tmp_path, 'rb') as file:
emb_pairs_dict[df_name] = pickle.load(file)
if self.sentence_embedding:
tmp_path = os.path.join(self.model_files_path, df_name + 'sentence_emb_pairs.csv')
with open(tmp_path, 'rb') as file:
self.sentence_emb_pairs_dict[df_name] = pickle.load(file)
print('Loaded word pairs')
except Exception as e:
print(e)
if word_sim:
word_pair_generator = WordPairGeneratorEdit(df=self.test, use_schema=use_schema, device=self.device,
verbose=self.verbose,
words_divided=self.words_divided,
sentence_embedding_dict=self.sentence_embedding_dict,
**kwargs)
else:
word_pair_generator = WordPairGenerator(self.words, self.embeddings, words_divided=self.words_divided,
df=self.test,
use_schema=use_schema, device=self.device, verbose=self.verbose,
sentence_embedding_dict=self.sentence_embedding_dict,
**kwargs)
for df_name, df in zip(['train', 'valid', 'test'], [self.train, self.valid, self.test]):
if word_sim:
word_pairs = word_pair_generator.process_df(df)
else:
if self.sentence_embedding:
word_pairs, emb_pairs, sentence_emb_pairs = word_pair_generator.process_df(df)
self.sentence_emb_pairs_dict[df_name] = sentence_emb_pairs
else:
word_pairs, emb_pairs = word_pair_generator.process_df(df)
emb_pairs_dict[df_name] = emb_pairs
tmp_path = os.path.join(self.model_files_path, df_name + 'word_pairs.csv')
pd.DataFrame(word_pairs).to_csv(tmp_path, index=False)
tmp_path = os.path.join(self.model_files_path, df_name + 'emb_pairs.csv')
with open(tmp_path, 'wb') as file:
pickle.dump(emb_pairs, file)
if self.sentence_embedding:
tmp_path = os.path.join(self.model_files_path, df_name + 'sentence_emb_pairs.csv')
with open(tmp_path, 'wb') as file:
pickle.dump(self.sentence_emb_pairs_dict, file)
words_pairs_dict[df_name] = pd.DataFrame(word_pairs)
self.words_pairs_dict = words_pairs_dict
if not word_sim:
self.emb_pairs_dict = emb_pairs_dict
if self.sentence_embedding:
return self.words_pairs_dict, self.emb_pairs_dict, self.sentence_emb_pairs_dict
else:
return words_pairs_dict, emb_pairs_dict
def get_word_pairs(self, df, data_dict, use_schema=True,
# parallel=False,
**kwargs):
if self.word_pairing_kind is not None and self.word_pairing_kind == 'word_similarity':
wp = WordPairGeneratorEdit(df=df, use_schema=use_schema, device=self.device, verbose=self.verbose,
sentence_embedding_dict=self.sentence_embedding_dict, **kwargs)
res = wp.get_word_pairs(df, data_dict)
return res
else:
wp = WordPairGenerator(df=df, use_schema=use_schema, device=self.device, verbose=self.verbose,
sentence_embedding_dict=self.sentence_embedding_dict, **kwargs)
# if parallel:
# res = wp.get_word_pairs_parallel(df, data_dict)
# else:
res = wp.get_word_pairs(df, data_dict) # empty lost
if self.sentence_embedding:
word_pairs, emb_pairs, sent_emb_pairs = res
else:
word_pairs, emb_pairs = res
word_pairs = pd.DataFrame(word_pairs)
if self.sentence_embedding:
return word_pairs, emb_pairs, sent_emb_pairs
else:
return word_pairs, emb_pairs
def net_train(self, num_epochs=40, lr=3e-5, batch_size=256, word_pairs=None, emb_pairs=None,
sentence_emb_pairs=None,
valid_pairs=None, valid_emb=None, valid_sentence_emb_pairs=None):
if word_pairs is None or emb_pairs is None:
word_pairs = self.words_pairs_dict['train']
emb_pairs = self.emb_pairs_dict['train']
if self.sentence_embedding:
sentence_emb_pairs = self.sentence_emb_pairs_dict['train']
else:
sentence_emb_pairs = None
if valid_pairs is None or valid_emb is None:
valid_pairs = self.words_pairs_dict['valid']
valid_emb = self.emb_pairs_dict['valid']
if self.sentence_embedding:
valid_sententce_emb_pairs = self.sentence_emb_pairs_dict['valid']
else:
valid_sententce_emb_pairs = None
data_loader = DatasetAccoppiate(word_pairs, emb_pairs, sentence_embedding_pairs=sentence_emb_pairs)
self.train_data_loader = data_loader
best_model = NetAccoppiate(sentence_embedding=self.sentence_embedding, )
device = self.device
tmp_path = os.path.join(self.model_files_path, 'net0.pickle')
try:
assert self.reset_networks == False, 'resetting networks'
best_model.load_state_dict(torch.load(tmp_path, map_location=torch.device(device)))
except Exception as e:
print(e)
net = NetAccoppiate(sentence_embedding=self.sentence_embedding)
net.to(device)
criterion = nn.BCELoss().to(device)
# optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=.9)
optimizer = optim.Adam(net.parameters(), lr=lr)
train_dataset = data_loader
valid_dataset = copy.deepcopy(train_dataset)
valid_dataset.__init__(valid_pairs, valid_emb, sentence_embedding_pairs=valid_sententce_emb_pairs)
dataloaders_dict = {'train': DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4),
'valid': DataLoader(valid_dataset, batch_size=batch_size, shuffle=True, num_workers=4)}
best_model, score_history, last_model = train_model(net,
dataloaders_dict, criterion, optimizer,
nn.MSELoss().to(device), num_epochs=num_epochs,
device=device)
# optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=.9)
# best_model, score_history, last_model = train_model(net,dataloaders_dict, criterion, optimizer,nn.MSELoss().to(device), num_epochs=150, device=device)
out = net(valid_dataset.X.to(device))
print(f'best_valid --> mean:{out.mean():.4f} std: {out.std():.4f}')
out = last_model(valid_dataset.X.to(device))
print(f'last_model --> mean:{out.mean():.4f} std: {out.std():.4f}')
print('Save...')
torch.save(best_model.state_dict(), tmp_path)
self.word_pair_model = best_model
return best_model
def preprocess_word_pairs(self, **kwargs):
features_dict = {}
words_pairs_dict = {}
for name in ['train', 'valid', 'test']:
if self.sentence_embedding:
sentence_emb_pairs = self.sentence_emb_pairs_dict[name]
else:
sentence_emb_pairs = None
if self.word_pairing_kind == 'word_similarity':
feat, word_pairs = self.extract_features(None, self.words_pairs_dict[name],
None, None,
sentence_emb_pairs=sentence_emb_pairs,
additive_only=self.additive_only, **kwargs)
else:
feat, word_pairs = self.extract_features(self.word_pair_model, self.words_pairs_dict[name],
self.emb_pairs_dict[name], self.train_data_loader,
sentence_emb_pairs=sentence_emb_pairs,
additive_only=self.additive_only, **kwargs)
features_dict[name] = feat
words_pairs_dict[name] = word_pairs
self.features_dict, self.words_pairs_dict = features_dict, words_pairs_dict
return features_dict, words_pairs_dict
def extract_features(self, model: NetAccoppiate, word_pairs, emb_pairs, train_data_loader, sentence_emb_pairs=None,
**kwargs):
if self.cos_sim is not None or self.word_pairing_kind == 'word_similarity':
if self.cos_sim == 'binary':
word_pair_corrected = word_pairs
word_pair_corrected['pred'] = np.where(word_pair_corrected['cos_sim'] > 0, 1, 0)
else:
word_pair_corrected = word_pairs
word_pair_corrected['pred'] = word_pair_corrected['cos_sim']
else:
model.eval()
model.to(self.device)
data_loader = train_data_loader
data_loader.__init__(word_pairs, emb_pairs, sentence_emb_pairs)
word_pair_corrected = data_loader.word_pairs_corrected
with torch.no_grad():
word_pair_corrected['pred'] = model(data_loader.X.to(self.device)).cpu().detach().numpy()
# features = self.feature_extractor.extract_features(word_pair_corrected, **kwargs)
if self.feature_kind == 'min':
features = self.feature_extractor.extract_features_min(word_pair_corrected, **kwargs)
else:
features = self.feature_extractor.extract_features_by_attr(word_pair_corrected, self.cols, **kwargs)
return features, word_pair_corrected
def EM_modelling(self, *args, do_evaluation=False, do_feature_selection=False, results_path='results'):
if self.additive_only and results_path == 'results':
results_path = 'results_additive'
        if not hasattr(self, 'models'):
mmScaler = MinMaxScaler()
mmScaler.clip = False
self.models = [
('LR',
Pipeline([('mm', copy.copy(mmScaler)), ('LR', LogisticRegression(max_iter=200, random_state=0))])),
('LR_std',
Pipeline(
[('mm', copy.copy(StandardScaler())), ('LR', LogisticRegression(max_iter=200, random_state=0))])),
('LDA', Pipeline([('mm', copy.copy(mmScaler)), ('LDA', LinearDiscriminantAnalysis())])),
('LDA_std', Pipeline([('mm', copy.copy(StandardScaler())), ('LDA', LinearDiscriminantAnalysis())])),
('KNN', Pipeline([('mm', copy.copy(mmScaler)), ('KNN', KNeighborsClassifier())])),
('CART', DecisionTreeClassifier(random_state=0)),
('NB', GaussianNB()),
# ('SVM', Pipeline([('mm', copy.copy(mmScaler)), ('SVM', SVC(probability=True, random_state=0))])),
# ('AB', AdaBoostClassifier(random_state=0)),
('GBM', GradientBoostingClassifier(random_state=0)),
('RF', RandomForestClassifier(random_state=0)),
# ('ET', ExtraTreesClassifier(random_state=0)),
('dummy', DummyClassifier(strategy='stratified', random_state=0)),
]
# models.append(('Vote', VotingClassifier(models[:-1], voting='soft')))
model_names = [x[0] for x in self.models]
X_train, y_train = self.features_dict['train'].to_numpy(), self.train.label.astype(int)
X_valid, y_valid = self.features_dict['valid'].to_numpy(), self.valid.label.astype(int)
X_test, y_test = self.features_dict['test'].to_numpy(), self.test.label.astype(int)
res = {(x, y): [] for x in ['train', 'valid', 'test'] for y in ['f1', 'precision', 'recall']}
df_pred = {}
for name, model in tqdm(self.models):
model.fit(X_train, y_train)
for turn_name, turn_df, turn_y in zip(['train', 'valid', 'test'], [X_train, X_valid, X_test],
[y_train, y_valid, y_test]):
df_pred[turn_name] = model.predict(turn_df)
for score_name, scorer in [['f1', f1_score], ['precision', precision_score], ['recall', recall_score]]:
score_value = scorer(turn_y, df_pred[turn_name])
res[(turn_name, score_name)].append(score_value)
if turn_name == 'test' and score_name == 'f1':
print(f'{name:<10}-{score_name} {score_value}')
print('before feature selection')
res_df = pd.DataFrame(res, index=model_names)
res_df.index.name = 'model_name'
try:
os.makedirs(os.path.join(self.model_files_path, results_path))
except Exception as e:
print(e)
pass
res_df.to_csv(os.path.join(self.model_files_path, results_path, 'performances.csv'))
display(res_df)
best_f1 = res_df[('test', 'f1')].max()
best_features = self.features_dict['train'].columns
best_model_name = res_df.iloc[[res_df[('test', 'f1')].argmax()]].index.values[0]
for x in self.models:
if x[0] == best_model_name:
best_model = x[1]
# Feature selection
if do_feature_selection:
print('running feature score')
score_df = {'feature': [], 'score': []}
X_train, y_train = self.features_dict['train'], self.train.label.astype(int)
X_valid, y_valid = self.features_dict['valid'], self.valid.label.astype(int)
X_test, y_test = self.features_dict['test'], self.test.label.astype(int)
cols = self.features_dict['train'].columns
new_cols = cols
different = True
            iteration = 0
            while different and iteration <= 2:
                cols = new_cols
                score_df, res_df, new_cols = feature_importance(X_train, y_train, X_valid, y_valid, cols)
                different = len(cols) != len(new_cols)
                iteration += 1
self.score_df = score_df
self.res_df = res_df
selected_features = new_cols
res = {(x, y): [] for x in ['train', 'valid', 'test'] for y in ['f1', 'precision', 'recall']}
print('Running models')
for name, model in tqdm(self.models):
model.fit(X_train, y_train)
for turn_name, turn_df, turn_y in zip(['train', 'valid', 'test'], [X_train, X_valid, X_test],
[y_train, y_valid, y_test]):
df_pred[turn_name] = model.predict(turn_df)
for score_name, scorer in [['f1', f1_score], ['precision', precision_score],
['recall', recall_score]]:
res[(turn_name, score_name)].append(scorer(turn_y, df_pred[turn_name]))
self.models = self.models
res_df = pd.DataFrame(res, index=model_names)
res_df.index.name = 'model_name'
display(res_df)
if best_f1 < res_df[('test', 'f1')].max():
best_f1 = res_df[('test', 'f1')].max()
best_features = selected_features
best_model_name = res_df.iloc[[res_df[('test', 'f1')].argmax()]].index.values[0]
for x in self.models:
if x[0] == best_model_name:
best_model = x[1]
res_df.to_csv(os.path.join(self.model_files_path, results_path, 'performances.csv'))
X_train, y_train = self.features_dict['train'][best_features].to_numpy(), self.train.label.astype(int)
best_model.fit(X_train, y_train)
model_data = {'features': best_features, 'model': best_model}
tmp_path = os.path.join(self.model_files_path, 'best_feature_model_data.pickle')
self.best_model_data = model_data
with open(tmp_path, 'wb') as file:
pickle.dump(model_data, file)
linear_model = Pipeline([('LR', LogisticRegression(max_iter=200, random_state=0))])
# LogisticRegression(max_iter=200, random_state=0)
X_train, y_train = self.features_dict['train'][best_features].to_numpy(), self.train.label.astype(int)
linear_model.fit(X_train, y_train)
model_data = {'features': best_features, 'model': linear_model}
tmp_path = os.path.join(self.model_files_path, 'linear_model.pickle')
with open(tmp_path, 'wb') as file:
pickle.dump(model_data, file)
# save unit_contribution
# co = linear_model['LR'].coef_
# co_df = pd.DataFrame(co, columns=self.features_dict['valid'].columns).T
# for name in ['train','valid']
# turn_contrib = FeatureContribution.extract_features_by_attr(word_relevance, routine.cols)
# for x in co_df.index:
# turn_contrib[x] = turn_contrib[x] * co_df.loc[x, 0]
# data = turn_contrib.sum(1).to_numpy().reshape(-1, 1)
# word_relevance['token_contribution'] = data
if do_evaluation:
self.evaluation(self.valid_merged)
return res_df
def get_match_score(self, features_df, lr=False, reload=False):
if lr is True:
tmp_path = os.path.join(self.model_files_path, 'linear_model.pickle')
else:
tmp_path = os.path.join(self.model_files_path, 'best_feature_model_data.pickle')
if not hasattr(self, 'best_model_data') or reload:
with open(tmp_path, 'rb') as file:
model_data = pickle.load(file)
self.best_model_data = model_data
self.model = self.best_model_data['model']
X = features_df[self.best_model_data['features']].to_numpy()
if isinstance(self.model, Pipeline) and isinstance(self.model[0], MinMaxScaler):
self.model[0].clip = False
return self.model.predict_proba(X)[:, 1]
def plot_rf(self, rf, columns):
pd.DataFrame([rf.feature_importances_], columns=columns).T.plot.bar(figsize=(25, 5));
def get_relevance_scores(self, word_pairs, emb_pairs, sentence_emb_pairs=None, **kwargs): # m2
feat, word_pairs = self.extract_features(emb_pairs=emb_pairs, word_pairs=word_pairs,
model=self.word_pair_model, train_data_loader=self.train_data_loader,
sentence_emb_pairs=sentence_emb_pairs,
additive_only=self.additive_only,
**kwargs)
return feat, word_pairs
def get_predictor(self):
self.reset_networks = False
self.net_train()
def predictor(df_to_process, routine, return_data=False, lr=False, chunk_size=500, reload=False,
additive_only=self.additive_only):
df_to_process = df_to_process.copy()
if 'id' not in df_to_process.columns:
df_to_process = df_to_process.reset_index(drop=True)
df_to_process['id'] = df_to_process.index
gc.collect()
torch.cuda.empty_cache()
data_dict = routine.get_processed_data(df_to_process, chunk_size=chunk_size)
res = routine.get_word_pairs(df_to_process, data_dict)
features, word_relevance = routine.get_relevance_scores(*res)
if lr:
match_score = routine.get_match_score(features, lr=lr, reload=reload)
else:
match_score = routine.get_match_score(features, reload=reload)
match_score_series = pd.Series(0.5, index=df_to_process.id)
match_score_series[features.index] = match_score
match_score = match_score_series.values
if return_data:
if lr:
lr = routine.model['LR']
co = lr.coef_
co_df = pd.DataFrame(co, columns=routine.features_dict['test'].columns).T
turn_contrib = FeatureContribution.extract_features_by_attr(word_relevance, routine.cols,
additive_only=additive_only)
for x in co_df.index:
turn_contrib[x] = turn_contrib[x] * co_df.loc[x, 0]
data = turn_contrib.sum(1).to_numpy().reshape(-1, 1)
word_relevance['token_contribution'] = data
return match_score, data_dict, res, features, word_relevance
else:
return match_score
return partial(predictor, routine=self)
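    # A minimal usage sketch of the returned closure (assumes `routine` is an instance of this class and
    # `df_pairs` is a hypothetical dataframe of record pairs to score):
    # predict_fn = routine.get_predictor()
    # scores = predict_fn(df_pairs)  # match probabilities only
    # scores, data_dict, res, features, word_relevance = predict_fn(df_pairs, return_data=True, lr=True)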
def get_calculated_data(self, df_name):
features = self.features_dict[df_name]
word_relevance = self.words_pairs_dict[df_name]
pred = self.get_match_score(features, lr=True, reload=True)
lr = self.model['LR']
co = lr.coef_
co_df = | pd.DataFrame(co, columns=self.features_dict[df_name].columns) | pandas.DataFrame |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Activation, Dropout, Conv2D, MaxPooling2D, BatchNormalization, Flatten
from tensorflow.keras.optimizers import Adam, Adamax
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras import regularizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model, load_model, Sequential
import numpy as np
import pandas as pd
# shutil: high-level file operations
import shutil
import time
import cv2 as cv2
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import seaborn as sns
sns.set_style('darkgrid')
from PIL import Image
from sklearn.metrics import confusion_matrix, classification_report
from IPython.core.display import display, HTML
# stop annoying tensorflow warning messages
import logging
plt.rcParams['font.sans-serif'] = ['SimHei']
logging.getLogger("tensorflow").setLevel(logging.ERROR)
print('modules loaded')
# Show a sample of images from a data generator
def show_image_samples(gen):
t_dict = gen.class_indices
print(t_dict)
classes = list(t_dict.keys())
images, labels = next(gen) # get a sample batch from the generator
plt.figure(figsize=(20, 20))
length = len(labels)
if length < 25: # show maximum of 25 images
r = length
else:
r = 25
for i in range(r):
plt.subplot(5, 5, i + 1)
image = images[i] / 255
plt.imshow(image)
index = np.argmax(labels[i])
class_name = classes[index]
plt.title(class_name, color='blue', fontsize=12)
plt.axis('off')
plt.show()
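# A minimal usage sketch (assumed directory layout and image size; adjust to the real dataset):
# sample_gen = ImageDataGenerator().flow_from_directory('data/train', target_size=(224, 224),
#                                                       batch_size=25, class_mode='categorical')
# show_image_samples(sample_gen)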
# Show one sample image ('1.jpg') from each class directory
def show_images(tdir):
classlist = os.listdir(tdir)
length = len(classlist)
columns = 5
rows = int(np.ceil(length / columns))
plt.figure(figsize=(20, rows * 4))
for i, klass in enumerate(classlist):
classpath = os.path.join(tdir, klass)
imgpath = os.path.join(classpath, '1.jpg')
img = plt.imread(imgpath)
plt.subplot(rows, columns, i + 1)
plt.axis('off')
plt.title(klass, color='blue', fontsize=12)
plt.imshow(img)
# Print text in a given foreground/background color using ANSI escape codes
def print_in_color(txt_msg, fore_tupple, back_tupple):
# prints the text_msg in the foreground color specified by fore_tupple with the background specified by back_tupple
# text_msg is the text, fore_tupple is foregroud color tupple (r,g,b), back_tupple is background tupple (r,g,b)
rf, gf, bf = fore_tupple
rb, gb, bb = back_tupple
msg = '{0}' + txt_msg
mat = '\33[38;2;' + str(rf) + ';' + str(gf) + ';' + str(bf) + ';48;2;' + str(rb) + ';' + str(gb) + ';' + str(
bb) + 'm'
print(msg.format(mat), flush=True)
print('\33[0m', flush=True) # returns default print color to back to black
return
# print_in_color("wow", (244, 252, 3), (55, 65, 80))
# Custom Keras callback that adjusts the learning rate and optionally asks the user whether to continue
class LRA(keras.callbacks.Callback):
def __init__(self, model, base_model, patience, stop_patience,
threshold, factor, dwell, batches, initial_epoch,
epochs, ask_epoch):
super(LRA, self).__init__()
self.model = model
self.base_model = base_model
        # number of epochs to wait before adjusting the learning rate
        self.patience = patience
        # number of learning-rate adjustments without improvement before training stops
        self.stop_patience = stop_patience
        # training-accuracy threshold that decides which metric drives the lr adjustment
        self.threshold = threshold
        # factor by which the learning rate is reduced
        self.factor = factor
        self.dwell = dwell
        # number of training batches run per epoch
        self.batches = batches
        self.initial_epoch = initial_epoch
        self.epochs = epochs
        self.ask_epoch = ask_epoch
        # save the initial ask_epoch so it can be restored if training resumes
        self.ask_epoch_initial = ask_epoch
        # callback counters: how many epochs the lr has gone without an improvement
        self.count = 0
        self.stop_count = 0
        # epoch with the lowest validation loss so far
        self.best_epoch = 1
        # fetch and store the initial learning rate
        self.initial_lr = float(
            tf.keras.backend.get_value(model.optimizer.lr))
        # initialize the highest training accuracy to 0
        self.highest_tracc = 0.0
        # initialize the lowest validation loss to infinity
        self.lowest_vloss = np.inf
        # best weights start as the initial weights (updated during training)
        self.best_weights = self.model.get_weights()
        # keep the initial weights in case they must be restored
        self.initial_weights = self.model.get_weights()
    # At the start of training: log some status information
    def on_train_begin(self, logs=None):
        if self.base_model is not None:
            status = self.base_model.trainable
            if status:
                msg = 'base_model is trainable'
            else:
                msg = 'base_model is not trainable'
        else:
            msg = 'base_model does not exist'
print_in_color(msg, (244, 252, 3), (55, 65, 80))
msg = '{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}' \
'{8:10s}{9:^8s}'.format('Epoch', 'Loss',
'Accuracy',
'V_loss', 'V_acc', 'LR',
'Next LR', 'Monitor',
'% Improv', 'Duration')
print_in_color(msg, (244, 252, 3), (55, 65, 80))
self.start_time = time.time()
    # At the end of training: log the duration and restore the best weights
    def on_train_end(self, logs=None):
        self.stop_time = time.time()
        # total training time
        tr_duration = self.stop_time - self.start_time
        # whole hours
        hours = tr_duration // 3600
        # remaining minutes
        minutes = (tr_duration - (hours * 3600)) // 60
        # remaining seconds
        seconds = tr_duration - ((hours * 3600) + (minutes * 60))
        # set the model weights to the best weights recorded during training
        self.model.set_weights(self.best_weights)
        # training finished; report which epoch supplied the final weights
        msg = f'Training finished; model weights set to those of the best epoch ({self.best_epoch})'
        print_in_color(msg, (0, 255, 0), (55, 65, 80))
        msg = f'Training took {str(hours)} h, {minutes:4.1f} min, {seconds:4.2f} s'
print_in_color(msg, (0, 255, 0), (55, 65, 80))
    # At the end of each training batch: print running accuracy and loss
    def on_train_batch_end(self, batch, logs=None):
        # get the training accuracy
acc = logs.get('accuracy') * 100
loss = logs.get('loss')
msg = '{0:20s}processing batch {1:4s} of {2:5s} accuracy= {3:8.3f}' \
' loss: {4:8.5f}'.format('', str(batch),
str(self.batches),
acc, loss)
        # print on the same line to show the running batch
        print(msg, '\r', end='')
    # At the start of each epoch: record the start time
    def on_epoch_begin(self, epoch, logs=None):
self.now = time.time()
# def on_epoch_end(self, epoch, logs=None):
# self.later = time.time()
# # duration 是期间的意思
# duration = self.later - self.now
# # 获取当前学习率
# lr = float(tf.keras.backend.get_value(self.model.optimizer.lr))
#
# current_lr = lr
# v_loss = logs.get('val_loss')
# # 获取训练准确率
# acc = logs.get('accuracy')
# v_acc = logs.get('val_accuracy')
# loss = logs.get('loss')
#
# # 如果训练精度低于阈值,则根据训练精度调整lr
# if acc < self.threshold:
# monitor = 'accuracy'
# if epoch == 0:
# pimprov = 0.0
# else:
# pimprov = (acc - self.highest_tracc) * 100 / self.highest_tracc
#
# # 提高了训练精度
# if acc > self.highest_tracc:
# # 设置新的最高训练精度
# self.highest_tracc = acc
# # 将最好的权重保存到变量中
# self.best_weights = self.model.get_weights()
#
# # 将训练精度没有提升的次数计次 归零
# self.count = 0
# self.stop_count = 0
#
# if v_loss < self.lowest_vloss:
# self.lowest_vloss = v_loss
# color = (0, 255, 0)
#
# self.best_epoch = epoch + 1
# else:
# # 训练准确性没有提高检查是否超出了耐心数
# if self.count > self.patience - 1:
# color = (245, 170, 66)
# lr = lr * self.factor
# # 在优化器中设置学习率
# tf.keras.backend.set_value(self.model.optimizer.lr, lr)
# self.count = 0
# # 统计lr调整的次数
# self.stop_count = self.stop_count + 1
#
# if self.dwell:
# self.model.set_weights(self.best_weights)
# else:
# if v_loss < self.lowest_vloss:
# self.lowest_vloss = v_loss
#
# else:
# # 增加已用耐心次数
# self.count = self.count + 1
# # 训练准确率高于阈值,因此根据验证损失调整学习率
# else:
# # 监视
# monitor = 'val_loss'
# if epoch == 0:
# pimprov = 0.0
#
# else:
# pimprov = (self.lowest_vloss - v_loss) * 100 / self.lowest_vloss
# # 检查验证损失是否有改进
# if v_loss < self.lowest_vloss:
# # 用新的验证损失代替旧的最低损失
# self.lowest_vloss = v_loss
# # 更换权重,该权重为最好的权重
# self.best_weights = self.model.get_weights()
# # 更换无进度统计
# self.count = 0
# self.stop_count = 0
# color = (0, 255, 0)
# # 记录这次迭代是目前为止最好的迭代
# self.best_epoch = epoch + 1
# # 损失无改进
# else:
# # 耐心耗尽,需要更新学习率
# if self.count >= self.patience - 1:
# color = (245, 170, 66)
# # 修改学习率
# lr = lr * self.factor
# self.stop_count = self.stop_count + 1
#
# self.count = 0
# # 修改优化器中的学习率
# keras.backend.set_value(self.model.optimizer.lr, lr)
#
# if self.dwell:
# # 返回最好的权重
# self.model.set_weights(self.best_weights)
# else:
# # 还有耐心,继续迭代
# self.count = self.count + 1
#
# if acc > self.highest_tracc:
# self.highest_tracc = acc
#
# msg = f'{str(epoch + 1):^3s}/{str(self.epochs):4s}' \
# f' {loss:^9.3f}{acc * 100:^9.3f}{v_loss:^9.5f}{v_acc * 100:^9.3f}' \
# f'{current_lr:^9.5f}{lr:^9.5f}{monitor:^11s}{pimprov:^10.2f}{duration:^8.2f}'
#
# print_in_color(msg, color, (55, 65, 80))
# if self.stop_count > self.stop_patience - 1:
# # 检查学习率是否已调整 stop_count 次而没有改善学习效果
# msg = f' 训练在 {epoch + 1}' \
# f' 次后停止了, {self.stop_patience} 次调整学习率没有改进效果'
# print_in_color(msg, (0, 255, 255), (55, 65, 80))
# # 停止训练
# self.model.stop_training = True
# else:
# if self.ask_epoch != None:
# if epoch + 1 >= self.ask_epoch:
# if self.base_model.trainable:
# msg = '输入一个H以停止训练,或者输入一个整数以继续尝试训练'
# else:
# msg = '输入一个H以停止训练,F微调模型,输入一个整数以继续尝试训练'
#
# print_in_color(msg, (0, 255, 255), (55, 65, 80))
# ans = input('')
# if ans == 'H' or ans == 'h':
# msg = f'训练已在epoch {epoch + 1} 停止'
# print_in_color(msg, (0, 255, 255), (55, 65, 80))
# self.model.stop_training = True # stop training
# elif ans == 'F' or ans == 'f':
# if self.base_model.trainable:
# msg = 'base_model 一直允许训练'
# else:
# msg = '将base_model 设置为可训练以进行微调'
# self.base_model.trainable = True
# print_in_color(msg, (0, 255, 255), (55, 65, 80))
# msg = '{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}{8:^8s}'.format(
# 'Epoch', 'Loss', 'Accuracy',
# 'V_loss', 'V_acc', 'LR', 'Next LR', 'Monitor', '% Improv', 'Duration')
# print_in_color(msg, (244, 252, 3), (55, 65, 80))
# self.count = 0
# self.stop_count = 0
# self.ask_epoch = epoch + 1 + self.ask_epoch_initial
#
# else:
# ans = int(ans)
# self.ask_epoch += ans
# msg = f'训练将继续' + str(self.ask_epoch)
# print_in_color(msg, (0, 255, 255), (55, 65, 80))
#
# msg = '{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}{8:10s}{9:^8s}'.format(
# 'Epoch', 'Loss', 'Accuracy',
# 'V_loss', 'V_acc', 'LR', 'Next LR', 'Monitor', '% Improv', 'Duration')
# print_in_color(msg, (244, 252, 3), (55, 65, 80))
def on_epoch_end(self, epoch, logs=None): # method runs on the end of each epoch
later = time.time()
duration = later - self.now
lr = float(tf.keras.backend.get_value(self.model.optimizer.lr)) # get the current learning rate
current_lr = lr
v_loss = logs.get('val_loss') # get the validation loss for this epoch
acc = logs.get('accuracy') # get training accuracy
v_acc = logs.get('val_accuracy')
loss = logs.get('loss')
if acc < self.threshold: # if training accuracy is below threshold adjust lr based on training accuracy
monitor = 'accuracy'
if epoch == 0:
pimprov = 0.0
else:
pimprov = (acc - self.highest_tracc) * 100 / self.highest_tracc
if acc > self.highest_tracc: # training accuracy improved in the epoch
self.highest_tracc = acc # set new highest training accuracy
self.best_weights = self.model.get_weights() # traing accuracy improved so save the weights
self.count = 0 # set count to 0 since training accuracy improved
self.stop_count = 0 # set stop counter to 0
if v_loss < self.lowest_vloss:
self.lowest_vloss = v_loss
color = (0, 255, 0)
self.best_epoch = epoch + 1 # set the value of best epoch for this epoch
else:
# training accuracy did not improve check if this has happened for patience number of epochs
# if so adjust learning rate
if self.count >= self.patience - 1: # lr should be adjusted
color = (245, 170, 66)
lr = lr * self.factor # adjust the learning by factor
tf.keras.backend.set_value(self.model.optimizer.lr, lr) # set the learning rate in the optimizer
self.count = 0 # reset the count to 0
self.stop_count = self.stop_count + 1 # count the number of consecutive lr adjustments
self.count = 0 # reset counter
if self.dwell:
self.model.set_weights(
self.best_weights) # return to better point in N space
else:
if v_loss < self.lowest_vloss:
self.lowest_vloss = v_loss
else:
self.count = self.count + 1 # increment patience counter
else: # training accuracy is above threshold so adjust learning rate based on validation loss
monitor = 'val_loss'
if epoch == 0:
pimprov = 0.0
else:
pimprov = (self.lowest_vloss - v_loss) * 100 / self.lowest_vloss
if v_loss < self.lowest_vloss: # check if the validation loss improved
self.lowest_vloss = v_loss # replace lowest validation loss with new validation loss
self.best_weights = self.model.get_weights() # validation loss improved so save the weights
self.count = 0 # reset count since validation loss improved
self.stop_count = 0
color = (0, 255, 0)
self.best_epoch = epoch + 1 # set the value of the best epoch to this epoch
else: # validation loss did not improve
if self.count >= self.patience - 1: # need to adjust lr
color = (245, 170, 66)
lr = lr * self.factor # adjust the learning rate
self.stop_count = self.stop_count + 1 # increment stop counter because lr was adjusted
self.count = 0 # reset counter
tf.keras.backend.set_value(self.model.optimizer.lr, lr) # set the learning rate in the optimizer
if self.dwell:
self.model.set_weights(self.best_weights) # return to better point in N space
else:
self.count = self.count + 1 # increment the patience counter
if acc > self.highest_tracc:
self.highest_tracc = acc
msg = f'{str(epoch + 1):^3s}/{str(self.epochs):4s} {loss:^9.3f}{acc * 100:^9.3f}{v_loss:^9.5f}{v_acc * 100:^9.3f}{current_lr:^9.5f}{lr:^9.5f}{monitor:^11s}{pimprov:^10.2f}{duration:^8.2f}'
print_in_color(msg, color, (55, 65, 80))
if self.stop_count > self.stop_patience - 1: # check if learning rate has been adjusted stop_count times with no improvement
msg = f' training has been halted at epoch {epoch + 1} after {self.stop_patience} adjustments of learning rate with no improvement'
print_in_color(msg, (0, 255, 255), (55, 65, 80))
self.model.stop_training = True # stop training
else:
if self.ask_epoch != None:
if epoch + 1 >= self.ask_epoch:
                    if self.base_model.trainable:
msg = 'enter H to halt training or an integer for number of epochs to run then ask again'
else:
msg = 'enter H to halt training ,F to fine tune model, or an integer for number of epochs to run then ask again'
print_in_color(msg, (0, 255, 255), (55, 65, 80))
ans = input('')
if ans == 'H' or ans == 'h':
msg = f'training has been halted at epoch {epoch + 1} due to user input'
print_in_color(msg, (0, 255, 255), (55, 65, 80))
self.model.stop_training = True # stop training
elif ans == 'F' or ans == 'f':
                        if self.base_model.trainable:
msg = 'base_model is already set as trainable'
else:
msg = 'setting base_model as trainable for fine tuning of model'
self.base_model.trainable = True
print_in_color(msg, (0, 255, 255), (55, 65, 80))
msg = '{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}{8:^8s}'.format('Epoch',
'Loss',
'Accuracy',
'V_loss',
'V_acc', 'LR',
'Next LR',
'Monitor',
'% Improv',
'Duration')
print_in_color(msg, (244, 252, 3), (55, 65, 80))
self.count = 0
self.stop_count = 0
self.ask_epoch = epoch + 1 + self.ask_epoch_initial
else:
ans = int(ans)
self.ask_epoch += ans
msg = f' training will continue until epoch ' + str(self.ask_epoch)
print_in_color(msg, (0, 255, 255), (55, 65, 80))
msg = '{0:^8s}{1:^10s}{2:^9s}{3:^9s}{4:^9s}{5:^9s}{6:^9s}{7:^10s}{8:10s}{9:^8s}'.format('Epoch',
'Loss',
'Accuracy',
'V_loss',
'V_acc',
'LR',
'Next LR',
'Monitor',
'% Improv',
'Duration')
print_in_color(msg, (244, 252, 3), (55, 65, 80))
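# A minimal usage sketch of the LRA callback (assumes `model`, `base_model`, `train_gen` and `valid_gen`
# are defined elsewhere; the epoch/patience values below are illustrative only):
# batches = int(np.ceil(train_gen.samples / train_gen.batch_size))
# callbacks = [LRA(model=model, base_model=base_model, patience=1, stop_patience=3, threshold=0.9,
#                  factor=0.5, dwell=True, batches=batches, initial_epoch=0, epochs=40, ask_epoch=5)]
# history = model.fit(x=train_gen, epochs=40, verbose=0, callbacks=callbacks, validation_data=valid_gen)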
# Plot the training history
def tr_plot(tr_data, start_epoch):
    # plot training and validation curves
    tacc = tr_data.history["accuracy"]
    tloss = tr_data.history["loss"]
    vacc = tr_data.history["val_accuracy"]
    vloss = tr_data.history["val_loss"]
    # total number of epochs run
    Epoch_count = len(tacc) + start_epoch
    Epochs = [i + 1 for i in range(start_epoch, Epoch_count)]
    index_loss = np.argmin(vloss)
    val_lowest = vloss[index_loss]
    index_acc = np.argmax(vacc)
    acc_highest = vacc[index_acc]
    sc_label = 'best epoch=' + str(index_loss + 1 + start_epoch)
    vc_label = 'best epoch=' + str(index_acc + 1 + start_epoch)
    # create the figure
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))
    axes[0].plot(Epochs, tloss, 'r', label='Training loss')
    axes[0].plot(Epochs, vloss, 'g', label='Validation loss')
    axes[0].scatter(index_loss + 1 + start_epoch, val_lowest, s=150, c="blue", label=sc_label)
    axes[0].set_title('Training and Validation Loss')
    axes[0].set_xlabel("Epochs")
    axes[0].set_ylabel("Loss")
    axes[0].legend()
    axes[1].plot(Epochs, tacc, 'r', label='Training accuracy')
    axes[1].plot(Epochs, vacc, 'g', label='Validation accuracy')
    axes[1].scatter(index_acc + 1 + start_epoch, acc_highest, s=150, c='blue', label=vc_label)
    axes[1].set_title("Training and Validation Accuracy")
    axes[1].set_xlabel("Epochs")
    axes[1].set_ylabel("Accuracy")
    axes[1].legend()
    plt.tight_layout()
    plt.show()
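# Example (assuming `history` is the History object returned by model.fit and training started at epoch 0):
# tr_plot(history, 0)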
# Build a confusion matrix and classification report for the test set
def print_info(test_gen, preds, print_code, save_dir, subject):
    """
    :param test_gen: test-set data generator (defines how samples are produced, usually from a local image folder)
    :param preds: model predictions
    :param print_code: maximum number of misclassified files to print
    :param save_dir: save directory
    :param subject:
    :return:
    """
    # dict mapping class name -> index
    class_dict = test_gen.class_indices
    # true label indices
    labels = test_gen.labels
    # all file names
    file_names = test_gen.filenames
    error_list = []
    true_class = []
    pred_class = []
    prob_list = []
    # build a new dict with index as key and class name as value
    new_dict = {}
    error_indies = []
    # predicted label indices
    y_pred = []
    for key, value in class_dict.items():
        new_dict[value] = key
    # class names, ordered by index
    classes = list(new_dict.values())
    # number of misclassified samples
    errors = 0
for i, p in enumerate(preds):
        # predicted class index
        pred_index = np.argmax(p)
        # true class index
        true_index = labels[i]
        # if the prediction is wrong
        if pred_index != true_index:
            error_list.append(file_names[i])
            true_class.append(new_dict[true_index])
            pred_class.append(new_dict[pred_index])
            # store the highest predicted probability
prob_list.append(p[pred_index])
error_indies.append(true_index)
errors = errors + 1
y_pred.append(pred_index)
if print_code != 0:
if errors > 0:
if print_code > errors:
r = errors
else:
r = print_code
msg = '{0:^28s}{1:^28s}{2:^28s}{3:^16s}' \
.format('Filename', 'Predicted Class', 'True Class', 'Probability')
print_in_color(msg, (0, 255, 0), (55, 65, 80))
for i in range(r):
                # keep only the class folder and file name from the full path
split1 = os.path.split(error_list[i])
split2 = os.path.split(split1[0])
fname = split2[1] + '/' + split1[1]
msg = '{0:^28s}{1:^28s}{2:^28s}{3:4s}{4:^6.4f}'.format(fname, pred_class[i], true_class[i], ' ',
prob_list[i])
print_in_color(msg, (255, 255, 255), (55, 65, 60))
else:
            msg = 'Accuracy is 100%, no errors'
print_in_color(msg, (0, 255, 0), (55, 65, 80))
if errors > 0:
plot_bar = []
plot_class = []
for key, value in new_dict.items():
            # count how often this true class was misclassified (e.g. if index 11 is 'red-crowned crane', this counts the cranes that were predicted as something else)
count = error_indies.count(key)
if count != 0:
plot_bar.append(count)
plot_class.append(value)
fig = plt.figure()
fig.set_figheight(len(plot_class) / 3)
fig.set_figwidth(10)
for i in range(0, len(plot_class)):
c = plot_class[i]
x = plot_bar[i]
plt.barh(c, x, )
plt.title("测试集错误分类")
y_true = np.array(labels)
y_pred = np.array(y_pred)
    # only plot a confusion matrix for up to 30 classes
    if len(classes) <= 30:
        # create the confusion matrix
        cm = confusion_matrix(y_true, y_pred)
        length = len(classes)
        if length < 8:
            fig_width = 8
            fig_height = 8
        else:
            fig_width = int(length * 0.5)
            fig_height = int(length * 0.5)
        plt.figure(figsize=(fig_width, fig_height))
        # draw the matrix as a heatmap
        sns.heatmap(cm, annot=True, vmin=0, fmt='g', cmap='Blues', cbar=False)
        plt.xticks(np.arange(length) + 0.5, classes, rotation=90)
        plt.yticks(np.arange(length) + 0.5, classes, rotation=0)
        plt.xlabel("Predicted")
        plt.ylabel("Actual")
        plt.title("Confusion Matrix")
plt.show()
clr = classification_report(y_true, y_pred, target_names=classes)
print("Classification Report:\n----------------------\n", clr)
# Save the trained model and an associated class-dictionary csv file
def saver(save_path, model, model_name, subject, accuracy, img_size, scalar, generator):
    """
    :param save_path: save directory
    :param model: trained model
    :param model_name: model name, used in the file name
    :param subject: name of the classification task, used in the file name
    :param accuracy: accuracy, included in the file name
    :param img_size: image size stored in the csv
    :param scalar: scaling specification stored in the csv
    :param generator: data generator providing class_indices
    :return:
    """
    print("Save path:", save_path)
    # save the model (file name includes accuracy truncated to 2 decimal places)
    save_id = str(model_name + '-' + subject + '-' + str(accuracy)[:str(accuracy).rfind('.') + 3] + '.h5')
    model_save_loc = os.path.join(save_path, save_id)
    model.save(model_save_loc)
    print_in_color('model was saved as ' + model_save_loc, (0, 255, 0), (55, 65, 80))
    # now build class_df and write it to a csv file
    class_dict = generator.class_indices
    height = []
    width = []
    scale = []
    for i in range(len(class_dict)):
        height.append(img_size[0])
        width.append(img_size[1])
        scale.append(scalar)
    # build one pandas Series per column, then concatenate them into class_df
    Index_series = pd.Series(list(class_dict.values()), name='class_index')
    Class_series = pd.Series(list(class_dict.keys()), name='class')
    Height_series = pd.Series(height, name="height")
    Width_series = pd.Series(width, name='width')
    Scale_series = pd.Series(scale, name='scale by')
    class_df = pd.concat([Index_series, Class_series, Height_series, Width_series, Scale_series], axis=1)
    csv_name = 'class_dict_by_GERRY.csv'
    csv_save_loc = os.path.join(save_path, csv_name)
    class_df.to_csv(csv_save_loc, index=False)
    print_in_color('class dictionary saved as ' + csv_save_loc, (0, 255, 0), (55, 65, 80))
    return model_save_loc, csv_save_loc
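# Example call (illustrative names and values; `model` and `test_gen` come from the training steps above):
# model_loc, csv_loc = saver('./working', model, 'my_model', 'birds', 96.45, (224, 224), 1, test_gen)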
# Predict images using a trained model and the class-dictionary csv file
def predictor(sdir, csv_path, model_path, averaged=True, verbose=True):
    """
    :param sdir: root directory of the images to classify
    :param csv_path: path of the saved class-dictionary csv
    :param model_path: path of the saved model
    :param averaged: if True, average the predictions over all images and return a single class
    :param verbose: print progress information
    :return:
    """
    # read the csv file
class_df = pd.read_csv(csv_path)
class_count = len(class_df['class'].unique())
img_height = int(class_df['height'].iloc[0])
img_width = int(class_df['width'].iloc[0])
img_size = (img_width, img_height)
scale = class_df['scale by'].iloc[0]
try:
s = int(scale)
s2 = 1
s1 = 0
except:
split = scale.split('-')
s1 = float(split[1])
s2 = float(split[0].split('*')[1])
path_list = []
paths = os.listdir(sdir)
for f in paths:
path_list.append(os.path.join(sdir, f))
if verbose:
            print('loading the model, this may take about 10 seconds')
model = load_model(model_path)
image_count = len(path_list)
image_list = []
file_list = []
good_image_count = 0
for i in range(image_count):
try:
img = cv2.imread(path_list[i])
img = cv2.resize(img, img_size)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
good_image_count += 1
            # rescale pixel values using the scale spec read from the csv
img = img * s2 - s1
image_list.append(img)
file_name = os.path.split(path_list[i])[1]
file_list.append(file_name)
except:
if verbose:
print(path_list[i], 'is an invalid image file')
    # if there is only a single image, force averaged mode
    if good_image_count == 1:
        averaged = True
    image_array = np.array(image_list)
    # predict all images, sum the probabilities per class, then take the class with the highest sum
    preds = model.predict(image_array)
    if averaged:
        psum = []
        # start with an all-zero list, one entry per class
        for i in range(class_count):
            psum.append(0)
        # accumulate the predicted probabilities of every image
        for p in preds:
            for i in range(class_count):
                # add the probability of class i for this image to the running sum
                psum[i] = psum[i] + p[i]
        # class index with the highest summed probability
        index = np.argmax(psum)
        # class name for that index
        klass = class_df['class'].iloc[index]
        # average probability, as a percentage
        prob = psum[index] / good_image_count * 100
        for img in image_array:
            # add a batch dimension: (h, w, c) -> (1, h, w, c)
            test_img = np.expand_dims(img, axis=0)
            # index of the most probable class for this single image
            test_index = np.argmax(model.predict(test_img))
            if test_index == index:
                # show one image that was predicted as the chosen class
                if verbose:
                    plt.axis('off')
                    # display this image
                    plt.imshow(img)
                    print(f'Predicted as {klass} with probability {prob:6.4f}%')
                break
        return klass, prob, img, None
    # otherwise make an individual prediction for each image
else:
pred_class = []
prob_list = []
for i, p in enumerate(preds):
index = np.argmax(p)
klass = class_df['class'].iloc[index]
image_file = file_list[i]
pred_class.append(klass)
prob_list.append(p[index])
Fseries = pd.Series(file_list, name='图片文件')
Lseries = pd.Series(pred_class, name='??')
Pseries = | pd.Series(prob_list, name='概率') | pandas.Series |
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csgraph.minimum_spanning_tree.html
# Euclidean distance
def dist(p1, p2):
return np.sqrt(sum([(a - b) ** 2 for a, b in zip(p1, p2)]))
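# quick sanity check of the Euclidean distance helper (illustrative values):
# dist((0, 0, 0), (0, 1, 1))  # -> sqrt(2) ~ 1.414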
# list of points
dico = {
1:(0, 0, 0),
2:(0, -1, 0),
3:(0, 1, 1),
4:(5, 1, 1),
5:(6, 0, 1),
6:(6, 1, 1)
}
# upper triangular matrix containing the distance between each point
mat = {i:[dist(dico[i], p2) for p2 in list(dico.values())] for i in list(dico)}
df = | pd.DataFrame(mat) | pandas.DataFrame |
# Zipline API
from zipline.api import order, symbol
from zipline import run_algorithm
# Data frames
import pandas as pd
# Logging
from websocket import create_connection
# Data frame To JSON
from ..api.create_response import create_json_response
def apple_run(shares_per_day, capital_base, start_date, end_date, log_channel):
ws = create_connection("ws://alpharithmic.herokuapp.com/ws/logs/%s/" % log_channel)
msg_placeholder = "{\"message\": \"%s\"}"
ws.send(msg_placeholder % "Link Start")
def init(context):
ws.send(msg_placeholder % "Simulation Start")
pass
def handle(context, data):
order(symbol('AAPL'), shares_per_day)
ws.send(msg_placeholder % ("Ordered %s shares of Apple" % str(shares_per_day)))
start = | pd.to_datetime(start_date) | pandas.to_datetime |
# Dataframe manipulation library
import pandas as pd
# Math functions, we'll only need the sqrt function so let's import only that
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Storing the movie information into a pandas dataframe
movies_df = pd.read_csv('movies.csv')
# Storing the user information into a pandas dataframe
ratings_df = | pd.read_csv('ratings.csv') | pandas.read_csv |
""" Class for creating datasets for training a supervised translation classifier and for crosslingual information
retrieval.
"""
import pandas as pd
from tqdm import tqdm
from src.features.embedding_features import cosine_similarity_vector
from src.utils.timer import timer
class DataSet:
""" Class for creating datasets for training a supervised translation classifier and for crosslingual information
retrieval.
Attributes:
preprocessed_dataframe (dataframe): Preprocessed parallel translations dataframe.
model_subset (dataframe): Subset of preprocessed_dataframe for training a supervised translation classifier.
        retrieval_subset (dataframe): Subset of preprocessed_dataframe for testing crosslingual retrieval models.
        model_dataset (dataframe): Generated dataset for training a supervised translation classifier.
retrieval_dataset (dataframe): Dataset for testing crosslingual retrieval models.
"""
@timer
def __init__(self, preprocessed_data):
""" Initialize class by importing preprocessed data.
Args:
preprocessed_data (dataframe): Preprocessed dataframe of parallel translations.
"""
self.preprocessed_dataframe = preprocessed_data
self.model_subset = pd.DataFrame()
self.retrieval_subset = pd.DataFrame()
self.model_dataset_index = pd.DataFrame()
self.model_dataset = pd.DataFrame()
self.retrieval_dataset = pd.DataFrame()
self.retrieval_dataset_index = pd.DataFrame()
@timer
def split_model_retrieval(self, n_model=20000, n_retrieval=5000):
""" Split data into model dataset and retrieval dataset.
Args:
n_model (int): Number of preprocessed datapoints used for supervised modelling.
n_retrieval (int): Number of preprocessed datapoints used for the retrieval task.
"""
try:
self.model_subset = self.preprocessed_dataframe.iloc[0:n_model]
self.retrieval_subset = self.preprocessed_dataframe.iloc[n_model:(n_model + n_retrieval)]
except IndexError:
print("n_model + n_retrieval must be smaller than the dataset size.")
def create_model_index(self, n_model=5000, k=5, sample_size_k=100,
embedding_source="sentence_embedding_tf_idf_proc_5k_source",
embedding_target="sentence_embedding_tf_idf_proc_5k_target"):
""" Generate dataset for modelling a supervised classifier.
Args:
n_model (int): Number of preprocessed datapoints used for supervised modelling.
k (int): Number of false translated sentences pair for training a supervised classifier.
sample_size_k (int): Number of samples from target per source sentence for searching nearest sentences.
embedding_source (str): Name of source embeddings
embedding_target (str): Name of source embeddings
"""
preprocessed_source = self.model_subset[["id_source", embedding_source]]
preprocessed_target = self.model_subset[["id_target", embedding_target]]
random_sample_right = self.model_subset[["id_source", "id_target"]]
multiplied_source = pd.concat([preprocessed_source] * sample_size_k, ignore_index=True).reset_index(
drop=True)
sample_target = preprocessed_target.sample(n_model * sample_size_k, replace=True, random_state=42).reset_index(
drop=True)
random_sample_wrong = pd.concat([multiplied_source, sample_target], axis=1)
# Select only the 2*k closest sentence embeddings for training to increase the complexity of the task for
# the supervised classifier.
random_sample_wrong["cosine_similarity"] = cosine_similarity_vector(
random_sample_wrong["sentence_embedding_tf_idf_proc_5k_source"],
random_sample_wrong["sentence_embedding_tf_idf_proc_5k_target"])
random_sample_k_index = random_sample_wrong.groupby("id_source")['cosine_similarity'].nlargest(k)
rows = []
for i in tqdm(range(n_model)):
for key in random_sample_k_index[i].keys():
rows.append(key)
random_sample_k = random_sample_wrong.iloc[rows].reset_index(drop=True)[["id_source", "id_target"]]
self.model_dataset_index = | pd.concat([random_sample_right, random_sample_k], axis=0) | pandas.concat |
from drop import utils
import pandas as pd
from pathlib import Path
from collections import defaultdict
from snakemake.logging import logger
import warnings
warnings.filterwarnings("ignore", 'This pattern has match groups')
class SampleAnnotation:
FILE_TYPES = ["RNA_BAM_FILE", "DNA_VCF_FILE", "GENE_COUNTS_FILE"]
SAMPLE_ANNOTATION_COLUMNS = FILE_TYPES + [
"RNA_ID", "DNA_ID", "DROP_GROUP", "GENE_ANNOTATION",
"PAIRED_END", "COUNT_MODE", "COUNT_OVERLAPS", "STRAND", "GENOME"
]
def __init__(self, file, root, genome):
"""
sa_file: sample annotation file location from config
root: output location for file mapping
"""
self.root = Path(root)
self.file = file
self.genome = genome
self.annotationTable = self.parse()
self.idMapping = self.createIdMapping()
self.sampleFileMapping = self.createSampleFileMapping()
self.rnaIDs = self.createGroupIds(file_type="RNA_BAM_FILE", sep=',')
self.dnaIDs = self.createGroupIds(file_type="DNA_VCF_FILE", sep=',')
# external counts
self.extGeneCountIDs = self.createGroupIds(file_type="GENE_COUNTS_FILE", sep=',')
def parse(self, sep='\t'):
"""
read and check sample annotation for missing columns
clean columns and set types
"""
data_types = {
"RNA_ID": str, "DNA_ID": str, "DROP_GROUP": str, "GENE_ANNOTATION": str,
"PAIRED_END": bool, "COUNT_MODE": str, "COUNT_OVERLAPS": bool, "STRAND": str, "GENOME": str
}
sa = | pd.read_csv(self.file, sep=sep, index_col=False) | pandas.read_csv |
import pandas as pd
melb_df = pd.read_csv('data/melb_data_fe.csv')
print(melb_df.head())
print(melb_df.info)
melb_df['Date'] = pd.to_datetime(melb_df['Date'])
quarters = melb_df['Date'].dt.quarter
print(quarters.value_counts().iloc[1])
cols_to_exclude = ['Date', 'Rooms', 'Bathroom', 'Bedroom', 'Car']
max_unique_count = 150
for col in melb_df.columns:
if melb_df[col].nunique(
) < max_unique_count and col not in cols_to_exclude:
melb_df[col] = melb_df[col].astype('category')
print(melb_df.info)
print(round(melb_df.sort_values(by='AreaRatio', ignore_index=True,
ascending=False).loc[1558, 'BuildingArea']))
mask1 = melb_df['Type'] == 'townhouse'
mask2 = melb_df['Rooms'] > 2
print(round(melb_df[mask1 & mask2].sort_values(
by=['Rooms', 'MeanRoomsSquare'], ascending=[
True, False], ignore_index=True).loc[18, 'Price']))
print(melb_df.groupby('Rooms')['Price'].mean().sort_values(ascending=False))
print(melb_df.groupby('Regionname')['Lattitude'].std().sort_values())
date1 = | pd.to_datetime('2017-05-01') | pandas.to_datetime |
# Copyright 2022 Microsoft Corporation.
'''
Helper functions for performing coord check.
'''
import os
from copy import copy
from itertools import product
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
def cov(x):
    '''Treat `x` as a collection of vectors and compute its Gram matrix.
Input:
x: If it has shape [..., d], then it's treated as
a collection of d-dimensional vectors
Output:
cov: a matrix of size N x N where N is the product of
the non-last dimensions of `x`.
'''
if x.nelement() == 1:
width = 1
xx = x.reshape(1, 1)
else:
width = x.shape[-1]
xx = x.reshape(-1, x.shape[-1])
return xx @ xx.T / width
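# small illustrative check of `cov` (example values only):
# x = torch.randn(3, 5, 4)   # treated as 15 vectors of dimension 4
# cov(x).shape               # -> torch.Size([15, 15])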
def covoffdiag(x):
'''Get off-diagonal entries of `cov(x)` in a vector.
Input:
x: If it has shape [..., d], then it's treated as
a collection of d-dimensional vectors
Output:
Off-diagonal entries of `cov(x)` in a vector.'''
c = cov(x)
return c[~torch.eye(c.shape[0], dtype=bool)]
#: dict of provided functions for use in coord check
FDICT = {
'l1': lambda x: torch.abs(x).mean(),
'l2': lambda x: (x**2).mean()**0.5,
'mean': lambda x: x.mean(),
'std': lambda x: x.std(),
'covl1': lambda x: torch.abs(cov(x)).mean(),
'covl2': lambda x: (cov(x)**2).mean()**0.5,
'covoffdiagl1': lambda x: torch.abs(covoffdiag(x)).mean(),
'covoffdiagl2': lambda x: (covoffdiag(x)**2).mean()**0.5
}
def convert_fdict(d):
'''convert a dict `d` with string values to function values.
Input:
d: a dict whose values are either strings or functions
Output:
a new dict, with the same keys as `d`, but the string values are
converted to functions using `FDICT`.
'''
return dict([
((k, FDICT[v]) if isinstance(v, str) else (k, v))
for k, v in d.items()])
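# Example: string values are looked up in FDICT, callables pass through unchanged (illustrative only):
# convert_fdict({'l1': 'l1', 'maxabs': lambda x: x.abs().max()})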
def _record_coords(records, width, modulename, t,
output_fdict=None, input_fdict=None, param_fdict=None):
'''Returns a forward hook that records coordinate statistics.
Returns a forward hook that records statistics regarding the output, input,
and/or parameters of a `nn.Module`. This hook is intended to run only once,
on the timestep specified by `t`.
On forward pass, the returned hook calculates statistics specified in
`output_fdict`, `input_fdict`, and `param_fdict`, such as the normalized l1
norm, of output, input, and/or parameters of the module. The statistics are
recorded along with the `width`, `modulename`, and `t` (the time step) as a
dict and inserted into `records` (which should be a list). More precisely,
for each output, input, and/or parameter, the inserted dict is of the form
{
'width': width, 'module': modified_modulename, 't': t,
# keys are keys in fdict
'l1': 0.241, 'l2': 0.420, 'mean': 0.0, ...
}
where `modified_modulename` is a string that combines the `modulename` with
an indicator of which output, input, or parameter tensor is the statistics
computed over.
The `*_fdict` inputs should be dictionaries with string keys and whose
values can either be functions or strings. The string values are converted
to functions via `convert_fdict`. The default values of `*_dict` inputs are
converted to `output_fdict = dict(l1=FDICT['l1'])`, `input_fdict = {}`,
`param_fdict = {}`, i.e., only the average coordinate size (`l1`) of the
output activations are recorded.
Inputs:
records:
list to append coordinate data to
width:
width of the model. This is used only for plotting coord check later
on, so it can be any notion of width.
modulename:
string name of the module. This is used only for plotting coord check.
t:
timestep of training. This is used only for plotting coord check.
output_fdict, input_fdict, param_fdict:
dicts with string keys and whose values can either be functions or
strings. The string values are converted to functions via
`convert_fdict`
Output:
a forward hook that records statistics regarding the output, input,
and/or parameters of a `nn.Module`, as discussed above.
'''
if output_fdict is None:
output_fdict = dict(l1=FDICT['l1'])
else:
output_fdict = convert_fdict(output_fdict)
if input_fdict is None:
input_fdict = {}
else:
input_fdict = convert_fdict(input_fdict)
if param_fdict is None:
param_fdict = {}
else:
param_fdict = convert_fdict(param_fdict)
def f(module, input, output):
def get_stat(d, x, fdict):
if isinstance(x, (tuple, list)):
for i, _x in enumerate(x):
_d = copy(d)
_d['module'] += f'[{i}]'
get_stat(_d, _x, fdict)
elif isinstance(x, dict):
for name, _x in x.items():
_d = copy(d)
_d['module'] += f'[{name}]'
get_stat(_d, _x, fdict)
elif isinstance(x, torch.Tensor):
_d = copy(d)
for fname, f in fdict.items():
_d[fname] = f(x).item()
records.append(_d)
else:
                raise NotImplementedError(f'Unexpected output type: {type(x)}')
with torch.no_grad():
ret = {
'width': width,
'module': modulename,
't': t
}
# output stats
if isinstance(output, (tuple, list)):
for i, out in enumerate(output):
_ret = copy(ret)
_ret['module'] += f':out[{i}]'
get_stat(_ret, out, output_fdict)
elif isinstance(output, dict):
for name, out in output.items():
_ret = copy(ret)
_ret['module'] += f':out[{name}]'
get_stat(_ret, out, output_fdict)
elif isinstance(output, torch.Tensor):
_ret = copy(ret)
for fname, f in output_fdict.items():
_ret[fname] = f(output).item()
records.append(_ret)
else:
            raise NotImplementedError(f'Unexpected output type: {type(output)}')
# input stats
if input_fdict:
if isinstance(input, (tuple, list)):
for i, out in enumerate(input):
_ret = copy(ret)
_ret['module'] += f':in[{i}]'
get_stat(_ret, out, input_fdict)
elif isinstance(input, dict):
for name, out in input.items():
_ret = copy(ret)
_ret['module'] += f':in[{name}]'
get_stat(_ret, out, input_fdict)
elif isinstance(input, torch.Tensor):
_ret = copy(ret)
for fname, f in input_fdict.items():
_ret[fname] = f(input).item()
records.append(_ret)
else:
                raise NotImplementedError(f'Unexpected output type: {type(input)}')
# param stats
if param_fdict:
for name, p in module.named_parameters():
_ret = copy(ret)
_ret['module'] += f':param[{name}]'
for fname, f in param_fdict.items():
_ret[fname] = f(p).item()
records.append(_ret)
return f
def _get_coord_data(models, dataloader, optcls, nsteps=3,
dict_in_out=False, flatten_input=False, flatten_output=False,
output_name='loss', lossfn='xent', filter_module_by_name=None,
fix_data=True, cuda=True, nseeds=1,
output_fdict=None, input_fdict=None, param_fdict=None,
show_progress=True):
'''Inner method for `get_coord_data`.
Train the models in `models` with optimizer given by `optcls` and data from
`dataloader` for `nsteps` steps, and record coordinate statistics specified
by `output_fdict`, `input_fdict`, `param_fdict`. By default, only `l1` is
computed for output activations of each module.
Inputs:
models:
a dict of lazy models, where the keys are numbers indicating width.
Each entry of `models` is a function that instantiates a model given
nothing.
dataloader:
an iterator whose elements are either Huggingface style dicts, if
`dict_in_out` is True, or (input, label). If `fix_data` is True
(which is the default), then only the first element of `dataloader`
is used in a loop and the rest of `dataloder` is ignored.
optcls:
a function so that `optcls(model)` gives an optimizer used to train
the model.
nsteps:
number of steps to train the model
dict_in_out:
whether the data loader contains Huggingface-style dict input and
output. Default: False
flatten_input:
if not `dict_in_out`, reshape the input to be
`input.view(input.shape[0], -1)`. Typically used for testing MLPs.
flatten_output:
if not `dict_in_out`, reshape the label to be `label.view(-1,
input.shape[-1])`.
output_name:
if `dict_in_out`, this is the key for the loss value if the output
is a dict. If the output is not a dict, then we assume the first
element of the output is the loss.
lossfn:
loss function to use if not `dict_in_out`. Default is `xent` for
cross entropy loss. Other choices are ['mse', 'nll']
filter_module_by_name:
a function that returns a bool given module names (from
`model.named_modules()`), or None. If not None, then only modules
whose name yields True will be recorded.
cuda:
whether to use cuda or not. Default: True
nseeds:
number of times to repeat the training, each with different seeds.
output_fdict, input_fdict, param_fdict:
function dicts to be used in `_record_coords`. By default, only `l1`
is computed for output activations of each module.
show_progress:
show progress using tqdm.
Output:
a pandas DataFrame containing recorded results. The column names are
`'width', 'module', 't'` as well as names of statistics recorded, such
as `'l1'` (see `FDICT` for other premade statistics that can be
collected).
'''
df = []
if fix_data:
batch = next(iter(dataloader))
dataloader = [batch] * nsteps
if show_progress:
from tqdm import tqdm
pbar = tqdm(total=nseeds * len(models))
for i in range(nseeds):
torch.manual_seed(i)
for width, model in models.items():
model = model()
model = model.train()
if cuda:
model = model.cuda()
optimizer = optcls(model)
for batch_idx, batch in enumerate(dataloader, 1):
remove_hooks = []
# add hooks
for name, module in model.named_modules():
if filter_module_by_name and not filter_module_by_name(name):
continue
remove_hooks.append(module.register_forward_hook(
_record_coords(df, width, name, batch_idx,
output_fdict=output_fdict,
input_fdict=input_fdict,
param_fdict=param_fdict)))
if dict_in_out:
if cuda:
for k, v in batch.items():
if isinstance(v, torch.Tensor):
batch[k] = v.cuda()
outputs = model(**batch)
loss = outputs[output_name] if isinstance(outputs, dict) else outputs[0]
else:
(data, target) = batch
if cuda:
data, target = data.cuda(), target.cuda()
if flatten_input:
data = data.view(data.size(0), -1)
output = model(data)
if flatten_output:
output = output.view(-1, output.shape[-1])
if lossfn == 'xent':
loss = F.cross_entropy(output, target)
elif lossfn == 'mse':
loss = F.mse_loss(output, F.one_hot(target, num_classes=output.size(-1)).float())
elif lossfn == 'nll':
loss = F.nll_loss(output, target)
else:
raise NotImplementedError()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# remove hooks
for handle in remove_hooks:
handle.remove()
if batch_idx == nsteps: break
if show_progress:
pbar.update(1)
if show_progress:
pbar.close()
return | pd.DataFrame(df) | pandas.DataFrame |
from dateutil import parser
import numpy as np
import pandas as pd
import urllib3
import json
import datetime as dt
import time
import warnings
import math
#######################################################################
# drops invalid data from our history
def dropDirty(history, exWeekends):
history = history[(history.Open != 0)
& (history.High != 0)
& (history.Low != 0)
& (history.Close != 0)]
history = history[( | pd.isnull(history.Open) | pandas.isnull |
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: abnormal_detection_gaussian.py
@time: 2019-04-18 18:03
"""
import pandas as pd
from mayiutils.file_io.pickle_wrapper import PickleWrapper as picklew
from feature_selector import FeatureSelector
if __name__ == '__main__':
mode = 4
if mode == 4:
"""
feature selector
"""
# df1 = pd.read_excel('data.xlsx').iloc[:, 1:]
# print(df1.info())
df = pd.read_excel('/Users/luoyonggui/Documents/work/dataset/0/data.xlsx')
        # print(df.info())  # inspect df columns and missing values
label = df['理赔结论']
df = df.drop(columns=['理赔结论'])
fs = FeatureSelector(data=df, labels=label)
        # handle missing values
fs.identify_missing(missing_threshold=0.6)
if mode == 3:
"""
合并参保人基本信息
"""
df1 = pd.read_excel('data.xlsx', 'Sheet2').dropna(axis=1, how='all')
# print(df1.info())
"""
归并客户号 528 non-null int64
性别 528 non-null object
出生年月日 528 non-null datetime64[ns]
婚姻状况 432 non-null object
职业 484 non-null float64
职业危险等级 484 non-null float64
年收入 528 non-null int64
年交保费 528 non-null float64
犹豫期撤单次数 528 non-null int64
既往理赔次数 528 non-null int64
既往拒保次数 528 non-null int64
既往延期承保次数 528 non-null int64
非标准体承保次数 528 non-null int64
既往调查标识 528 non-null object
既往体检标识 528 non-null object
累积寿险净风险保额 528 non-null float64
累积重疾净风险保额 528 non-null float64
投保人年收入与年交保费比值 437 non-null float64
被保险人有效重疾防癌险保单件数 528 non-null int64
被保险人有效短期意外险保单件数 528 non-null int64
被保险人有效短期健康险保单件数 528 non-null int64
被保险人90天内生效保单件数 528 non-null int64
被保险人180天内生效保单件数 528 non-null int64
被保险人365天内生效保单件数 528 non-null int64
被保险人730天内生效保单件数 528 non-null int64
客户黑名单标识 528 non-null object
保单失效日期 11 non-null datetime64[ns]
保单复效日期 7 non-null datetime64[ns]
受益人变更日期 12 non-null datetime64[ns]
"""
cols = list(df1.columns)
cols.remove('保单失效日期')
cols.remove('保单复效日期')
cols.remove('受益人变更日期')
        cols.remove('客户黑名单标识')  # only one distinct value
df1['出生年'] = df1['出生年月日'].apply(lambda x: int(str(x)[:4]))
cols.append('出生年')
cols.remove('出生年月日')
t = pd.get_dummies(df1['婚姻状况'], prefix='婚姻状况')
df2 = pd.concat([df1, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('婚姻状况')
t = pd.get_dummies(df2['性别'], prefix='性别')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('性别')
t = pd.get_dummies(df2['既往调查标识'], prefix='既往调查标识')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('既往调查标识')
t = pd.get_dummies(df2['既往体检标识'], prefix='既往体检标识')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('既往体检标识')
# print(df2['职业'].value_counts())
"""
取前四位 分类
"""
df2['职业'] = df2['职业'].apply(lambda x: str(x)[:1])
# print(df2['职业'].value_counts())
t = pd.get_dummies(df2['职业'], prefix='职业')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('职业')
print(df2['职业危险等级'].value_counts())
t = pd.get_dummies(df2['职业危险等级'], prefix='职业危险等级')
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
cols.extend(list(t.columns))
cols.remove('职业危险等级')
df2['投保人年收入与年交保费比值'] = df2['投保人年收入与年交保费比值'].fillna(0)
"""
        The merged customer id has duplicates; keep the first record of each duplicate
"""
df2 = df2.drop_duplicates(subset=['归并客户号'], keep='first')
print(df2['归并客户号'].value_counts())
df2 = df2.rename(columns={'归并客户号': '被保人归并客户号'})
cols.remove('归并客户号')
cols.append('被保人归并客户号')
# print(df2[cols].info())
        # merge
train_df = picklew.loadFromFile('train_data1.pkl')
print(train_df.shape)
train_df = pd.merge(train_df, df2[cols], how='left', on='被保人归并客户号')
del train_df['营销员工号']
del train_df['被保人核心客户号']
del train_df['保人归并客户号']
del train_df['被保人归并客户号']
print(train_df.shape) # (562, 30)
print(train_df.info())
del train_df['理赔金额']
picklew.dump2File(train_df, 'train_data2.pkl')
if mode == 2:
"""
        Merge sales agent information
"""
df1 = pd.read_excel('data.xlsx', 'Sheet1')
# print(df1.info())
"""
RangeIndex: 532 entries, 0 to 531
Data columns (total 7 columns):
营销员工号 532 non-null int64
营销员黑名单标记 326 non-null object
营销员入司时间 326 non-null datetime64[ns]
营销员离职时间 97 non-null datetime64[ns]
营销员所售保单数量 532 non-null int64
营销员所售保单标准体数量 532 non-null int64
营销员所售保单出险数量 532 non-null int64
"""
cols = list(df1.columns)
# print(df1['营销员黑名单标记'].value_counts())
"""
        All values are N, not informative, drop it
"""
cols.remove('营销员离职时间')
df2 = df1[cols].dropna()
cols.remove('营销员黑名单标记')
cols.remove('营销员入司时间')
df2 = df2[cols]
# print(df2.info())
"""
营销员工号 326 non-null int64
营销员所售保单数量 326 non-null int64
营销员所售保单标准体数量 326 non-null int64
营销员所售保单出险数量 326 non-null int64
"""
# print(df2['营销员工号'].value_counts())
# print(df2.info())
        # merge dataframes
train_df = picklew.loadFromFile('train_data.pkl')
train_df = train_df.rename(columns={'(SELECTDISTINCTLJ.AGENTCODEFRO销售人员工号':'营销员工号'})
print(train_df.shape)
# train_df = pd.merge(train_df, df2, how='left', on='营销员工号')
print(train_df.shape)#(562, 30)
print(train_df.info())
picklew.dump2File(train_df, 'train_data1.pkl')
if mode == 1:
"""
Main policy/claim table.
"""
df1 = pd.read_excel('data.xlsx').iloc[:, 1:]
# print(df1.shape)#(562, 41)
# print(df1.columns)
"""
['平台流水号', '保单管理机构', '保单号', '指定受益人标识', '受益人与被保险人关系', '交费方式',
'交费期限', '核保标识', '核保结论', '投保时年龄', '基本保额与体检保额起点比例', '生调保额起点',
'投保保额临近核保体检临界点标识', '投保保额', '临近核保生调临界点标识', '理赔金额', '累计已交保费', '理赔结论',
'Unnamed: 19', '生效日期', '出险前最后一次复效日期', '承保后最小借款日期', '出险日期', '报案时间',
'申请日期', '出险减生效天数', '出险减最后一次复效天数', '重疾保单借款减生效日期天数', '申请时间减出险时间',
'报案时间减出险时间', '出险原因1', '出险原因2', '出险原因3', '出险结果', '保单借款展期未还次数', '失复效记录次数',
'销售渠道', '(SELECTDISTINCTLJ.AGENTCODEFRO销售人员工号', '被保人核心客户号', '保人归并客户号',
'被保人归并客户号']
"""
# Drop columns that are entirely null
df2 = df1.dropna(axis=1, how='all')
# print(df2.shape)#(562, 33)
# print(df2.columns)
"""
['平台流水号', '保单管理机构', '保单号', '指定受益人标识', '受益人与被保险人关系', '交费方式', '交费期限',
'核保标识', '核保结论', '投保时年龄', '投保保额', '理赔金额', '累计已交保费', '理赔结论',
'Unnamed: 19', '生效日期', '出险前最后一次复效日期', '承保后最小借款日期', '出险日期', '报案时间',
'申请日期', '出险减生效天数', '出险减最后一次复效天数', '申请时间减出险时间', '报案时间减出险时间', '出险原因1',
'出险结果', '失复效记录次数', '销售渠道', '(SELECTDISTINCTLJ.AGENTCODEFRO销售人员工号',
'被保人核心客户号', '保人归并客户号', '被保人归并客户号']
"""
# print(df2.info())
"""
平台流水号 562 non-null int64
保单管理机构 562 non-null int64
保单号 562 non-null int64
指定受益人标识 562 non-null object
受益人与被保险人关系 538 non-null object
交费方式 562 non-null object
交费期限 562 non-null int64
核保标识 562 non-null object
核保结论 544 non-null object
投保时年龄 562 non-null int64
投保保额 562 non-null float64
理赔金额 562 non-null float64
累计已交保费 562 non-null float64
理赔结论 562 non-null object
Unnamed: 19 562 non-null int64
生效日期 562 non-null datetime64[ns]
出险前最后一次复效日期 6 non-null datetime64[ns]
承保后最小借款日期 2 non-null datetime64[ns]
出险日期 562 non-null datetime64[ns]
报案时间 119 non-null datetime64[ns]
申请日期 562 non-null datetime64[ns]
出险减生效天数 562 non-null int64
出险减最后一次复效天数 6 non-null float64
申请时间减出险时间 562 non-null int64
报案时间减出险时间 119 non-null float64
出险原因1 562 non-null object
出险结果 552 non-null object
失复效记录次数 562 non-null int64
销售渠道 562 non-null object
(SELECTDISTINCTLJ.AGENTCODEFRO销售人员工号 562 non-null int64
被保人核心客户号 562 non-null int64
保人归并客户号 562 non-null int64
被保人归并客户号 562 non-null int64
"""
train_col = list(df2.columns)
train_col.remove('平台流水号')
train_col.remove('Unnamed: 19')
train_col.remove('生效日期')
train_col.remove('出险日期')
train_col.remove('报案时间')
train_col.remove('申请日期')
train_col.remove('出险减最后一次复效天数')
train_col.remove('报案时间减出险时间')
train_col.remove('出险前最后一次复效日期')
train_col.remove('承保后最小借款日期')
# print(df2[train_col].info())
"""
保单管理机构 562 non-null int64
保单号 562 non-null int64
指定受益人标识 562 non-null object
受益人与被保险人关系 538 non-null object
交费方式 562 non-null object
交费期限 562 non-null int64
核保标识 562 non-null object
核保结论 544 non-null object
投保时年龄 562 non-null int64
投保保额 562 non-null float64
理赔金额 562 non-null float64
累计已交保费 562 non-null float64
出险减生效天数 562 non-null int64
申请时间减出险时间 562 non-null int64
出险原因1 562 non-null object
出险结果 552 non-null object
失复效记录次数 562 non-null int64
销售渠道 562 non-null object
(SELECTDISTINCTLJ.AGENTCODEFRO销售人员工号 562 non-null int64
被保人核心客户号 562 non-null int64
保人归并客户号 562 non-null int64
被保人归并客户号 562 non-null int64
"""
label = df2['理赔结论']
train_col.remove('理赔结论')  # remove the label column from the feature list
# print(label.value_counts())
"""
正常给付 432
全部拒付 107
协议给付 15
部分给付 8
"""
# print(df1['保单号'].value_counts())
train_col.remove('保单号')
# print(df1['保单管理机构'].value_counts())
"""
Keep only the first four digits of 保单管理机构 (policy management branch code).
"""
df2['保单管理机构'] = df2['保单管理机构'].copy().apply(lambda x: str(x)[:4])
# print(df2['保单管理机构'].value_counts())
"""
8603 280
8602 163
8605 65
8604 34
8606 16
8608 4
"""
t = pd.get_dummies(df2['指定受益人标识'], prefix='指定受益人标识')
print(df2.shape)
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
train_col.extend(list(t.columns))
train_col.remove('指定受益人标识')
t = pd.get_dummies(df2['交费方式'], prefix='交费方式')
print(df2.shape)
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
train_col.extend(list(t.columns))
train_col.remove('交费方式')
t = pd.get_dummies(df2['核保标识'], prefix='核保标识')
print(df2.shape)
df2 = pd.concat([df2, t], axis=1)
print(df2.shape)
train_col.extend(list(t.columns))
train_col.remove('核保标识')
t = pd.get_dummies(df2['保单管理机构'], prefix='保单管理机构')
print(df2.shape)
df2 = | pd.concat([df2, t], axis=1) | pandas.concat |
########################################################################
# Required packages
########################################################################
import argparse
import sys
import os
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
import tomotopy as tp
from pyteomics import mgf, auxiliary
from rdkit import Chem
import scipy.spatial
########################################################################
# Parse arguments
########################################################################
parser = argparse.ArgumentParser()
def file_choices(choices,fname, argname):
ext = os.path.splitext(fname)[1]
if ext not in choices:
parser.error(f"{argname} must be one of the following filetypes ({choices})")
return fname
parser.add_argument('--Q', required = True, type = float)
parser.add_argument('--B', required = True, type = float)
parser.add_argument('--out_dir', required = True)
parser.add_argument('--train_mgf', required = True)
parser.add_argument('--test_mgf', required = True)
parser.add_argument('--documents_dir', required = True)
parser.add_argument('--df_substructs', type=lambda s:file_choices((".tsv"),s, '--df_substructs'), required = True)
parser.add_argument('--df_labels', type=lambda s:file_choices((".tsv"),s, '--df_labels'), required = True)
parser.add_argument('--num_iterations', required = True)
parser.add_argument('--mz_cutoff', type = float, default = 30.0)
parser.add_argument('--loss_types', choices = ['none', 'parent', 'all'], type = str, default = 'all')
args = parser.parse_args()
os.system(f"mkdir -p {args.out_dir}")
with open(os.path.join(args.out_dir, 'log.txt'), 'w') as f:
print(f"{' '.join(sys.argv)}\n", file = f)
print(args, file = f)
########################################################################
df_substructs = pd.read_csv(args.df_substructs, sep = '\t')
def get_mes_vec(s):
mol = Chem.MolFromSmiles(s)
vec = np.array([mol.HasSubstructMatch(Chem.MolFromSmarts(patt)) for patt in df_substructs['smarts']]).astype(int)
return(vec)
def get_mes_labels(s):
v = get_mes_vec(s)
return(df_substructs.iloc[np.where(v > 0)]['index'].values)
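# get_mes_vec converts a SMILES string to an RDKit molecule and builds a binary
# vector of SMARTS substructure matches against df_substructs; get_mes_labels
# returns the 'index' values of the matched substructures, which become the
# known topic labels fed to the labeled-LDA model below.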
########################################################################
def glf(Q, B, x, v = 0.5, A = 0, K = 100, C = 1):
res = (K - A) / np.power(C + Q * np.exp(-B * x), (1 / v))
return(res)
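# glf is a generalized logistic (Richards) curve that maps relative peak
# intensity x (0-100) to a weight in (0, K]; Q shifts the curve and B controls
# its steepness. For example (hypothetical values Q=5, B=0.1): glf(5, 0.1, 0)
# is about 2.8 while glf(5, 0.1, 100) is about 99.95, so intense peaks
# contribute far more pseudo-counts to a document than weak ones.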
########################################################################
doc_indices = []
doc_fnames = []
doc_smiles = []
doc_sids = []
mdl = tp.LLDAModel(seed = 2010)
print('Generating documents...')
sys.stdout.flush()
def make_doc(spec, documents_dir):
in_fname = os.path.join(documents_dir, spec['params']['id'] + '.tsv')
df = pd.read_csv(in_fname, sep = '\t')
if 'mz' not in df.columns:
df['mz'] = df['m/z']
df['mz_rounded'] = df['mz'].round(2)
df['rel_intensity'] = df['intensity'] / df['intensity'].max() * 100
df['rel_intensity_rounded'] = df['rel_intensity'].round(0).astype(int)
df = df[df['mz'] >= args.mz_cutoff] # filter by m/z
df['intensity'] = df['intensity'].astype(int)
df['glf_intensity'] = glf(Q = args.Q, B = args.B, x = df['rel_intensity'])
doc_frags = np.concatenate([[w for _ in range(round(c))] for w, c in df[df['index'].str.contains('frag_')][['formula', 'glf_intensity']].values]).astype(str)
if args.loss_types == 'parent':
parent_index = df[df['index'].str.contains('frag_')].sort_values(by = 'mz', ascending = False).iloc[0]['index']
df = df[(df['index'].str.contains('loss_')) & (df['from_index'] == parent_index)]
if np.any(df['index'].str.contains('loss_')) and args.loss_types != 'none':  # include losses unless --loss_types none
doc_losses = np.concatenate([[f"loss_{w}" for _ in range(round(c))] for w, c in df[df['index'].str.contains('loss_')][['formula', 'glf_intensity']].values]).astype(str)
else:
doc_losses = np.array([])
doc = np.concatenate([doc_frags, doc_losses])
return(doc)
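# make_doc turns one spectrum into a bag-of-words "document": each fragment
# (and, depending on --loss_types, each neutral-loss) formula is repeated
# roughly glf_intensity times, so peak intensity is encoded as word frequency
# for the topic model.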
n_train_spectra = sum(1 for _ in mgf.MGF(args.train_mgf))  # count spectra for the progress bar
f = mgf.MGF(args.train_mgf)
for spec in tqdm(f, total = n_train_spectra):
try:
doc = make_doc(spec, args.documents_dir)
s = spec['params']['smiles']
sid = spec['params']['id']
s_fname = os.path.join(args.documents_dir, sid + '.tsv')
labs = get_mes_labels(s)
di = mdl.add_doc(doc, labels = labs)
doc_indices.append(di)
doc_smiles.append(s)
doc_fnames.append(s_fname)
doc_sids.append(sid + '_train')
except Exception:
s_fname = os.path.join(args.documents_dir, spec['params']['id'] + '.tsv')
print(s_fname)
sys.stdout.flush()
continue
os.system(f'mkdir -p {args.out_dir}')
df_out = | pd.DataFrame({'doc_index' : doc_indices, 'fname' : doc_fnames, 'smiles' : doc_smiles}) | pandas.DataFrame |
from pathlib import Path
from unittest import TestCase
import pandas as pd
from src.data.datasets import employment_rate_by_age_csv, job_market_xlsx
from src.data.entities import prop_industrial_section, prop_employment_status, prop_ishealthcare, \
Gender
from src.features import EmploymentParams, Employment
from src.generation.population_generator_for_cities import age_gender_generation_population_from_files, age_range_to_age
class TestEmployment(TestCase):
def setUp(self) -> None:
project_dir = Path(__file__).resolve().parents[2]
city = 'DW'
self.data_folder = project_dir / 'data' / 'processed' / 'poland' / city
self.resources_folder = Path(__file__).resolve().parent / 'resources'
self.population = age_gender_generation_population_from_files(self.data_folder)
self.population = age_range_to_age(self.population)
def test_employment(self):
population = Employment().generate(EmploymentParams(self.data_folder), self.population)
population_columns = population.columns.tolist()
self.assertIn(prop_industrial_section, population_columns)
self.assertIn(prop_employment_status, population_columns)
self.assertIn(prop_ishealthcare, population_columns)
def test_split_population_by_age(self):
people_by_age = Employment()._split_shuffle_population_by_age(self.population, Gender.FEMALE)
self.assertEqual(3, len(people_by_age))
expected_eligible_to_work = len(self.population[(self.population.age >= 15) & (self.population.age < 65)
& (self.population.gender == Gender.FEMALE.value)].index)
self.assertEqual(expected_eligible_to_work, sum([len(x) for x in people_by_age.values()]))
def _prepare_gender_by_age(self, young, middle, middle_immobile):
return {Employment.young_adults_class: list(range(young)),
Employment.middle_aged_class: list(range(middle)),
Employment.middle_aged_immobile_class: list(range(middle_immobile))}
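# _prepare_gender_by_age builds a mapping from the Employment age-class
# constants to lists of dummy person indices (e.g. 100 young adults, 300
# middle-aged, 100 middle-aged immobile) that the job-market test below feeds
# into _get_job_market_per_age_group.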
def test_get_job_market_per_age_group(self):
employment_feature = Employment()
employment_rate_per_age = pd.read_csv(str(self.resources_folder / employment_rate_by_age_csv.file_name))
job_market_df = pd.read_excel(str(self.resources_folder / job_market_xlsx.file_name),
sheet_name=job_market_xlsx.sheet_name)
gender_by_age = self._prepare_gender_by_age(100, 300, 100)
gender_column = Employment.females_col
result = employment_feature._get_job_market_per_age_group(employment_rate_per_age, gender_by_age, job_market_df,
gender_column)
class_1 = [3, 6, 9, 2]
class_2 = [40, 79, 119, 32]
class_3 = [7, 15, 22, 6]
expected_result = | pd.DataFrame(data={1: class_1, 2: class_2, 3: class_3, 'id': ['A', 'B', 'C', 'D']}) | pandas.DataFrame |
import numpy as np
import pandas as pd
from tqdm import tqdm
"""
@author (Brian) <NAME>
Our data has classes 0-4 with significant class imbalance. To address this, the
model is pretrained on the 2019 data with the classes balanced by samples from
the 2015 competition; the low-frequency classes 1, 3 and 4 receive extra samples.
This script randomly selects 2015 images to even out the class distribution and
saves the resulting labels to a new CSV file.
"""
balancing_limit = 2000
old_df = | pd.read_csv("/nas-homes/joonl4/blind_2015/trainLabels.csv") | pandas.read_csv |
# coding: utf-8
import json
import re
import pandas as pd
import numpy as np
from os.path import dirname, abspath
from six import string_types
import cobra
from kappmax_prediction_scripts.proteomics_helper_functions import \
get_all_protein_localizations, get_metabolic_reactions_from_gene, \
get_membrane_transport_genes, get_rna_modfication_genes, \
get_sum_of_metabolic_fluxes_of_gene
import kappmax_prediction_scripts
scripts_dir = dirname(abspath(kappmax_prediction_scripts.__file__))
home_dir = dirname(scripts_dir)
resource_dir = home_dir + '/data/'
ijo = cobra.io.load_json_model('%s/iJO1366.json' % scripts_dir)
with open('%s/gene_name_to_bnum.json' % resource_dir, 'r') as f:
bnum_to_gene = json.load(f)
def get_sim_raw_df(model, sim_filename):
with open(sim_filename, 'r') as f:
me_sim = json.load(f)
series_dict = dict()
model.solution = cobra.core.Solution(me_sim['biomass_dilution'],
x_dict=me_sim, status='optimal')
for key, value in model.get_translation_flux().items():
gene = key.replace('translation_', '')
series_dict[gene] = value
sim_series = pd.Series(series_dict)
return sim_series
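# The per-gene translation flux extracted from the ME-model solution appears to
# serve as the simulated proxy for protein expression that is later compared
# against the proteomics data loaded in get_proteomics_data_raw_df.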
def get_old_sim_raw_df(model, media, old_me_sims_filename):
media = media.map_media_to_old_me_df[media]
old_sim_df = pd.read_pickle(old_me_sims_filename)
sim_df_filtered = old_sim_df[media]
series_dict = {}
for gene in sim_df_filtered.index:
protein_id = 'protein_' + gene
if protein_id not in model.metabolites:
continue
series_dict[gene] = sim_df_filtered[gene]
old_sim_series = pd.Series(series_dict)
return old_sim_series
def get_proteomics_data_raw_df(model, media, proteomics_data_path):
# Load proteomics data and filter genes not modeled in ME-model
data_df = pd.read_excel(proteomics_data_path)
data_df = data_df.set_index('Gene').rename(index=bnum_to_gene)
data_series = data_df[media].copy()
cog_column = ['Annotated functional COG group (description)']
data_dict = {}
cog_dict = {}
genes = [i.id.replace('RNA_', '') for i in
model.metabolites.query(re.compile('RNA_b[0-9]'))]
for gene in genes:
protein_id = 'protein_' + gene
# Some RNAs do not code proteins, skip these
if protein_id not in model.metabolites:
continue
if gene in data_series.index:
# Some genes have two entries. Take the average of these
data_dict[gene] = data_series[gene].mean()
# Handle genes with duplicate entries
cog_value = data_df.loc[gene, cog_column]
if isinstance(cog_value, pd.DataFrame):
cog_value = cog_value.values[0]
if isinstance(cog_value, pd.Series):
cog_value = cog_value.values[0]
if isinstance(cog_value, np.ndarray):
cog_value = cog_value[0]
if type(cog_value) == list:
cog_value = cog_value[0]
if not isinstance(cog_value, string_types) and \
type(cog_value) != float:
raise UserWarning('Cog is bad', cog_value, type(cog_value))
cog_dict[gene] = cog_value
filtered_data_series = pd.Series(data_dict)
cog_series = | pd.Series(cog_dict) | pandas.Series |