#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import matplotlib as mat
import numpy as np
import pandas as pd
from matplotlib.axes._base import _process_plot_format
from pandas.core.dtypes.inference import is_list_like
from pandas.io.formats.printing import pprint_thing
from pyspark.pandas.plot import (
TopNPlotBase,
SampledPlotBase,
HistogramPlotBase,
BoxPlotBase,
unsupported_function,
KdePlotBase,
)
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
from pandas.plotting._core import (
_all_kinds,
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
MPLPlot as PandasMPLPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
else:
from pandas.plotting._matplotlib import (
BarPlot as PandasBarPlot,
BoxPlot as PandasBoxPlot,
HistPlot as PandasHistPlot,
PiePlot as PandasPiePlot,
AreaPlot as PandasAreaPlot,
LinePlot as PandasLinePlot,
BarhPlot as PandasBarhPlot,
ScatterPlot as PandasScatterPlot,
KdePlot as PandasKdePlot,
)
from pandas.plotting._core import PlotAccessor
from pandas.plotting._matplotlib.core import MPLPlot as PandasMPLPlot
_all_kinds = PlotAccessor._all_kinds
class PandasOnSparkBarPlot(PandasBarPlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _plot(self, ax, x, y, w, start=0, log=False, **kwds):
self.set_result_text(ax)
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
class PandasOnSparkBoxPlot(PandasBoxPlot, BoxPlotBase):
def boxplot(
self,
ax,
bxpstats,
notch=None,
sym=None,
vert=None,
whis=None,
positions=None,
widths=None,
patch_artist=None,
bootstrap=None,
usermedians=None,
conf_intervals=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
boxprops=None,
labels=None,
flierprops=None,
medianprops=None,
meanprops=None,
capprops=None,
whiskerprops=None,
manage_ticks=None,
# manage_xticks is for compatibility with matplotlib < 3.1.0.
# Remove this when the minimum matplotlib version is 3.1.0.
manage_xticks=None,
autorange=False,
zorder=None,
precision=None,
):
def update_dict(dictionary, rc_name, properties):
"""Loads properties in the dictionary from rc file if not already
in the dictionary"""
rc_str = "boxplot.{0}.{1}"
if dictionary is None:
dictionary = dict()
for prop_dict in properties:
dictionary.setdefault(prop_dict, mat.rcParams[rc_str.format(rc_name, prop_dict)])
return dictionary
# Common property dictionaries loading from rc
flier_props = [
"color",
"marker",
"markerfacecolor",
"markeredgecolor",
"markersize",
"linestyle",
"linewidth",
]
default_props = ["color", "linewidth", "linestyle"]
boxprops = update_dict(boxprops, "boxprops", default_props)
whiskerprops = update_dict(whiskerprops, "whiskerprops", default_props)
capprops = update_dict(capprops, "capprops", default_props)
medianprops = update_dict(medianprops, "medianprops", default_props)
meanprops = update_dict(meanprops, "meanprops", default_props)
flierprops = update_dict(flierprops, "flierprops", flier_props)
if patch_artist:
boxprops["linestyle"] = "solid"
boxprops["edgecolor"] = boxprops.pop("color")
# if non-default sym value, put it into the flier dictionary
# the logic for providing the default symbol ('b+') now lives
# in bxp in the initial value of final_flierprops
# handle all of the `sym` related logic here so we only have to pass
# on the flierprops dict.
if sym is not None:
# no-flier case, which should really be done with
# 'showfliers=False', but nonetheless handle it here to keep
# backward compatibility
if sym == "":
# blow away existing dict and make one for invisible markers
flierprops = dict(linestyle="none", marker="", color="none")
# turn the fliers off just to be safe
showfliers = False
# now process the symbol string
else:
# process the symbol string
# discarded linestyle
_, marker, color = _process_plot_format(sym)
# if we have a marker, use it
if marker is not None:
flierprops["marker"] = marker
# if we have a color, use it
if color is not None:
# assume that if a color is passed in, the user wants a
# filled symbol; if the user wants more control, they
# should use flierprops
flierprops["color"] = color
flierprops["markerfacecolor"] = color
flierprops["markeredgecolor"] = color
# replace medians if necessary:
if usermedians is not None:
if len(np.ravel(usermedians)) != len(bxpstats) or np.shape(usermedians)[0] != len(
bxpstats
):
raise ValueError("usermedians length not compatible with x")
else:
# reassign medians as necessary
for stats, med in zip(bxpstats, usermedians):
if med is not None:
stats["med"] = med
if conf_intervals is not None:
if np.shape(conf_intervals)[0] != len(bxpstats):
err_mess = "conf_intervals length not compatible with x"
raise ValueError(err_mess)
else:
for stats, ci in zip(bxpstats, conf_intervals):
if ci is not None:
if len(ci) != 2:
raise ValueError("each confidence interval must " "have two values")
else:
if ci[0] is not None:
stats["cilo"] = ci[0]
if ci[1] is not None:
stats["cihi"] = ci[1]
should_manage_ticks = True
if manage_xticks is not None:
should_manage_ticks = manage_xticks
if manage_ticks is not None:
should_manage_ticks = manage_ticks
if LooseVersion(mat.__version__) < LooseVersion("3.1.0"):
extra_args = {"manage_xticks": should_manage_ticks}
else:
extra_args = {"manage_ticks": should_manage_ticks}
artists = ax.bxp(
bxpstats,
positions=positions,
widths=widths,
vert=vert,
patch_artist=patch_artist,
shownotches=notch,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
meanprops=meanprops,
meanline=meanline,
showfliers=showfliers,
capprops=capprops,
whiskerprops=whiskerprops,
zorder=zorder,
**extra_args,
)
return artists
def _plot(self, ax, bxpstats, column_num=None, return_type="axes", **kwds):
bp = self.boxplot(ax, bxpstats, **kwds)
if return_type == "dict":
return bp, bp
elif return_type == "both":
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _compute_plot_data(self):
colname = self.data.name
spark_column_name = self.data._internal.spark_column_name_for(self.data._column_label)
data = self.data
# Updates all props with the rc defaults from matplotlib
self.kwds.update(PandasOnSparkBoxPlot.rc_defaults(**self.kwds))
# Gets some important kwds
showfliers = self.kwds.get("showfliers", False)
whis = self.kwds.get("whis", 1.5)
labels = self.kwds.get("labels", [colname])
# This one is pandas-on-Spark specific to control precision for approx_percentile
precision = self.kwds.get("precision", 0.01)
# Computes mean, median, Q1 and Q3 with approx_percentile and precision
col_stats, col_fences = BoxPlotBase.compute_stats(data, spark_column_name, whis, precision)
# Creates a column to flag rows as outliers or not
outliers = BoxPlotBase.outliers(data, spark_column_name, *col_fences)
# Computes min and max values of non-outliers - the whiskers
whiskers = BoxPlotBase.calc_whiskers(spark_column_name, outliers)
if showfliers:
fliers = BoxPlotBase.get_fliers(spark_column_name, outliers, whiskers[0])
else:
fliers = []
# Builds bxpstats dict
stats = []
item = {
"mean": col_stats["mean"],
"med": col_stats["med"],
"q1": col_stats["q1"],
"q3": col_stats["q3"],
"whislo": whiskers[0],
"whishi": whiskers[1],
"fliers": fliers,
"label": labels[0],
}
stats.append(item)
self.data = {labels[0]: stats}
def _make_plot(self):
bxpstats = list(self.data.values())[0]
ax = self._get_ax(0)
kwds = self.kwds.copy()
for stats in bxpstats:
if len(stats["fliers"]) > 1000:
stats["fliers"] = stats["fliers"][:1000]
ax.text(
1,
1,
"showing top 1,000 fliers only",
size=6,
ha="right",
va="bottom",
transform=ax.transAxes,
)
ret, bp = self._plot(ax, bxpstats, column_num=0, return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self.data.items()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
@staticmethod
def rc_defaults(
notch=None,
vert=None,
whis=None,
patch_artist=None,
bootstrap=None,
meanline=None,
showmeans=None,
showcaps=None,
showbox=None,
showfliers=None,
**kwargs
):
# Missing arguments default to rcParams.
if whis is None:
whis = mat.rcParams["boxplot.whiskers"]
if bootstrap is None:
bootstrap = mat.rcParams["boxplot.bootstrap"]
if notch is None:
notch = mat.rcParams["boxplot.notch"]
if vert is None:
vert = mat.rcParams["boxplot.vertical"]
if patch_artist is None:
patch_artist = mat.rcParams["boxplot.patchartist"]
if meanline is None:
meanline = mat.rcParams["boxplot.meanline"]
if showmeans is None:
showmeans = mat.rcParams["boxplot.showmeans"]
if showcaps is None:
showcaps = mat.rcParams["boxplot.showcaps"]
if showbox is None:
showbox = mat.rcParams["boxplot.showbox"]
if showfliers is None:
showfliers = mat.rcParams["boxplot.showfliers"]
return dict(
whis=whis,
bootstrap=bootstrap,
notch=notch,
vert=vert,
patch_artist=patch_artist,
meanline=meanline,
showmeans=showmeans,
showcaps=showcaps,
showbox=showbox,
showfliers=showfliers,
)
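# A minimal standalone sketch (illustrative only; not used by this module) of the
# pattern the box plot above relies on: the box statistics are aggregated elsewhere
# (here via approx_percentile on Spark) and handed to matplotlib's Axes.bxp as a
# list of stat dicts, so no raw data reaches the plotting backend. The helper name
# and the numbers are made up for illustration.
def _example_bxp_from_precomputed_stats():
    import matplotlib.pyplot as plt

    # one dict per box, following matplotlib's Axes.bxp key convention
    stats = [
        {
            "label": "col_a",
            "mean": 0.5,
            "med": 0.4,
            "q1": 0.2,
            "q3": 0.7,
            "whislo": 0.0,
            "whishi": 1.0,
            "fliers": [1.8, -0.9],
        }
    ]
    fig, ax = plt.subplots()
    ax.bxp(stats, showmeans=True)
    return fig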
class PandasOnSparkHistPlot(PandasHistPlot, HistogramPlotBase):
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _compute_plot_data(self):
self.data, self.bins = HistogramPlotBase.prepare_hist_data(self.data, self.bins)
def _make_plot(self):
# TODO: this logic is similar to KdePlot. Might have to deduplicate it.
# 'num_colors' requires computing `shape`, which triggers a full count.
# Use 1 for now to save that computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
output_series = HistogramPlotBase.compute_hist(self.data, self.bins)
for (i, label), y in zip(enumerate(self.data._internal.column_labels), output_series):
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0, stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
base = np.zeros(len(bins) - 1)
bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"])
# Since the counts were computed already, we use them as weights and just generate
# one entry for each bin
n, bins, patches = ax.hist(bins[:-1], bins=bins, bottom=bottom, weights=y, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
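# A standalone sketch (illustrative only; not used by this module) of the trick used
# in `_plot` above: a histogram is redrawn from counts that were already computed, by
# passing the left bin edges as the data and the counts as `weights` to Axes.hist.
# The helper name, bins and counts are made up for illustration.
def _example_hist_from_precomputed_counts():
    import numpy as np
    import matplotlib.pyplot as plt

    bins = np.array([0.0, 1.0, 2.0, 3.0])  # bin edges
    counts = np.array([5, 2, 8])            # precomputed count per bin
    fig, ax = plt.subplots()
    # one synthetic entry per bin, weighted by its count, reproduces the histogram
    ax.hist(bins[:-1], bins=bins, weights=counts)
    return fig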
class PandasOnSparkPiePlot(PandasPiePlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkAreaPlot(PandasAreaPlot, SampledPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkLinePlot(PandasLinePlot, SampledPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_sampled(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkBarhPlot(PandasBarhPlot, TopNPlotBase):
def __init__(self, data, **kwargs):
super().__init__(self.get_top_n(data), **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkScatterPlot(PandasScatterPlot, TopNPlotBase):
def __init__(self, data, x, y, **kwargs):
super().__init__(self.get_top_n(data), x, y, **kwargs)
def _make_plot(self):
self.set_result_text(self._get_ax(0))
super()._make_plot()
class PandasOnSparkKdePlot(PandasKdePlot, KdePlotBase):
def _compute_plot_data(self):
self.data = KdePlotBase.prepare_kde_data(self.data)
def _make_plot(self):
# 'num_colors' requires computing `shape`, which triggers a full count.
# Use 1 for now to save that computation.
colors = self._get_colors(num_colors=1)
stacking_id = self._get_stacking_id()
sdf = self.data._internal.spark_frame
for i, label in enumerate(self.data._internal.column_labels):
# 'y' is a Spark DataFrame that selects one column.
y = sdf.select(self.data._internal.spark_column_for(label))
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label if len(label) > 1 else label[0])
kwds["label"] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds["style"] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _get_ind(self, y):
return KdePlotBase.get_ind(y, self.ind)
@classmethod
def _plot(
cls, ax, y, style=None, bw_method=None, ind=None, column_num=None, stacking_id=None, **kwds
):
y = KdePlotBase.compute_kde(y, bw_method=bw_method, ind=ind)
lines = PandasMPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
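# A local-only analogue (illustrative only; not used by this module) of the step
# above: the density is evaluated on a fixed grid `ind` and then drawn as a plain
# line plot. This sketch assumes scipy is available and uses gaussian_kde as a
# stand-in for the distributed KDE; the helper name is made up for illustration.
def _example_local_kde_line(sample, bw_method=None, num_points=200):
    import numpy as np
    from scipy.stats import gaussian_kde
    import matplotlib.pyplot as plt

    sample = np.asarray(sample, dtype=float)
    ind = np.linspace(sample.min(), sample.max(), num_points)  # evaluation grid
    density = gaussian_kde(sample, bw_method=bw_method)(ind)   # density on the grid
    fig, ax = plt.subplots()
    ax.plot(ind, density)
    return fig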
import pandas as pd
import numpy as np
import unittest
from dstools.preprocessing.OneHotEncoder import OneHotEncoder
class TestOneHotEncoder(unittest.TestCase):
def compare_DataFrame(self, df_transformed, df_transformed_correct):
"""
helper function to compare the values of the transformed DataFrame with the values of a correctly transformed DataFrame
"""
#same number of columns
self.assertEqual(len(df_transformed.columns), len(df_transformed_correct.columns))
#check for every column in correct DataFrame, that all items are equal
for column in df_transformed_correct.columns:
#compare every element
for x, y in zip(df_transformed[column], df_transformed_correct[column]):
#if both values are np.NaN, the assertion fails, although they are equal
if np.isnan(x) and np.isnan(y):
pass
else:
self.assertEqual(x, y)
def test_only_non_numeric(self):
"""
only columns containing non-numerical values should be encoded
"""
df = pd.DataFrame({'x1':[1,2], 'x2':['a','b']})
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Created by <NAME>
import unittest
import pandas as pd
import pandas.testing as pdtest
from allfreqs import AlleleFreqs
from allfreqs.classes import Reference, MultiAlignment
from allfreqs.tests.constants import (
REAL_ALG_X_FASTA, REAL_ALG_X_NOREF_FASTA, REAL_RSRS_FASTA,
REAL_ALG_L6_FASTA, REAL_ALG_L6_NOREF_FASTA,
SAMPLE_MULTIALG_FASTA, SAMPLE_MULTIALG_NOREF_FASTA, SAMPLE_REF_FASTA,
SAMPLE_MULTIALG_CSV, SAMPLE_MULTIALG_NOREF_CSV, SAMPLE_REF_CSV,
sample_sequences_df, SAMPLE_SEQUENCES_DICT, sample_sequences_freqs,
sample_sequences_freqs_amb, SAMPLE_FREQUENCIES,
SAMPLE_FREQUENCIES_AMB, REAL_ALG_X_DF, REAL_X_FREQUENCIES, REAL_ALG_L6_DF,
REAL_L6_FREQUENCIES, TEST_CSV
)
class TestBasic(unittest.TestCase):
def setUp(self) -> None:
ref = Reference("AAG-CTNGGGCATTTCAGGGTGAGCCCGGGCAATACAGGG-TAT")
alg = MultiAlignment(SAMPLE_SEQUENCES_DICT)
self.af = AlleleFreqs(multialg=alg, reference=ref)
self.af_amb = AlleleFreqs(multialg=alg, reference=ref, ambiguous=True)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_frequencies_ambiguous(self):
# Given/When
exp_freqs = sample_sequences_freqs_amb()
# Then
pdtest.assert_frame_equal(self.af_amb.frequencies, exp_freqs)
def test__get_frequencies(self):
# Given
test_freq = pd.Series({'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3,
'-': 0.1, 'N': 0.1})
exp_freq = {'A': 0.2, 'C': 0.2, 'G': 0.1, 'T': 0.3, 'gap': 0.1,
'oth': 0.1}
# When
result = self.af._get_frequencies(test_freq)
# Then
self._dict_almost_equal(result, exp_freq)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
def test_to_csv_ambiguous(self):
# Given/When
self.af_amb.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES_AMB)
# Then
pdtest.assert_frame_equal(result, expected)
@staticmethod
def _dict_almost_equal(expected: dict, result: dict, acc=10**-8) -> bool:
"""Compare to dictionaries and ensure that all their values are the
same, accounting for some fluctuation up to the given accuracy value.
Args:
expected: expected dictionary
result: resulting dictionary
acc: accuracy to use [default: 10**-8]
"""
if expected.keys() != result.keys():
return False
for key in expected.keys():
if abs(expected[key] - result[key]) >= acc:
return False
return True
# From Fasta
class TestFromFasta(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=SAMPLE_MULTIALG_FASTA)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestFromFastaNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=SAMPLE_MULTIALG_NOREF_FASTA,
reference=SAMPLE_REF_FASTA)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
# From Csv
class TestFromCsv(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_csv(sequences=SAMPLE_MULTIALG_CSV)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
class TestFromCsvNoRef(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_csv(sequences=SAMPLE_MULTIALG_NOREF_CSV,
reference=SAMPLE_REF_CSV)
def test_df(self):
# Given/When
exp_df = sample_sequences_df()
# Then
pdtest.assert_frame_equal(self.af.df, exp_df)
def test_frequencies(self):
# Given/When
exp_freqs = sample_sequences_freqs()
# Then
pdtest.assert_frame_equal(self.af.frequencies, exp_freqs)
def test_to_csv(self):
# Given/When
self.af.to_csv(TEST_CSV)
result = pd.read_csv(TEST_CSV)
expected = pd.read_csv(SAMPLE_FREQUENCIES)
# Then
pdtest.assert_frame_equal(result, expected)
# Real Datasets
class TestRealDatasetsX(unittest.TestCase):
def setUp(self) -> None:
self.af = AlleleFreqs.from_fasta(sequences=REAL_ALG_X_FASTA)
def test_df(self):
# Given/When
exp_df = pd.read_csv(REAL_ALG_X_DF, index_col=0)
import pandas as pd
import numpy as np
from matplotlib import pylab
from textwrap import fill
from . import univariate
def cross_table(variables, category1, category2, data, use_names=True):
"""
Gives a cross table of category1 and category2
Args:
variables: Variables class
category1: name of category
category2: name of category
data: structured data in a pandas DataFrame
use_names: return object used variable names instead of ids
"""
# if category in ["country", "region", "district", "clinic"]:
# return data[category].value_counts()
if category1 not in variables.groups:
raise KeyError("Category1 does not exists")
if category2 not in variables.groups:
raise KeyError("Category1 does not exists")
ids1 = sorted(variables.groups[category1])
ids2 = sorted(variables.groups[category2])
if use_names:
columns = [variables.name(i) for i in ids1]
else:
columns = ids1
results = pd.DataFrame(columns=columns)
for number, i2 in enumerate(ids2):
if use_names:
name = variables.name(i2)
else:
name = i2
if i2 in data.columns:
row = []
for i1 in ids1:
if i1 in data.columns:
row.append(data[data[i1] == 1][i2].sum())
else:
row.append(0)
results.loc[name] = row
else:
results.loc[name] = [0 for _ in ids1]
return results.fillna(0)
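# A pandas-only sketch (illustrative only; not used elsewhere) of what cross_table
# computes for 0/1 indicator columns: the matrix of co-occurrence counts between the
# two groups of columns. The helper name, column names and data are made up; for
# indicator data this matrix product gives the same numbers as the nested loops above.
def _example_cross_table_with_pandas():
    df = pd.DataFrame({"male": [1, 0, 1], "female": [0, 1, 0],
                       "fever": [1, 1, 0], "cough": [0, 1, 1]})
    # rows correspond to category2 columns, columns to category1 columns
    return df[["fever", "cough"]].T.dot(df[["male", "female"]])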
def incidence_rate_by_category(data, category, variables, populations=None, var_id=None, name=None, exclude=[]):
"""
Calculate the incidence rates for all the groups in category based on var_id
Args:
data: data frame with data
category: name of category
variables: Variables object
populations: list of populations to use, same length as number of variables in category
var_id: variable_id
name: name of variable
"""
ret = pd.DataFrame(columns=["incidence_rate", "ci_lower", "ci_upper"])
for group in variables.groups[category]:
if group not in exclude:
group_data = data[data[group] == 1]
if populations:
name = variables.name(group)
if name in populations:
population = populations[name]
elif group in populations:
population = populations[group]
else:
print(name, group)
raise KeyError("Populations needs to include either variable id or name")
incidence_rate = univariate.incidence_rate(group_data, population=population,
var_id=var_id, name=name, variables=variables)
else:
incidence_rate = univariate.incidence_rate(group_data, var_id=var_id, name=name, variables=variables)
ret.loc[variables.name(group)] = [incidence_rate[0], incidence_rate[0]-incidence_rate[1][0], incidence_rate[1][1]-incidence_rate[0]]
return ret
def incidence_rate_by_location(data, level, locations, variables, populations=None, var_id=None, name=None, exclude=[]):
"""
Calculate the incidence rates for all locations at the given level based on var_id
Args:
data: data frame with data
level: name of the location level
locations: Locations object
variables: Variables object
populations: list of populations to use, same length as number of variables in category
var_id: variable_id
name: name of variable
"""
ret = pd.DataFrame(columns=["incidence_rate", "ci_lower", "ci_upper"])
for loc in locations.get_level(level):
if loc not in exclude:
group_data = data[data[level] == int(loc)]
add = False
if populations:
name = locations.name(loc)
if name in populations:
population = populations[name]
elif loc in populations:
population = populations[loc]
else:
print(populations)
print(name, loc)
raise KeyError("Populations needs to include either variable id or name")
if population != 0:
add = True
incidence_rate = univariate.incidence_rate(group_data, population=population,
var_id=var_id, name=name, variables=variables)
else:
add = True
incidence_rate = univariate.incidence_rate(group_data, var_id=var_id, name=name, variables=variables)
if add:
ret.loc[locations.name(loc)] = [incidence_rate[0], incidence_rate[0]-incidence_rate[1][0], incidence_rate[1][1]-incidence_rate[0]]
return ret
def plot_incidence_rate(incidence_rates, mult_factor=1, sort=False):
"""
Plot a bar chart of incidence rates with error bars
Args:
incidence_rates: data frame with incidence rates
"""
incidence_rates = incidence_rates.copy()
if sort:
incidence_rates.sort_index(inplace=True)
error = np.array(incidence_rates[["ci_lower", "ci_upper"]]) * mult_factor
incidence_rates["incidence_rate"] = incidence_rates["incidence_rate"] * mult_factor
incidence_rates["incidence_rate"].plot(kind="bar",yerr=error.transpose())
def plot_odds_ratios(odds_ratios, rot=0):
"""
Plot a bar chart of odds ratios with error bars
Args:
odds_ratios: data frame with odds ratios
"""
upper_errors = odds_ratios["ci_upper"] - odds_ratios["odds_ratio"]
lower_errors = odds_ratios["odds_ratio"] - odds_ratios["ci_lower"]
errors = np.array([upper_errors, lower_errors])
odds_ratios["odds_ratio"].plot(kind="bar",yerr=errors, rot=rot)
ax = pylab.axis()
pylab.plot([ax[0], ax[1]], [1, 1], color="black", alpha=0.4)
def plot_many_incidence_rates(rate_list, rot=0, mult_factor=1):
"""
Plot a list of different incidence rates in one plot
Args:
rate_list: list of ("name", incidence_rate_dataframe) pairs
"""
keys = [r[0] for r in rate_list]
data = rate_list[0][1].copy()
del data["incidence_rate"]
del data["ci_lower"]
del data["ci_upper"]
errors = np.zeros((2, 2, len(rate_list)))
for i, k in enumerate(keys):
data[fill(k, 20)] = rate_list[i][1]["incidence_rate"] * mult_factor
errors[:, 0, i] = rate_list[i][1]["ci_upper"] * mult_factor
errors[:, 1, i] = rate_list[i][1]["ci_lower"] * mult_factor
data.transpose().plot(kind="bar", yerr=errors, rot=rot)
def odds_ratio_many(data, diseases, group, population=None, variables=None):
"""
Calculate odds ratios for the given diseases between the two groups
Args:
data: data frame with data
diseases: list of diseases
group: (gr_1, gr_2)
population: population dict
"""
ret_data = pd.DataFrame(columns=["odds_ratio", "ci_lower", "ci_upper"])
from configparser import ConfigParser
import os
import cv2
import numpy as np
import pandas as pd
import warnings
import glob
def roiByDefinition(inifile):
global ix, iy
global topLeftStatus
global overlay
global topLeftX, topLeftY, bottomRightX, bottomRightY
global centerStatus
global currCircleRadius
global centerX, centerY, radius
global centroids, toRemoveShapeName, removeStatus, toRemoveShape
global recWidth, recHeight, firstLoop
config = ConfigParser()
configFile = str(inifile)
warnings.filterwarnings('ignore',category=pd.io.pytables.PerformanceWarning)
pd.options.mode.chained_assignment = None
config.read(configFile)
vidInfPath = config.get('General settings', 'project_path')
videofilesFolder = os.path.join(vidInfPath, "videos")
logFolderPath = os.path.join(vidInfPath, 'logs')
vidInfPath = os.path.join(logFolderPath, 'video_info.csv')
vidinfDf = pd.read_csv(vidInfPath)
rectangularDf = pd.DataFrame(columns=['Video', "Shape_type", "Name", "width", "height", "topLeftX", "topLeftY"])
circleDf = pd.DataFrame(columns=['Video', "Shape_type", "Name", "centerX", "centerY", "radius"])
import os
if not os.path.exists("temp"):
os.mkdir("temp")
def add_pi_obj_func_test():
import os
import pyemu
pst = os.path.join("utils","dewater_pest.pst")
pst = pyemu.optimization.add_pi_obj_func(pst,out_pst_name=os.path.join("temp","dewater_pest.piobj.pst"))
print(pst.prior_information.loc["pi_obj_func","equation"])
#pst._update_control_section()
assert pst.control_data.nprior == 1
def fac2real_test():
import os
import numpy as np
import pyemu
# pp_file = os.path.join("utils","points1.dat")
# factors_file = os.path.join("utils","factors1.dat")
# pyemu.utils.gw_utils.fac2real(pp_file,factors_file,
# out_file=os.path.join("utils","test.ref"))
pp_file = os.path.join("utils", "points2.dat")
factors_file = os.path.join("utils", "factors2.dat")
pyemu.geostats.fac2real(pp_file, factors_file,
out_file=os.path.join("temp", "test.ref"))
arr1 = np.loadtxt(os.path.join("utils","fac2real_points2.ref"))
arr2 = np.loadtxt(os.path.join("temp","test.ref"))
#print(np.nansum(np.abs(arr1-arr2)))
#print(np.nanmax(np.abs(arr1-arr2)))
nmax = np.nanmax(np.abs(arr1-arr2))
assert nmax < 0.01
# import matplotlib.pyplot as plt
# diff = (arr1-arr2)/arr1 * 100.0
# diff[np.isnan(arr1)] = np.nan
# p = plt.imshow(diff,interpolation='n')
# plt.colorbar(p)
# plt.show()
def vario_test():
import numpy as np
import pyemu
contribution = 0.1
a = 2.0
for const in [pyemu.utils.geostats.ExpVario,pyemu.utils.geostats.GauVario,
pyemu.utils.geostats.SphVario]:
v = const(contribution,a)
h = v._h_function(np.array([0.0]))
assert h == contribution
h = v._h_function(np.array([a*1000]))
assert h == 0.0
v2 = const(contribution,a,anisotropy=2.0,bearing=90.0)
print(v2._h_function(np.array([a])))
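# A tiny numpy sketch (illustrative only; not a statement about pyemu internals) of
# why the assertions above hold for the textbook exponential covariance
# c(h) = contribution * exp(-h / a): at h = 0 it equals the contribution, and at
# h = 1000 * a the exponential underflows to exactly 0.0 in double precision.
def _example_exponential_covariance():
    import numpy as np

    contribution, a = 0.1, 2.0
    cov = lambda h: contribution * np.exp(-np.abs(h) / a)
    assert cov(0.0) == contribution
    assert cov(a * 1000) == 0.0  # exp(-1000.0) underflows to zero
    return cov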
def aniso_test():
import pyemu
contribution = 0.1
a = 2.0
for const in [pyemu.utils.geostats.ExpVario,pyemu.utils.geostats.GauVario,
pyemu.utils.geostats.SphVario]:
v = const(contribution,a)
v2 = const(contribution,a,anisotropy=2.0,bearing=90.0)
v3 = const(contribution,a,anisotropy=2.0,bearing=0.0)
pt0 = (0,0)
pt1 = (1,0)
assert v.covariance(pt0,pt1) == v2.covariance(pt0,pt1)
pt0 = (0,0)
pt1 = (0,1)
assert v.covariance(pt0,pt1) == v3.covariance(pt0,pt1)
def geostruct_test():
import pyemu
v1 = pyemu.utils.geostats.ExpVario(0.1,2.0)
v2 = pyemu.utils.geostats.GauVario(0.1,2.0)
v3 = pyemu.utils.geostats.SphVario(0.1,2.0)
g = pyemu.utils.geostats.GeoStruct(0.2,[v1,v2,v3])
pt0 = (0,0)
pt1 = (0,0)
print(g.covariance(pt0,pt1))
assert g.covariance(pt0,pt1) == 0.5
pt0 = (0,0)
pt1 = (1.0e+10,0)
assert g.covariance(pt0,pt1) == 0.2
def struct_file_test():
import os
import pyemu
structs = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct.dat"))
#print(structs[0])
pt0 = (0,0)
pt1 = (0,0)
for s in structs:
assert s.covariance(pt0,pt1) == s.nugget + \
s.variograms[0].contribution
with open(os.path.join("utils","struct_out.dat"),'w') as f:
for s in structs:
s.to_struct_file(f)
structs1 = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct_out.dat"))
for s in structs1:
assert s.covariance(pt0,pt1) == s.nugget + \
s.variograms[0].contribution
def covariance_matrix_test():
import os
import pandas as pd
import pyemu
pts = pd.read_csv(os.path.join("utils","points1.dat"),delim_whitespace=True,
header=None,names=["name","x","y"],usecols=[0,1,2])
struct = pyemu.utils.geostats.read_struct_file(
os.path.join("utils","struct.dat"))[0]
struct.variograms[0].covariance_matrix(pts.x,pts.y,names=pts.name)
print(struct.covariance_matrix(pts.x,pts.y,names=pts.name).x)
def setup_ppcov_simple():
import os
import platform
exe_file = os.path.join("utils","ppcov.exe")
print(platform.platform())
if not os.path.exists(exe_file) or not platform.platform().lower().startswith("win"):
print("can't run ppcov setup")
return
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_test.dat")
args1 = [pts_file,'0.0',str_file,"struct1",os.path.join("utils","ppcov.struct1.out"),'','']
args2 = [pts_file,'0.0',str_file,"struct2",os.path.join("utils","ppcov.struct2.out"),'','']
args3 = [pts_file,'0.0',str_file,"struct3",os.path.join("utils","ppcov.struct3.out"),'','']
for args in [args1,args2,args3]:
in_file = os.path.join("utils","ppcov.in")
with open(in_file,'w') as f:
f.write('\n'.join(args))
os.system(exe_file + '<' + in_file)
def ppcov_simple_test():
import os
import numpy as np
import pandas as pd
import pyemu
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_test.dat")
mat1_file = os.path.join("utils","ppcov.struct1.out")
mat2_file = os.path.join("utils","ppcov.struct2.out")
mat3_file = os.path.join("utils","ppcov.struct3.out")
ppc_mat1 = pyemu.Cov.from_ascii(mat1_file)
ppc_mat2 = pyemu.Cov.from_ascii(mat2_file)
ppc_mat3 = pyemu.Cov.from_ascii(mat3_file)
pts = pd.read_csv(pts_file,header=None,names=["name","x","y"],usecols=[0,1,2],
delim_whitespace=True)
struct1,struct2,struct3 = pyemu.utils.geostats.read_struct_file(str_file)
print(struct1)
print(struct2)
print(struct3)
for mat,struct in zip([ppc_mat1,ppc_mat2,ppc_mat3],[struct1,struct2,struct3]):
str_mat = struct.covariance_matrix(x=pts.x,y=pts.y,names=pts.name)
print(str_mat.row_names)
delt = mat.x - str_mat.x
assert np.abs(delt).max() < 1.0e-7
def setup_ppcov_complex():
import os
import platform
exe_file = os.path.join("utils","ppcov.exe")
print(platform.platform())
if not os.path.exists(exe_file) or not platform.platform().lower().startswith("win"):
print("can't run ppcov setup")
return
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_complex.dat")
args1 = [pts_file,'0.0',str_file,"struct1",os.path.join("utils","ppcov.complex.struct1.out"),'','']
args2 = [pts_file,'0.0',str_file,"struct2",os.path.join("utils","ppcov.complex.struct2.out"),'','']
for args in [args1,args2]:
in_file = os.path.join("utils","ppcov.in")
with open(in_file,'w') as f:
f.write('\n'.join(args))
os.system(exe_file + '<' + in_file)
def ppcov_complex_test():
import os
import numpy as np
import pandas as pd
import pyemu
pts_file = os.path.join("utils","points1_test.dat")
str_file = os.path.join("utils","struct_complex.dat")
mat1_file = os.path.join("utils","ppcov.complex.struct1.out")
mat2_file = os.path.join("utils","ppcov.complex.struct2.out")
ppc_mat1 = pyemu.Cov.from_ascii(mat1_file)
ppc_mat2 = pyemu.Cov.from_ascii(mat2_file)
pts = pd.read_csv(pts_file,header=None,names=["name","x","y"],usecols=[0,1,2],
delim_whitespace=True)
struct1,struct2 = pyemu.utils.geostats.read_struct_file(str_file)
print(struct1)
print(struct2)
for mat,struct in zip([ppc_mat1,ppc_mat2],[struct1,struct2]):
str_mat = struct.covariance_matrix(x=pts.x,y=pts.y,names=pts.name)
delt = mat.x - str_mat.x
print(mat.x[:,0])
print(str_mat.x[:,0])
print(np.abs(delt).max())
assert np.abs(delt).max() < 1.0e-7
#break
def pp_to_tpl_test():
import os
import pyemu
pp_file = os.path.join("utils","points1.dat")
pp_df = pyemu.pp_utils.pilot_points_to_tpl(pp_file,name_prefix="test_")
print(pp_df.columns)
def tpl_to_dataframe_test():
import os
import pyemu
pp_file = os.path.join("utils","points1.dat")
pp_df = pyemu.pp_utils.pilot_points_to_tpl(pp_file,name_prefix="test_")
df_tpl = pyemu.pp_utils.pp_tpl_to_dataframe(pp_file+".tpl")
assert df_tpl.shape[0] == pp_df.shape[0]
# def to_mps_test():
# import os
# import pyemu
# jco_file = os.path.join("utils","dewater_pest.jcb")
# jco = pyemu.Jco.from_binary(jco_file)
# #print(jco.x)
# pst = pyemu.Pst(jco_file.replace(".jcb",".pst"))
# #print(pst.nnz_obs_names)
# oc_dict = {oc:"l" for oc in pst.nnz_obs_names}
# obj_func = {name:1.0 for name in pst.par_names}
#
# #pyemu.optimization.to_mps(jco=jco_file)
# #pyemu.optimization.to_mps(jco=jco_file,obs_constraint_sense=oc_dict)
# #pyemu.optimization.to_mps(jco=jco_file,obj_func="h00_00")
# decision_var_names = pst.parameter_data.loc[pst.parameter_data.pargp=="q","parnme"].tolist()
# pyemu.optimization.to_mps(jco=jco_file,obj_func=obj_func,decision_var_names=decision_var_names,
# risk=0.975)
def setup_pp_test():
import os
import pyemu
try:
import flopy
except:
return
model_ws = os.path.join("..","examples","Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False)
pp_dir = os.path.join("utils")
#ml.export(os.path.join("temp","test_unrot_grid.shp"))
sr = pyemu.helpers.SpatialReference().from_namfile(
os.path.join(ml.model_ws, ml.namefile),
delc=ml.dis.delc, delr=ml.dis.delr)
sr.rotation = 0.
par_info_unrot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr, prefix_dict={0: "hk1",1:"hk2"},
every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_unrot.shp"),
)
#print(par_info_unrot.parnme.value_counts())
gs = pyemu.geostats.GeoStruct(variograms=pyemu.geostats.ExpVario(a=1000,contribution=1.0))
ok = pyemu.geostats.OrdinaryKrige(gs,par_info_unrot)
ok.calc_factors_grid(sr)
sr2 = pyemu.helpers.SpatialReference.from_gridspec(
os.path.join(ml.model_ws, "test.spc"), lenuni=2)
par_info_drot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr2, prefix_dict={0: ["hk1_", "sy1_", "rch_"]},
every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_unrot.shp"),
)
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(sr2)
par_info_mrot = pyemu.pp_utils.setup_pilotpoints_grid(ml,prefix_dict={0:["hk1_","sy1_","rch_"]},
every_n_cell=2,pp_dir=pp_dir,tpl_dir=pp_dir,
shapename=os.path.join("temp","test_unrot.shp"))
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(ml.sr)
sr.rotation = 15
#ml.export(os.path.join("temp","test_rot_grid.shp"))
#pyemu.gw_utils.setup_pilotpoints_grid(ml)
par_info_rot = pyemu.pp_utils.setup_pilotpoints_grid(sr=sr,every_n_cell=2, pp_dir=pp_dir, tpl_dir=pp_dir,
shapename=os.path.join("temp", "test_rot.shp"))
ok = pyemu.geostats.OrdinaryKrige(gs, par_info_unrot)
ok.calc_factors_grid(sr)
print(par_info_unrot.x)
print(par_info_drot.x)
print(par_info_mrot.x)
print(par_info_rot.x)
def read_hob_test():
import os
import pyemu
hob_file = os.path.join("utils","HOB.txt")
df = pyemu.gw_utils.modflow_hob_to_instruction_file(hob_file)
print(df.obsnme)
def read_pval_test():
import os
import pyemu
pval_file = os.path.join("utils", "meras_trEnhance.pval")
pyemu.gw_utils.modflow_pval_to_template_file(pval_file)
def pp_to_shapefile_test():
import os
import pyemu
try:
import shapefile
except:
print("no pyshp")
return
pp_file = os.path.join("utils","points1.dat")
shp_file = os.path.join("temp","points1.dat.shp")
pyemu.pp_utils.write_pp_shapfile(pp_file)
def write_tpl_test():
import os
import pyemu
tpl_file = os.path.join("utils","test_write.tpl")
in_file = os.path.join("temp","tpl_test.dat")
par_vals = {"q{0}".format(i+1):12345678.90123456 for i in range(7)}
pyemu.pst_utils.write_to_template(par_vals,tpl_file,in_file)
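# A minimal self-contained sketch (illustrative only; not part of the test suite) of
# the round trip exercised by write_tpl_test above, using the "ptf ~" template
# convention that also appears later in this file. The file names are made up and
# written to the temp directory these tests already create.
def _example_write_to_template():
    import os
    import pyemu

    tpl = os.path.join("temp", "example.dat.tpl")
    with open(tpl, "w") as f:
        f.write("ptf ~\n~    p1    ~  ~    p2    ~\n")  # '~' delimits parameter slots
    pyemu.pst_utils.write_to_template({"p1": 1.0, "p2": 2.5},
                                      tpl, tpl.replace(".tpl", ""))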
def read_pestpp_runstorage_file_test():
import os
import pyemu
rnj_file = os.path.join("utils","freyberg.rnj")
#rnj_file = os.path.join("..", "..", "verification", "10par_xsec", "master_opt1","pest.rnj")
p1,o1 = pyemu.helpers.read_pestpp_runstorage(rnj_file)
p2,o2 = pyemu.helpers.read_pestpp_runstorage(rnj_file,9)
diff = p1 - p2
diff.sort_values("parval1",inplace=True)
def smp_to_ins_test():
import os
import pyemu
smp = os.path.join("utils","TWDB_wells.smp")
ins = os.path.join('temp',"test.ins")
try:
pyemu.pst_utils.smp_to_ins(smp,ins)
except:
pass
else:
raise Exception("should have failed")
pyemu.smp_utils.smp_to_ins(smp,ins,True)
def master_and_workers():
import shutil
import pyemu
worker_dir = os.path.join("..","verification","10par_xsec","template_mac")
master_dir = os.path.join("temp","master")
if not os.path.exists(master_dir):
os.mkdir(master_dir)
assert os.path.exists(worker_dir)
pyemu.helpers.start_workers(worker_dir,"pestpp","pest.pst",1,
worker_root="temp",master_dir=master_dir)
#now try it from within the master dir
base_cwd = os.getcwd()
os.chdir(master_dir)
pyemu.helpers.start_workers(os.path.join("..","..",worker_dir),
"pestpp","pest.pst",3,
master_dir='.')
os.chdir(base_cwd)
def first_order_pearson_regul_test():
import os
from pyemu import Schur
from pyemu.utils.helpers import first_order_pearson_tikhonov,zero_order_tikhonov
w_dir = "la"
sc = Schur(jco=os.path.join(w_dir,"pest.jcb"))
pt = sc.posterior_parameter
zero_order_tikhonov(sc.pst)
first_order_pearson_tikhonov(sc.pst,pt,reset=False)
print(sc.pst.prior_information)
sc.pst.rectify_pi()
assert sc.pst.control_data.pestmode == "regularization"
sc.pst.write(os.path.join('temp','test.pst'))
def zero_order_regul_test():
import os
import pyemu
pst = pyemu.Pst(os.path.join("pst","inctest.pst"))
pyemu.helpers.zero_order_tikhonov(pst)
print(pst.prior_information)
assert pst.control_data.pestmode == "regularization"
pst.write(os.path.join('temp','test.pst'))
pyemu.helpers.zero_order_tikhonov(pst,reset=False)
assert pst.prior_information.shape[0] == pst.npar_adj * 2
def kl_test():
import os
import numpy as np
import pandas as pd
import pyemu
import matplotlib.pyplot as plt
try:
import flopy
except:
print("flopy not imported...")
return
model_ws = os.path.join("..","verification","Freyberg","extra_crispy")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False)
str_file = os.path.join("..","verification","Freyberg","structure.dat")
arr_tru = np.loadtxt(os.path.join("..","verification",
"Freyberg","extra_crispy",
"hk.truth.ref")) + 20
basis_file = os.path.join("utils","basis.jco")
tpl_file = os.path.join("utils","test.tpl")
factors_file = os.path.join("temp","factors.dat")
num_eig = 100
prefixes = ["hk1"]
df = pyemu.utils.helpers.kl_setup(num_eig=num_eig, sr=ml.sr,
struct=str_file,
factors_file=factors_file,
basis_file=basis_file,
prefixes=prefixes,islog=False)
basis = pyemu.Matrix.from_binary(basis_file)
basis = basis[:,:num_eig]
arr_tru = np.atleast_2d(arr_tru.flatten()).transpose()
proj = np.dot(basis.T.x,arr_tru)[:num_eig]
#proj.autoalign = False
back = np.dot(basis.x, proj)
back = back.reshape(ml.nrow,ml.ncol)
df.parval1 = proj
arr = pyemu.geostats.fac2real(df,factors_file,out_file=None)
fig = plt.figure(figsize=(10, 10))
ax1, ax2 = plt.subplot(121),plt.subplot(122)
mn,mx = arr_tru.min(),arr_tru.max()
print(arr.max(), arr.min())
print(back.max(),back.min())
diff = np.abs(back - arr)
print(diff.max())
assert diff.max() < 1.0e-5
def ok_test():
import os
import pandas as pd
import pyemu
str_file = os.path.join("utils","struct_test.dat")
pts_data = pd.DataFrame({"x":[1.0,2.0,3.0],"y":[0.,0.,0.],"name":["p1","p2","p3"]})
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
interp_points = pts_data.copy()
kf = ok.calc_factors(interp_points.x,interp_points.y)
#for ptname in pts_data.name:
for i in kf.index:
assert len(kf.loc[i,"inames"])== 1
assert kf.loc[i,"ifacts"][0] == 1.0
assert sum(kf.loc[i,"ifacts"]) == 1.0
print(kf)
def ok_grid_test():
try:
import flopy
except:
return
import numpy as np
import pandas as pd
import pyemu
nrow,ncol = 10,5
delr = np.ones((ncol)) * 1.0/float(ncol)
delc = np.ones((nrow)) * 1.0/float(nrow)
num_pts = 0
ptx = np.random.random(num_pts)
pty = np.random.random(num_pts)
ptname = ["p{0}".format(i) for i in range(num_pts)]
pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
pts_data.index = pts_data.name
pts_data = pts_data.loc[:,["x","y","name"]]
sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
str_file = os.path.join("utils","struct_test.dat")
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
kf = ok.calc_factors_grid(sr,verbose=False,var_filename=os.path.join("temp","test_var.ref"),minpts_interp=1)
ok.to_grid_factors_file(os.path.join("temp","test.fac"))
def ok_grid_zone_test():
try:
import flopy
except:
return
import numpy as np
import pandas as pd
import pyemu
nrow,ncol = 10,5
delr = np.ones((ncol)) * 1.0/float(ncol)
delc = np.ones((nrow)) * 1.0/float(nrow)
num_pts = 0
ptx = np.random.random(num_pts)
pty = np.random.random(num_pts)
ptname = ["p{0}".format(i) for i in range(num_pts)]
pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
pts_data.index = pts_data.name
pts_data = pts_data.loc[:,["x","y","name"]]
sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
pts_data.loc[:,"zone"] = 1
pts_data.zone.iloc[1] = 2
print(pts_data.zone.unique())
str_file = os.path.join("utils","struct_test.dat")
gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
ok = pyemu.utils.geostats.OrdinaryKrige(gs,pts_data)
zone_array = np.ones((nrow,ncol))
zone_array[0,0] = 2
kf = ok.calc_factors_grid(sr,verbose=False,
var_filename=os.path.join("temp","test_var.ref"),
minpts_interp=1,zone_array=zone_array)
ok.to_grid_factors_file(os.path.join("temp","test.fac"))
def ppk2fac_verf_test():
import os
import numpy as np
import pyemu
try:
import flopy
except:
return
ws = os.path.join("..","verification","Freyberg")
gspc_file = os.path.join(ws,"grid.spc")
pp_file = os.path.join(ws,"pp_00_pp.dat")
str_file = os.path.join(ws,"structure.complex.dat")
ppk2fac_facfile = os.path.join(ws,"ppk2fac_fac.dat")
pyemu_facfile = os.path.join("temp","pyemu_facfile.dat")
sr = flopy.utils.SpatialReference.from_gridspec(gspc_file)
ok = pyemu.utils.OrdinaryKrige(str_file,pp_file)
ok.calc_factors_grid(sr,maxpts_interp=10)
ok.to_grid_factors_file(pyemu_facfile)
zone_arr = np.loadtxt(os.path.join(ws,"extra_crispy","ref","ibound.ref"))
pyemu_arr = pyemu.utils.fac2real(pp_file,pyemu_facfile,out_file=None)
ppk2fac_arr = pyemu.utils.fac2real(pp_file,ppk2fac_facfile,out_file=None)
pyemu_arr[zone_arr == 0] = np.NaN
pyemu_arr[zone_arr == -1] = np.NaN
ppk2fac_arr[zone_arr == 0] = np.NaN
ppk2fac_arr[zone_arr == -1] = np.NaN
diff = np.abs(pyemu_arr - ppk2fac_arr)
print(diff)
assert np.nansum(diff) < 1.0e-6,np.nansum(diff)
# def opt_obs_worth():
# import os
# import pyemu
# wdir = os.path.join("utils")
# os.chdir(wdir)
# pst = pyemu.Pst(os.path.join("supply2_pest.fosm.pst"))
# zero_weight_names = [n for n,w in zip(pst.observation_data.obsnme,pst.observation_data.weight) if w == 0.0]
# #print(zero_weight_names)
# #for attr in ["base_jacobian","hotstart_resfile"]:
# # pst.pestpp_options[attr] = os.path.join(wdir,pst.pestpp_options[attr])
# #pst.template_files = [os.path.join(wdir,f) for f in pst.template_files]
# #pst.instruction_files = [os.path.join(wdir,f) for f in pst.instruction_files]
# #print(pst.template_files)
# df = pyemu.optimization.get_added_obs_importance(pst,obslist_dict={"zeros":zero_weight_names})
# os.chdir("..")
# print(df)
def mflist_budget_test():
import pyemu
import os
import pandas as pd
try:
import flopy
except:
print("no flopy...")
return
model_ws = os.path.join("..","examples","Freyberg_transient")
ml = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,check=False,load_only=[])
list_filename = os.path.join(model_ws,"freyberg.list")
assert os.path.exists(list_filename)
df = pyemu.gw_utils.setup_mflist_budget_obs(list_filename,start_datetime=ml.start_datetime)
print(df)
times = df.loc[df.index.str.startswith('vol_wells')].index.str.split(
'_', expand=True).get_level_values(2)[::100]
times = pd.to_datetime(times, yearfirst=True)
df = pyemu.gw_utils.setup_mflist_budget_obs(
list_filename, start_datetime=ml.start_datetime, specify_times=times)
flx, vol = pyemu.gw_utils.apply_mflist_budget_obs(
list_filename, 'flux.dat', 'vol.dat', start_datetime=ml.start_datetime,
times='budget_times.config'
)
assert (flx.index == vol.index).all()
assert (flx.index == times).all()
def mtlist_budget_test():
import pyemu
import pandas as pd
import os
try:
import flopy
except:
print("no flopy...")
return
list_filename = os.path.join("utils","mt3d.list")
assert os.path.exists(list_filename)
frun_line,ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename,start_datetime='1-1-1970')
assert len(ins_files) == 2
frun_line,ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename,start_datetime='1-1-1970', gw_prefix='')
assert len(ins_files) == 2
frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename, start_datetime=None)
assert len(ins_files) == 2
list_filename = os.path.join("utils", "mt3d_imm_sor.lst")
assert os.path.exists(list_filename)
frun_line, ins_files, df = pyemu.gw_utils.setup_mtlist_budget_obs(
list_filename, start_datetime='1-1-1970')
def geostat_prior_builder_test():
import os
import numpy as np
import pyemu
pst_file = os.path.join("pst","pest.pst")
pst = pyemu.Pst(pst_file)
# print(pst.parameter_data)
tpl_file = os.path.join("utils", "pp_locs.tpl")
str_file = os.path.join("utils", "structure.dat")
cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{str_file:tpl_file})
d1 = np.diag(cov.x)
df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
df.loc[:,"zone"] = np.arange(df.shape[0])
gs = pyemu.geostats.read_struct_file(str_file)
cov = pyemu.helpers.geostatistical_prior_builder(pst_file,{gs:df},
sigma_range=4)
nnz = np.count_nonzero(cov.x)
assert nnz == pst.npar_adj
d2 = np.diag(cov.x)
assert np.array_equiv(d1, d2)
pst.parameter_data.loc[pst.par_names[1:10], "partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
cov = pyemu.helpers.geostatistical_prior_builder(pst, {gs: df},
sigma_range=4)
nnz = np.count_nonzero(cov.x)
assert nnz == pst.npar_adj
ttpl_file = os.path.join("temp", "temp.dat.tpl")
with open(ttpl_file, 'w') as f:
f.write("ptf ~\n ~ temp1 ~\n")
pst.add_parameters(ttpl_file, ttpl_file.replace(".tpl", ""))
pst.parameter_data.loc["temp1", "parubnd"] = 1.1
pst.parameter_data.loc["temp1", "parlbnd"] = 0.9
cov = pyemu.helpers.geostatistical_prior_builder(pst, {str_file: tpl_file})
assert cov.shape[0] == pst.npar_adj
def geostat_draws_test():
import os
import numpy as np
import pyemu
pst_file = os.path.join("pst","pest.pst")
pst = pyemu.Pst(pst_file)
print(pst.parameter_data)
tpl_file = os.path.join("utils", "pp_locs.tpl")
str_file = os.path.join("utils", "structure.dat")
pe = pyemu.helpers.geostatistical_draws(pst_file,{str_file:tpl_file})
assert (pe.shape == pe.dropna().shape)
pst.parameter_data.loc[pst.par_names[1:10], "partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
pe = pyemu.helpers.geostatistical_draws(pst, {str_file: tpl_file})
assert (pe.shape == pe.dropna().shape)
df = pyemu.pp_utils.pp_tpl_to_dataframe(tpl_file)
df.loc[:,"zone"] = np.arange(df.shape[0])
gs = pyemu.geostats.read_struct_file(str_file)
pe = pyemu.helpers.geostatistical_draws(pst_file,{gs:df},
sigma_range=4)
ttpl_file = os.path.join("temp", "temp.dat.tpl")
with open(ttpl_file, 'w') as f:
f.write("ptf ~\n ~ temp1 ~\n")
pst.add_parameters(ttpl_file, ttpl_file.replace(".tpl", ""))
pst.parameter_data.loc["temp1", "parubnd"] = 1.1
pst.parameter_data.loc["temp1", "parlbnd"] = 0.9
pst.parameter_data.loc[pst.par_names[1:10],"partrans"] = "tied"
pst.parameter_data.loc[pst.par_names[1:10], "partied"] = pst.par_names[0]
pe = pyemu.helpers.geostatistical_draws(pst, {str_file: tpl_file})
assert (pe.shape == pe.dropna().shape)
# def linearuniversal_krige_test():
# try:
# import flopy
# except:
# return
#
# import numpy as np
# import pandas as pd
# import pyemu
# nrow,ncol = 10,5
# delr = np.ones((ncol)) * 1.0/float(ncol)
# delc = np.ones((nrow)) * 1.0/float(nrow)
#
# num_pts = 0
# ptx = np.random.random(num_pts)
# pty = np.random.random(num_pts)
# ptname = ["p{0}".format(i) for i in range(num_pts)]
# pts_data = pd.DataFrame({"x":ptx,"y":pty,"name":ptname})
# pts_data.index = pts_data.name
# pts_data = pts_data.loc[:,["x","y","name"]]
#
#
# sr = flopy.utils.SpatialReference(delr=delr,delc=delc)
# pts_data.loc["i0j0", :] = [sr.xcentergrid[0,0],sr.ycentergrid[0,0],"i0j0"]
# pts_data.loc["imxjmx", :] = [sr.xcentergrid[-1, -1], sr.ycentergrid[-1, -1], "imxjmx"]
# pts_data.loc["i0j0","value"] = 1.0
# pts_data.loc["imxjmx","value"] = 0.0
#
# str_file = os.path.join("utils","struct_test.dat")
# gs = pyemu.utils.geostats.read_struct_file(str_file)[0]
# luk = pyemu.utils.geostats.LinearUniversalKrige(gs,pts_data)
# df = luk.estimate_grid(sr,verbose=True,
# var_filename=os.path.join("utils","test_var.ref"),
# minpts_interp=1)
def gslib_2_dataframe_test():
import os
import pyemu
gslib_file = os.path.join("utils","ch91pt.shp.gslib")
df = pyemu.geostats.gslib_2_dataframe(gslib_file)
print(df)
def sgems_to_geostruct_test():
import os
import pyemu
xml_file = os.path.join("utils", "ch00")
gs = pyemu.geostats.read_sgems_variogram_xml(xml_file)
def load_sgems_expvar_test():
import os
import numpy as np
#import matplotlib.pyplot as plt
import pyemu
dfs = pyemu.geostats.load_sgems_exp_var(os.path.join("utils","ch00_expvar"))
xmn,xmx = 1.0e+10,-1.0e+10
for d,df in dfs.items():
xmn = min(xmn,df.x.min())
xmx = max(xmx,df.x.max())
xml_file = os.path.join("utils", "ch00")
gs = pyemu.geostats.read_sgems_variogram_xml(xml_file)
v = gs.variograms[0]
#ax = gs.plot(ls="--")
#plt.show()
#x = np.linspace(xmn,xmx,100)
#y = v.inv_h(x)
#
#plt.plot(x,y)
#plt.show()
def read_hydmod_test():
import os
import numpy as np
import pandas as pd
import pyemu
try:
import flopy
except:
return
df, outfile = pyemu.gw_utils.modflow_read_hydmod_file(os.path.join('utils','freyberg.hyd.bin'),
os.path.join('temp','freyberg.hyd.bin.dat'))
df = pd.read_csv(os.path.join('temp', 'freyberg.hyd.bin.dat'), delim_whitespace=True)
dftrue = pd.read_csv(os.path.join('utils', 'freyberg.hyd.bin.dat.true'), delim_whitespace=True)
assert np.allclose(df.obsval.values, dftrue.obsval.values)
def make_hydmod_insfile_test():
import os
import shutil
import pyemu
try:
import flopy
except:
return
shutil.copy2(os.path.join('utils','freyberg.hyd.bin'),os.path.join('temp','freyberg.hyd.bin'))
pyemu.gw_utils.modflow_hydmod_to_instruction_file(os.path.join('temp','freyberg.hyd.bin'))
#assert open(os.path.join('utils','freyberg.hyd.bin.dat.ins'),'r').read() == open('freyberg.hyd.dat.ins', 'r').read()
assert os.path.exists(os.path.join('temp','freyberg.hyd.bin.dat.ins'))
def plot_summary_test():
import os
import pandas as pd
import pyemu
try:
import matplotlib.pyplot as plt
except:
return
par_df = pd.read_csv(os.path.join("utils","freyberg_pp.par.usum.csv"),
index_col=0)
idx = list(par_df.index.map(lambda x: x.startswith("HK")))
par_df = par_df.loc[idx,:]
ax = pyemu.plot_utils.plot_summary_distributions(par_df,label_post=True)
plt.savefig(os.path.join("temp","hk_par.png"))
plt.close()
df = os.path.join("utils","freyberg_pp.pred.usum.csv")
figs,axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True)
#plt.show()
for i,fig in enumerate(figs):
plt.figure(fig.number)
plt.savefig(os.path.join("temp","test_pred_{0}.png".format(i)))
plt.close(fig)
df = os.path.join("utils","freyberg_pp.par.usum.csv")
figs, axes = pyemu.plot_utils.plot_summary_distributions(df,subplots=True)
for i,fig in enumerate(figs):
plt.figure(fig.number)
plt.savefig(os.path.join("temp","test_par_{0}.png".format(i)))
plt.close(fig)
def hds_timeseries_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
model_ws =os.path.join("..","examples","Freyberg_transient")
org_hds_file = os.path.join(model_ws, "freyberg.hds")
hds_file = os.path.join("temp", "freyberg.hds")
org_cbc_file = org_hds_file.replace(".hds",".cbc")
cbc_file = hds_file.replace(".hds", ".cbc")
shutil.copy2(org_hds_file, hds_file)
shutil.copy2(org_cbc_file, cbc_file)
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, check=False)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1), "test": (0, 10, 14)}
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True)
# m.change_model_ws("temp",reset_external=True)
# m.write_input()
# pyemu.os_utils.run("mfnwt freyberg.nam",cwd="temp")
cmd, df1 = pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, include_path=True, prefix="stor",
text="storage", fill=0.0)
cmd,df2 = pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="stor",
text="storage",fill=0.0)
print(df1)
d = np.abs(df1.obsval.values - df2.obsval.values)
print(d.max())
assert d.max() == 0.0,d
try:
pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="consthead",
text="constant head")
except:
pass
else:
raise Exception("should have failed")
try:
pyemu.gw_utils.setup_hds_timeseries(cbc_file, kij_dict, model=m, include_path=True, prefix="consthead",
text="JUNK")
except:
pass
else:
raise Exception("should have failed")
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True,prefix="hds")
m = flopy.modflow.Modflow.load("freyberg.nam",model_ws=model_ws,load_only=[],check=False)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict,model=m,include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True,prefix="hds")
org_hds_file = os.path.join("utils", "MT3D001.UCN")
hds_file = os.path.join("temp", "MT3D001.UCN")
shutil.copy2(org_hds_file, hds_file)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1)}
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, include_path=True, prefix="hds")
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, load_only=[], check=False)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True)
pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True, prefix="hds")
# df1 = pd.read_csv(out_file, delim_whitespace=True)
# pyemu.gw_utils.apply_hds_obs(hds_file)
# df2 = pd.read_csv(out_file, delim_whitespace=True)
# diff = df1.obsval - df2.obsval
def grid_obs_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
m_ws = os.path.join("..", "examples", "freyberg_sfr_update")
org_hds_file = os.path.join("..","examples","Freyberg_Truth","freyberg.hds")
org_multlay_hds_file = os.path.join(m_ws, "freyberg.hds") # 3 layer version
org_ucn_file = os.path.join(m_ws, "MT3D001.UCN") # mt example
hds_file = os.path.join("temp","freyberg.hds")
multlay_hds_file = os.path.join("temp", "freyberg_3lay.hds")
ucn_file = os.path.join("temp", "MT3D001.UCN")
out_file = hds_file+".dat"
multlay_out_file = multlay_hds_file+".dat"
ucn_out_file = ucn_file+".dat"
shutil.copy2(org_hds_file,hds_file)
shutil.copy2(org_multlay_hds_file, multlay_hds_file)
shutil.copy2(org_ucn_file, ucn_file)
pyemu.gw_utils.setup_hds_obs(hds_file)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert abs(diff.max()) < 1.0e-6, abs(diff.max())
pyemu.gw_utils.setup_hds_obs(multlay_hds_file)
df1 = pd.read_csv(multlay_out_file,delim_whitespace=True)
assert len(df1) == 3*len(df2), "{} != 3*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval,df2.obsval), abs(diff.max())
pyemu.gw_utils.setup_hds_obs(hds_file,skip=-999)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
pyemu.gw_utils.setup_hds_obs(ucn_file, skip=1.e30, prefix='ucn')
df1 = pd.read_csv(ucn_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(ucn_file)
df2 = pd.read_csv(ucn_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
# skip = lambda x : x < -888.0
skip = lambda x: x if x > -888.0 else np.NaN
pyemu.gw_utils.setup_hds_obs(hds_file,skip=skip)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
kperk_pairs = (0,0)
pyemu.gw_utils.setup_hds_obs(hds_file,kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(out_file,delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(hds_file)
df2 = pd.read_csv(out_file,delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert diff.max() < 1.0e-6
kperk_pairs = [(0, 0), (0, 1), (0, 2)]
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == 3*len(df2), "{} != 3*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
kperk_pairs = [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (2, 2)]
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skip)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == 2 * len(df2), "{} != 2*{}".format(len(df1), len(df2))
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=m_ws, load_only=["BAS6"],forgive=False,verbose=True)
kperk_pairs = [(0, 0), (0, 1), (0, 2)]
skipmask = m.bas6.ibound.array
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skipmask)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == len(df2) == np.abs(skipmask).sum(), \
"array skip failing, expecting {0} obs but returned {1}".format(np.abs(skipmask).sum(), len(df1))
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
kperk_pairs = [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (2, 2)]
skipmask = m.bas6.ibound.array[0]
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skipmask)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == len(df2) == 2 * m.nlay * np.abs(skipmask).sum(), "array skip failing"
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
kperk_pairs = [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (2, 2)]
skipmask = m.bas6.ibound.array
pyemu.gw_utils.setup_hds_obs(multlay_hds_file, kperk_pairs=kperk_pairs,
skip=skipmask)
df1 = pd.read_csv(multlay_out_file, delim_whitespace=True)
pyemu.gw_utils.apply_hds_obs(multlay_hds_file)
df2 = pd.read_csv(multlay_out_file, delim_whitespace=True)
assert len(df1) == len(df2) == 2 * np.abs(skipmask).sum(), "array skip failing"
diff = df1.obsval - df2.obsval
assert np.allclose(df1.obsval, df2.obsval), abs(diff.max())
def postprocess_inactive_conc_test():
import os
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except:
return
import pyemu
bd = os.getcwd()
model_ws = os.path.join("..", "examples", "Freyberg_transient")
org_hds_file = os.path.join("utils", "MT3D001.UCN")
hds_file = os.path.join("temp", "MT3D001.UCN")
shutil.copy2(org_hds_file, hds_file)
kij_dict = {"test1": [0, 0, 0], "test2": (1, 1, 1), "inact": [0, 81, 35]}
m = flopy.modflow.Modflow.load("freyberg.nam", model_ws=model_ws, load_only=[], check=False)
frun_line, df = pyemu.gw_utils.setup_hds_timeseries(hds_file, kij_dict, model=m, include_path=True, prefix="hds",
postprocess_inact=1E30)
os.chdir("temp")
df0 = pd.read_csv("{0}_timeseries.processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
df1 = pd.read_csv("{0}_timeseries.post_processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
eval(frun_line)
df2 = pd.read_csv("{0}_timeseries.processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
df3 = pd.read_csv("{0}_timeseries.post_processed".format(os.path.split(hds_file)[-1]), delim_whitespace=True).T
assert np.allclose(df0, df2)
assert np.allclose(df2.test1, df3.test1)
assert np.allclose(df2.test2, df3.test2)
assert np.allclose(df3, df1)
os.chdir(bd)
def gw_sft_ins_test():
import os
import pyemu
sft_outfile = os.path.join("utils","test_sft.out")
#pyemu.gw_utils.setup_sft_obs(sft_outfile)
#pyemu.gw_utils.setup_sft_obs(sft_outfile,start_datetime="1-1-1970")
df = pyemu.gw_utils.setup_sft_obs(sft_outfile, start_datetime="1-1-1970",times=[10950.00])
#print(df)
def sfr_helper_test():
import os
import shutil
import pandas as pd
import pyemu
import flopy
#setup the process
m = flopy.modflow.Modflow.load("supply2.nam",model_ws="utils",check=False,verbose=True,forgive=False,load_only=["dis","sfr"])
sd = m.sfr.segment_data[0].copy()
sd["flow"] = 1.0
sd["pptsw"] = 1.0
m.sfr.segment_data = {k:sd.copy() for k in range(m.nper)}
df_sfr = pyemu.gw_utils.setup_sfr_seg_parameters(
m, include_temporal_pars=['hcond1', 'flow'])
print(df_sfr)
os.chdir("utils")
# change the name of the sfr file that will be created
pars = {}
with open("sfr_seg_pars.config") as f:
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
pars["sfr_filename"] = "test.sfr"
with open("sfr_seg_pars.config", 'w') as f:
for k, v in pars.items():
f.write("{0} {1}\n".format(k, v))
# change some hcond1 values
df = pd.read_csv("sfr_seg_temporal_pars.dat", delim_whitespace=False, index_col=0)
df.loc[:, "flow"] = 10.0
df.to_csv("sfr_seg_temporal_pars.dat", sep=',')
sd1 = pyemu.gw_utils.apply_sfr_seg_parameters().segment_data
m1 = flopy.modflow.Modflow.load("supply2.nam", load_only=["sfr"], check=False)
os.chdir("..")
for kper,sd in m1.sfr.segment_data.items():
#print(sd["flow"],sd1[kper]["flow"])
for i1,i2 in zip(sd["flow"],sd1[kper]["flow"]):
assert i1 * 10 == i2,"{0},{1}".format(i1,i2)
df_sfr = pyemu.gw_utils.setup_sfr_seg_parameters("supply2.nam", model_ws="utils", include_temporal_pars=True)
os.chdir("utils")
# change the name of the sfr file that will be created
pars = {}
with open("sfr_seg_pars.config") as f:
for line in f:
line = line.strip().split()
pars[line[0]] = line[1]
pars["sfr_filename"] = "test.sfr"
with open("sfr_seg_pars.config", 'w') as f:
for k, v in pars.items():
f.write("{0} {1}\n".format(k, v))
# change some hcond1 values
df = pd.read_csv("sfr_seg_pars.dat", delim_whitespace=False,index_col=0)
df.loc[:, "hcond1"] = 1.0
df.to_csv("sfr_seg_pars.dat", sep=',')
# make sure the hcond1 mult worked...
sd1 = pyemu.gw_utils.apply_sfr_seg_parameters().segment_data[0]
m1 = flopy.modflow.Modflow.load("supply2.nam", load_only=["sfr"], check=False)
sd2 = m1.sfr.segment_data[0]
sd1 = pd.DataFrame.from_records(sd1)
sd2 = pd.DataFrame.from_records(sd2)
# print(sd1.hcond1)
# print(sd2.hcond2)
assert sd1.hcond1.sum() == sd2.hcond1.sum()
# change some hcond1 values
    df = pd.read_csv("sfr_seg_pars.dat",delim_whitespace=False,index_col=0)
"""
visdex: Summary heatmap
Shows a simple correlation heatmap between numerical fields in the
loaded and filtered data file
"""
import itertools
import logging
import numpy as np
import pandas as pd
import scipy.stats as stats
from sklearn.cluster import AgglomerativeClustering
from dash.dependencies import Input, Output, State
from dash import html, dcc
import plotly.graph_objects as go
from visdex.cache import cache
from visdex.common import vstack
from visdex.timing import timing, start_timer, log_timing, print_timings
LOG = logging.getLogger(__name__)
def get_layout(app):
@app.callback(
[Output("heatmap-dropdown", "options"), Output("heatmap-dropdown", "value")],
[Input("df-filtered-loaded-div", "children")],
prevent_initial_call=True,
)
@timing
def update_heatmap_dropdown(df_loaded):
LOG.info(f"update_heatmap_dropdown {df_loaded}")
dff = cache.load("filtered")
options = [
{"label": col, "value": col}
for col in dff.columns
if dff[col].dtype in [np.int64, np.float64]
]
return options, [
col for col in dff.columns if dff[col].dtype in [np.int64, np.float64]
]
def flattened(df):
"""
Convert a DF into a Series, where the MultiIndex of each element is a combination
of the index/col from the original DF
"""
# The series contains only half of the matrix, so filter by the order of the two
# level labels.
s = pd.Series(
index=pd.MultiIndex.from_tuples(
filter(
lambda x: df.index.get_loc(x[0]) < df.index.get_loc(x[1]),
list(itertools.product(df.index, df.columns)),
),
names=["first", "second"],
),
name="value",
)
for (a, b) in s.index:
s[a, b] = df[b][a]
return s
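    # A minimal usage sketch for flattened(), using an illustrative 2x2 correlation
    # matrix (the labels "a"/"b" are made up). Only pairs whose 'first' label comes
    # before the 'second' in the index are kept, so each off-diagonal value appears once:
    #
    #   >>> corr = pd.DataFrame([[1.0, 0.5], [0.5, 1.0]],
    #   ...                     index=["a", "b"], columns=["a", "b"])
    #   >>> flattened(corr)[("a", "b")]
    #   0.5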
def reorder_df(df, order):
"""
Change the row and column order of df to that given in order
"""
return df.reindex(order)[order]
def recalculate_corr_etc(selected_columns, dff, corr_dff, pval_dff, logs_dff):
start_timer("recalculate_corr_etc")
# Work out which columns/rows are needed anew, and which are already populated
# TODO: note that if we load in a new file with some of the same column names,
# then this old correlation data may be used erroneously.
existing_cols = corr_dff.columns
overlap = list(set(selected_columns).intersection(set(existing_cols)))
LOG.debug(f"these are needed and already available: {overlap}")
required_new = list(set(selected_columns).difference(set(existing_cols)))
LOG.debug(f"these are needed and not already available: {required_new}")
########
# Create initial existing vs existing DF
########
# If there is overlap, then create brand new empty dataframes.
# Otherwise, update the existing dataframes.
if len(overlap) == 0:
LOG.debug(f"create new")
corr = pd.DataFrame()
pvalues = pd.DataFrame()
logs = pd.DataFrame()
else:
# Copy across existing data rather than recalculating (so in this operation
# we drop the unneeded elements)
# Then create nan elements in corr, p-values and logs matrices for those values
# which will be calculated.
corr = corr_dff.loc[overlap, overlap]
pvalues = pval_dff.loc[overlap, overlap]
logs = logs_dff.loc[overlap, overlap]
log_timing("recalculate_corr_etc", "update_summary_heatmap-init-corr")
# Populate missing elements in correlation matrix and p-values matrix using
# stats.pearsonr
# Firstly, convert the dff columns needed to numpy (far faster
# than doing it each iteration)
start_timer("inner")
np_overlap = dff[overlap].to_numpy()
np_req = dff[required_new].to_numpy()
log_timing("inner", "update_summary_heatmap-numpy") # This is negligible
########
# Create new vs existing NumPy arrays, fill with calculated data. Then convert to
# DFs, and append those to existing vs existing DF, to create all vs existing DFs
########
new_against_existing_corr = np.full(
shape=[len(overlap), len(required_new)], fill_value=np.nan
)
new_against_existing_pval = np.full(
shape=[len(overlap), len(required_new)], fill_value=np.nan
)
new_against_existing_logs = np.full(
shape=[len(overlap), len(required_new)], fill_value=np.nan
)
log_timing("inner", "update_summary_heatmap-nae_init")
for v2 in range(len(required_new)):
for v1 in range(len(overlap)):
# Mask out any pairs that contain nans (this is done pairwise rather than
# using .dropna on the full dataframe)
mask = ~np.isnan(np_overlap[:, v1]) & ~np.isnan(np_req[:, v2])
c, p = stats.pearsonr(np_overlap[mask, v1], np_req[mask, v2])
new_against_existing_corr[v1, v2] = c
new_against_existing_pval[v1, v2] = p
new_against_existing_logs[v1, v2] = -np.log10(p)
log_timing("inner", "update_summary_heatmap-nae_calc")
new_against_existing_corr_df = pd.DataFrame(
data=new_against_existing_corr, columns=required_new, index=overlap
)
corr[required_new] = new_against_existing_corr_df
# LOG.debug(f'corr {corr}')
new_against_existing_pval_df = pd.DataFrame(
data=new_against_existing_pval, columns=required_new, index=overlap
)
pvalues[required_new] = new_against_existing_pval_df
# As new_against_existing_logs doesn't need to be transposed (the transpose is
# nans instead), don't use an intermediate DF.
logs[required_new] = pd.DataFrame(
data=new_against_existing_logs, columns=required_new, index=overlap
)
log_timing("inner", "update_summary_heatmap-nae_copy")
########
# Create existing vs new DFs by transpose (apart from logs, whose transpose is nans)
########
existing_against_new_corr = new_against_existing_corr_df.transpose()
existing_against_new_pval = new_against_existing_pval_df.transpose()
existing_against_new_logs = pd.DataFrame(
data=np.nan, columns=overlap, index=required_new
)
log_timing("inner", "update_summary_heatmap-nae_transpose")
# ####### Create new vs new NumPy arrays, fill with calculated data. Then convert
# to DFs, and append those to existing vs new DF, to create all vs new DFs #######
new_against_new_corr = np.full(
shape=[len(required_new), len(required_new)], fill_value=np.nan
)
new_against_new_pval = np.full(
shape=[len(required_new), len(required_new)], fill_value=np.nan
)
new_against_new_logs = np.full(
shape=[len(required_new), len(required_new)], fill_value=np.nan
)
log_timing("inner", "update_summary_heatmap-nan_init")
for (v2_idx, v2) in enumerate(required_new):
for (v1_idx, v1) in enumerate(required_new):
if np.isnan(new_against_new_corr[v1_idx, v2_idx]):
# Mask out any pairs that contain nans (this is done pairwise rather
# than using .dropna on the full dataframe)
mask = ~np.isnan(np_req[:, v1_idx]) & ~np.isnan(np_req[:, v2_idx])
c, p = stats.pearsonr(np_req[mask, v1_idx], np_req[mask, v2_idx])
new_against_new_corr[v1_idx, v2_idx] = c
new_against_new_corr[v2_idx, v1_idx] = c
new_against_new_pval[v1_idx, v2_idx] = p
new_against_new_pval[v2_idx, v1_idx] = p
if v1 != v2:
if required_new.index(v1) < required_new.index(v2):
new_against_new_logs[v1_idx, v2_idx] = -np.log10(p)
else:
new_against_new_logs[v2_idx, v1_idx] = -np.log10(p)
log_timing("inner", "update_summary_heatmap-nan_calc")
existing_against_new_corr[required_new] = pd.DataFrame(
data=new_against_new_corr, columns=required_new, index=required_new
)
existing_against_new_pval[required_new] = pd.DataFrame(
data=new_against_new_pval, columns=required_new, index=required_new
)
existing_against_new_logs[required_new] = pd.DataFrame(
data=new_against_new_logs, columns=required_new, index=required_new
)
log_timing("inner", "update_summary_heatmap-nan_copy")
########
# Append all vs new DFs to all vs existing DFs to give all vs all DFs.
########
corr = corr.append(existing_against_new_corr)
pvalues = pvalues.append(existing_against_new_pval)
logs = logs.append(existing_against_new_logs)
log_timing("inner", "update_summary_heatmap-ean_append", restart=False)
log_timing("recalculate_corr_etc", "update_summary_heatmap-corr", restart=False)
return corr, pvalues, logs
@app.callback(
[
Output("heatmap", "figure"),
Output("corr-loaded-div", "children"),
Output("pval-loaded-div", "children"),
],
[Input("heatmap-dropdown", "value"), Input("heatmap-clustering-input", "value")],
[State("df-loaded-div", "children")],
prevent_initial_call=True,
)
@timing
def update_summary_heatmap(dropdown_values, clusters, df_loaded):
LOG.info(f"update_summary_heatmap {dropdown_values} {clusters}")
# Guard against the first argument being an empty list, as happens at first
# invocation, or df_loaded being False
if df_loaded is False or len(dropdown_values) <= 1:
fig = go.Figure()
return fig, False, False
# Load main dataframe
dff = cache.load("filtered")
# Guard against the dataframe being empty
if dff.size == 0:
fig = go.Figure()
return fig, False, False
# Load data from previous calculation
corr_dff = cache.load("corr")
pval_dff = cache.load("pval")
logs_dff = cache.load("logs")
# The columns we want to have calculated
selected_columns = list(dropdown_values)
LOG.debug(f"selected_columns {selected_columns}")
corr, pvalues, logs = recalculate_corr_etc(
selected_columns, dff, corr_dff, pval_dff, logs_dff
)
start_timer("update_summary_heatmap")
corr.fillna(0, inplace=True)
try:
cluster = AgglomerativeClustering(
n_clusters=min(clusters, len(selected_columns)),
affinity="euclidean",
linkage="ward",
)
cluster.fit_predict(corr)
clx = cluster.labels_
except ValueError:
clx = [0] * len(selected_columns)
log_timing("update_summary_heatmap", "update_summary_heatmap-cluster")
# Save cluster number of each column to a DF and then to feather.
        cluster_df = pd.DataFrame(data=clx, index=corr.index, columns=["column_names"])
import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from torch.utils.data import Dataset
import os
import pandas as pd
import pdb
import numpy as np
import math
import pickle
import random
from sklearn.utils import shuffle
class FinalTCGAPCAWG(Dataset):
def __init__(self, dataset_name = None,
data_dir=None,
mode='training',
curr_fold=1,
block_size=5000,
load=False,
addtriplettoken=False,
addpostoken=False,
addgestoken=False,
addrt=False,
nummut = 0,
frac = 0,
crossdata=False,
crossdatadir=None,
pcawg2tgca_class=False,
tcga2pcawg_class=False,
mutratio = '1-0-0-0-0-0',
adddatadir = None):
self.dataset_name = dataset_name
self.data_dir=data_dir
self.mode=mode
self.curr_fold=int(curr_fold)
self.block_size=block_size
self.load=load
self.addtriplettoken=addtriplettoken
self.addpostoken=addpostoken
self.addrt=addrt
self.nummut = nummut
self.frac = frac
self.addgestoken = addgestoken
self.crossdata= crossdata
self.crossdatadir = crossdatadir
self.adddatadir = adddatadir
self.pcawg2tgca_class=pcawg2tgca_class
self.tcga2pcawg_class=tcga2pcawg_class
self.NiSi = False
self.SNV = False
self.indel = False
self.SVMEI = False
self.Normal = False
if self.nummut > 0 :
self.block_size = self.nummut
if self.dataset_name == 'finalpcawg':
self.training_fold = pd.read_csv('./notebookpcawg/pcawg_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/pcawg_valfold' + str(self.curr_fold) + '.csv',index_col=0)
elif self.dataset_name == 'finaltcga':
self.training_fold = pd.read_csv('./notebookpcawg/tcga_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/tcga_valfold' + str(self.curr_fold) + '.csv',index_col=0)
elif self.dataset_name == 'westcga':
self.training_fold = pd.read_csv('./notebookpcawg/tcgawes_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/tcgawes_valfold' + str(self.curr_fold) + '.csv',index_col=0)
elif self.dataset_name == 'wgspcawg':
self.training_fold = pd.read_csv('./notebookpcawg/pcawgwgs_trainfold' + str(self.curr_fold) + '.csv',index_col=0)
self.validation_fold = pd.read_csv('./notebookpcawg/pcawgwgs_valfold' + str(self.curr_fold) + '.csv',index_col=0)
if self.adddatadir is not None:
adddata = pd.DataFrame(columns=self.validation_fold.columns)
adddata.columns = self.validation_fold.columns
folder = os.listdir(self.adddatadir)
for i in folder:
samples = os.listdir(self.adddatadir + i )
for j in samples:
if j[0:3] == 'new':
counter = pd.read_csv(self.adddatadir + i + '/count_new_' + j[4:],index_col=0)
listall = [i,j[4:]] + counter['0'].values.tolist() + [1]
pds = pd.DataFrame(listall)
pds = pds.T
pds.columns=self.validation_fold.columns
adddata = adddata.append(pds)
adddata = adddata.reset_index(drop=True)
self.adddata = adddata
#self.validation_fold = self.validation_fold.append(self.adddata)
self.validation_fold = self.adddata
self.data_dir = self.adddatadir
self.load_classinfo()
self.vocab_mutation = pd.read_csv('./notebookpcawg/dictMutation.csv',index_col=0)
self.allSNV_index = 0
self.mutratio = mutratio.split('-')
self.mutratio = [float(i) for i in self.mutratio]
if self.mutratio[0]>0:
self.NiSi = True
if self.mutratio[1]>0:
self.SNV = True
if self.mutratio[2]>0:
self.indel = True
if self.mutratio[3]>0:
self.SVMEI = True
if self.mutratio[4]>0:
self.Normal = True
if self.NiSi:
vocabsize = len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='NiSi'])
if self.SNV:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='SNV'])
if self.indel:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='indel'])
if self.SVMEI:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ'].isin(['MEI','SV'])])
if self.Normal:
vocabsize = vocabsize + len(self.vocab_mutation.loc[self.vocab_mutation['typ']=='Normal'])
self.vocab_size = vocabsize + 1
#print(self.vocab_size)
#pdb.set_trace()
self.pd_position_vocab = pd.read_csv('./notebookpcawg/dictChpos.csv',index_col=0)
self.pd_ges_vocab = pd.read_csv('./notebookpcawg/dictGES.csv',index_col=0)
self.position_size = len(self.pd_position_vocab) + 1
self.ges_size = len(self.pd_ges_vocab) + 1
self.rt_size = 1
self.midstring = '.' + self.dataset_name + str(mutratio) + str(int(self.addtriplettoken)) + str(int(self.addpostoken)) + str(int(self.addgestoken)) + str(int(self.addrt)) + '/'
if self.mode == 'validation':
if self.crossdata:
os.makedirs(self.crossdatadir + self.midstring, exist_ok=True)
self.data_dir = self.crossdatadir
#pdb.set_trace()
else:
os.makedirs(self.data_dir + self.midstring, exist_ok=True)
def load_classinfo(self):
if self.dataset_name == 'finalpcawg':
num_class = os.listdir(self.data_dir)
name_class = [i for i in num_class if len(i.split('.'))==1]
name_class = sorted(name_class)
n_samples = []
for idx,nm_class in enumerate(name_class):
samples = os.listdir(self.data_dir+nm_class)
samples = [x for x in samples if x[:10]=='count_new_']
n_samples.append(len(samples))
data = list(zip(name_class, np.arange(len(name_class)),n_samples))
self.pd_class_info = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])
else:
num_class = os.listdir(self.data_dir)
name_class = [i for i in num_class if len(i.split('.'))==1]
name_class = sorted(name_class)
n_samples = []
for idx,nm_class in enumerate(name_class):
samples = os.listdir(self.data_dir+nm_class)
samples = [x for x in samples if x[:10]=='count_new_']
n_samples.append(len(samples))
data = list(zip(name_class, np.arange(len(name_class)),n_samples))
self.pd_class_info = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])
if self.crossdata:
self.crossdatadir = self.data_dir
num_class = os.listdir(self.crossdatadir)
name_class = [i for i in num_class if len(i.split('.'))==1]
name_class = sorted(name_class)
n_samples = []
for idx,nm_class in enumerate(name_class):
samples = os.listdir(self.crossdatadir+nm_class)
samples = [x for x in samples if x[:10]=='count_new_']
n_samples.append(len(samples))
data = list(zip(name_class, np.arange(len(name_class)),n_samples))
self.pd_class_infoto = pd.DataFrame(data,columns=['class_name','class_index','n_samples'])
self.pd_class_crossdata = pd.read_csv('./extfile/crossdata.csv',index_col =0)
#pdb.set_trace()
def get_data(self,idx):
if self.mode=='training':
instances=self.training_fold.iloc[idx]
elif self.mode=='validation':
instances=self.validation_fold.iloc[idx]
elif self.mode == 'testing':
instances=self.test_data.iloc[idx]
#if self.prioritize:
# instances=self.training_fold.loc[self.training_fold['samples']=='f8593ac0-9480-22a0-e040-11ac0d48697a.csv']
# instances=instances.iloc[0]
target_name = instances['nm_class']
#if self.crossdata:
# target_name = self.pd_class_crossdata.loc[self.pd_class_crossdata['tcga_class']==target_name]['class_name'].to_list()[0]
samples = instances[1]
avail_count = np.asarray(self.mutratio) * self.block_size
row_count = instances[['NiSi','SNV','indel','SVMEI','Normal']].to_numpy()
diff = avail_count - row_count
pos = diff>0
avail_count1 = row_count * pos
diff = row_count > avail_count
avail_count2 = avail_count * diff
avail_count3 = avail_count1 + avail_count2
shadowavail_count3 = avail_count3
shadowavail_count3[0] = row_count[0]
if sum(shadowavail_count3) > self.block_size:
diff = self.block_size - sum(avail_count3)
shadowavail_count3[0] = diff + avail_count3[0]
avail_count2 = shadowavail_count3.astype(int)
if avail_count2[0]<0:
secondmax = avail_count2[np.argmax(avail_count2)]
avail_count2 = avail_count2 * 0.7
avail_count = avail_count2
diff = avail_count - row_count
pos = diff>0
avail_count1 = row_count * pos
diff = row_count > avail_count
avail_count2 = avail_count * diff
avail_count3 = avail_count1 + avail_count2
shadowavail_count3 = avail_count3
shadowavail_count3[0] = row_count[0]
if sum(shadowavail_count3) > self.block_size:
diff = self.block_size - sum(avail_count3)
shadowavail_count3[0] = diff + avail_count3[0]
avail_count2 = shadowavail_count3.astype(int)
avail_count = avail_count2
def grab(pd_input,grabcol):
return pd_input[grabcol]
def allgrab(grabcol):
if self.NiSi:
#pdb.set_trace()
pd_nisi = pd.read_csv(self.data_dir + target_name + '/' + 'NiSi_new_' + samples,index_col=0)
pd_nisi = pd_nisi.sample(n = avail_count[0], replace = False)
pd_nisi = grab(pd_nisi,grabcol)
if self.SNV:
pd_SNV = pd.read_csv(self.data_dir + target_name + '/' + 'SNV_new_' + samples,index_col=0)
pd_SNV = pd_SNV.sample(n = avail_count[1], replace = False)
pd_SNV = grab(pd_SNV,grabcol)
pd_nisi = pd_nisi.append(pd_SNV)
if self.indel:
pd_indel = pd.read_csv(self.data_dir + target_name + '/' + 'indel_new_' + samples,index_col=0)
pd_indel = pd_indel.sample(n = avail_count[2], replace = False)
pd_indel = grab(pd_indel,grabcol)
pd_nisi = pd_nisi.append(pd_indel)
if self.SVMEI:
pd_meisv = pd.read_csv(self.data_dir + target_name + '/' + 'MEISV_new_' + samples,index_col=0)
pd_meisv = pd_meisv.sample(n = avail_count[3], replace = False)
pd_meisv = grab(pd_meisv,grabcol)
pd_nisi = pd_nisi.append(pd_meisv)
if self.Normal:
pd_normal = pd.read_csv(self.data_dir + target_name + '/' + 'Normal_new_' + samples,index_col=0)
pd_normal = pd_normal.sample(n = avail_count[4], replace = False)
pd_normal = grab(pd_normal,grabcol)
pd_nisi = pd_nisi.append(pd_normal)
pd_nisi = pd_nisi.fillna(0)
return pd_nisi
if self.addtriplettoken:
if self.mode=='training' :
pd_nisi = allgrab(['triplettoken'])
else:
filename = self.data_dir + self.midstring + 'val_' + samples
if os.path.isfile(filename):
try:
pd_nisi = pd.read_csv(filename,index_col=0)
except:
pd_nisi = allgrab(['triplettoken'])
pd_nisi = pd_nisi.dropna()
pd_nisi.to_csv(filename)
else:
pd_nisi = allgrab(['triplettoken'])
pd_nisi.to_csv(filename)
#pdb.set_trace()
if self.addpostoken:
if self.mode=='training' :
pd_nisi = allgrab(['triplettoken','postoken'])
else:
#pdb.set_trace()
filename = self.data_dir + self.midstring + 'val_' + samples
if os.path.isfile(filename):
try:
pd_nisi = pd.read_csv(filename,index_col=0)
except:
pd_nisi = allgrab(['triplettoken','postoken'])
pdb.set_trace()
pd_nisi.to_csv(filename)
else:
pd_nisi = allgrab(['triplettoken','postoken'])
pd_nisi.to_csv(filename)
#pdb.set_trace()
if self.addgestoken:
if self.mode=='training' :
pd_nisi = allgrab(['triplettoken','postoken','gestoken'])
else:
filename = self.data_dir + self.midstring + 'val_' + samples
if os.path.isfile(filename):
try:
pd_nisi = pd.read_csv(filename,index_col=0)
except:
pd_nisi = allgrab(['triplettoken','postoken','gestoken'])
pd_nisi.to_csv(filename)
else:
pd_nisi = allgrab(['triplettoken','postoken','gestoken'])
pd_nisi.to_csv(filename)
if self.addrt:
if self.mode=='training' :
pd_nisi = allgrab(['triplettoken','postoken','gestoken','rt'])
else:
filename = self.data_dir + self.midstring + 'val_' + samples
if os.path.isfile(filename):
try:
                        pd_nisi = pd.read_csv(filename,index_col=0)
import sys, os
sys.path.append('yolov3_detector')
from yolov3_custom_helper import yolo_detector
from darknet import Darknet
sys.path.append('pytorch-YOLOv4')
from tool.darknet2pytorch import Darknet as DarknetYolov4
import argparse
import cv2,time
import numpy as np
from tool.plateprocessing import find_coordinates, plate_to_string, padder, get_color
from tool.utils import alphanumeric_segemntor,plot_boxes_cv2
from tool.torch_utils import *
import time
from utility_codes.tsv_converter import ConverterTSV
use_cuda = True
#################### PLATE ####################
cfg_v4 = 'pytorch-YOLOv4/cfg/yolo-obj.cfg'
weight_v4 = 'weights/plate.weights'
m = DarknetYolov4(cfg_v4)
m.load_weights(weight_v4)
num_classes = m.num_classes
class_names = ['plate']
print('Loading weights from %s... Done!' % (weight_v4))
if use_cuda:
m.cuda()
# m_alpha.cuda()
# yolo_vehicle.cuda()
vehicle_save_filename = 'tsv_files/plate_tester.tsv'
vehicle_writer = ConverterTSV(vehicle_save_filename,file_type='vehicle')
image_dir = 'SIH_hackathon/Detection_Day3/Day3'
image_files = os.listdir(image_dir)
image_files.sort()
OUTPUT_SIZE = (1280, 720)
for img_name in image_files:
frame = cv2.imread(os.path.join(image_dir, img_name))
h, w = frame.shape[0:2]
sized = cv2.resize(frame, (m.width, m.height))
sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
confidence = 0.2
boxes = do_detect(m, sized, confidence , 0.6, use_cuda)
result_img, cls_conf_plate, coordinates_all, labels = plot_boxes_cv2(frame, boxes[0],classes_to_detect=class_names,fontScale=0.5,thick=2, savename=False, class_names=class_names)
cls_conf_plate = float(cls_conf_plate)
for i,co in enumerate(coordinates_all):
print(co)
data = [img_name, co, labels[i]]
vehicle_writer.put_vehicle(img_name, co, 'plate')
# vehicle_writer.put_vehicle(img_loc, c, 'plate')
cv2.imshow('Image', result_img)
if cv2.waitKey(1) & 0xff == ord('q'):
break
# cv2.waitKey(0)
cv2.destroyAllWindows()
import pandas as pd
def merge_and_save(fp1, fp2, outfile_path):
    tsv_file1 = pd.read_csv(fp1, sep='\t', header=0)
import inspect
import os
import datetime
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal, assert_frame_equal
from numpy.testing import assert_allclose
from pvlib import tmy
from pvlib import pvsystem
from pvlib import clearsky
from pvlib import irradiance
from pvlib import atmosphere
from pvlib import solarposition
from pvlib.location import Location
from conftest import needs_numpy_1_10, requires_scipy
latitude = 32.2
longitude = -111
tus = Location(latitude, longitude, 'US/Arizona', 700, 'Tucson')
times = pd.date_range(start=datetime.datetime(2014,1,1),
end=datetime.datetime(2014,1,2), freq='1Min')
ephem_data = solarposition.get_solarposition(times,
latitude=latitude,
longitude=longitude,
method='nrel_numpy')
am = atmosphere.relativeairmass(ephem_data.apparent_zenith)
irrad_data = clearsky.ineichen(ephem_data['apparent_zenith'], am,
linke_turbidity=3)
aoi = irradiance.aoi(0, 0, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
meta = {'latitude': 37.8,
'longitude': -122.3,
'altitude': 10,
'Name': 'Oakland',
'State': 'CA',
'TZ': -8}
pvlib_abspath = os.path.dirname(os.path.abspath(inspect.getfile(tmy)))
tmy3_testfile = os.path.join(pvlib_abspath, 'data', '703165TY.csv')
tmy2_testfile = os.path.join(pvlib_abspath, 'data', '12839.tm2')
tmy3_data, tmy3_metadata = tmy.readtmy3(tmy3_testfile)
tmy2_data, tmy2_metadata = tmy.readtmy2(tmy2_testfile)
def test_systemdef_tmy3():
expected = {'tz': -9.0,
'albedo': 0.1,
'altitude': 7.0,
'latitude': 55.317,
'longitude': -160.517,
'name': '"SAND POINT"',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 0}
assert expected == pvsystem.systemdef(tmy3_metadata, 0, 0, .1, 5, 5)
def test_systemdef_tmy2():
expected = {'tz': -5,
'albedo': 0.1,
'altitude': 2.0,
'latitude': 25.8,
'longitude': -80.26666666666667,
'name': 'MIAMI',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 0}
assert expected == pvsystem.systemdef(tmy2_metadata, 0, 0, .1, 5, 5)
def test_systemdef_dict():
expected = {'tz': -8, ## Note that TZ is float, but Location sets tz as string
'albedo': 0.1,
'altitude': 10,
'latitude': 37.8,
'longitude': -122.3,
'name': 'Oakland',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 5}
assert expected == pvsystem.systemdef(meta, 5, 0, .1, 5, 5)
@needs_numpy_1_10
def test_ashraeiam():
thetas = np.linspace(-90, 90, 9)
iam = pvsystem.ashraeiam(thetas, .05)
expected = np.array([ nan, 0.9193437 , 0.97928932, 0.99588039, 1. ,
0.99588039, 0.97928932, 0.9193437 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_PVSystem_ashraeiam():
module_parameters = pd.Series({'b': 0.05})
system = pvsystem.PVSystem(module_parameters=module_parameters)
thetas = np.linspace(-90, 90, 9)
iam = system.ashraeiam(thetas)
expected = np.array([ nan, 0.9193437 , 0.97928932, 0.99588039, 1. ,
0.99588039, 0.97928932, 0.9193437 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_physicaliam():
thetas = np.linspace(-90, 90, 9)
iam = pvsystem.physicaliam(thetas, 1.526, 0.002, 4)
expected = np.array([ nan, 0.8893998 , 0.98797788, 0.99926198, nan,
0.99926198, 0.98797788, 0.8893998 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_PVSystem_physicaliam():
module_parameters = pd.Series({'K': 4, 'L': 0.002, 'n': 1.526})
system = pvsystem.PVSystem(module_parameters=module_parameters)
thetas = np.linspace(-90, 90, 9)
iam = system.physicaliam(thetas)
expected = np.array([ nan, 0.8893998 , 0.98797788, 0.99926198, nan,
0.99926198, 0.98797788, 0.8893998 , nan])
assert_allclose(iam, expected, equal_nan=True)
# if this completes successfully we'll be able to do more tests below.
@pytest.fixture(scope="session")
def sam_data():
data = {}
data['cecmod'] = pvsystem.retrieve_sam('cecmod')
data['sandiamod'] = pvsystem.retrieve_sam('sandiamod')
data['cecinverter'] = pvsystem.retrieve_sam('cecinverter')
return data
@pytest.fixture(scope="session")
def sapm_module_params(sam_data):
modules = sam_data['sandiamod']
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = modules[module]
return module_parameters
@pytest.fixture(scope="session")
def cec_module_params(sam_data):
modules = sam_data['cecmod']
module = 'Example_Module'
module_parameters = modules[module]
return module_parameters
def test_sapm(sapm_module_params):
times = pd.DatetimeIndex(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1, 0.5, 1.1, np.nan, 1], index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with a dict input
pvsystem.sapm(effective_irradiance, temp_cell,
sapm_module_params.to_dict())
def test_PVSystem_sapm(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
    times = pd.DatetimeIndex(start='2015-01-01', periods=5, freq='12H')
import warnings
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pydotplus
import graphviz
import os
if __name__ == "__main__":
warnings.filterwarnings(action='ignore')
pd.set_option('display.max_rows', None)
# Edit (10/19) cleaned_3.csv contains only responses from the core section
# columns.
os.listdir(os.getcwd())
    data = pd.read_csv("BRFSS_core_cleaned.csv", decimal=',')
from bs4 import BeautifulSoup
import pandas as pd
from pprint import pprint
import re
import demjson
from utils import Apps
class DetailPage:
def __init__(self, app, page_source):
        self.df_change_log = pd.DataFrame()
from kfp.components import InputPath, OutputPath
from kfp.v2.dsl import (Artifact,
Dataset,
Input,
Model,
Output,
Metrics,
ClassificationMetrics)
def get_full_tech_indi(
# tech_indi_dataset01_path: InputPath('DataFrame'),
# tech_indi_dataset02_path: InputPath('DataFrame'),
# tech_indi_dataset03_path: InputPath('DataFrame'),
# tech_indi_dataset04_path: InputPath('DataFrame'),
# tech_indi_dataset05_path: InputPath('DataFrame'),
# full_tech_indi_dataset_path: OutputPath('DataFrame')
tech_indi_dataset01: Input[Dataset],
tech_indi_dataset02: Input[Dataset],
tech_indi_dataset03: Input[Dataset],
tech_indi_dataset04: Input[Dataset],
tech_indi_dataset05: Input[Dataset],
tech_indi_dataset06: Input[Dataset],
tech_indi_dataset07: Input[Dataset],
tech_indi_dataset08: Input[Dataset],
tech_indi_dataset09: Input[Dataset],
tech_indi_dataset10: Input[Dataset],
tech_indi_dataset11: Input[Dataset],
full_tech_indi_dataset: Output[Dataset]
):
import pandas as pd
df_01 = pd.read_pickle(tech_indi_dataset01.path)
df_02 = pd.read_pickle(tech_indi_dataset02.path)
df_03 = pd.read_pickle(tech_indi_dataset03.path)
df_04 = pd.read_pickle(tech_indi_dataset04.path)
df_05 = pd.read_pickle(tech_indi_dataset05.path)
df_06 = pd.read_pickle(tech_indi_dataset06.path)
    df_07 = pd.read_pickle(tech_indi_dataset07.path)
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.024011, "end_time": "2021-02-02T22:30:31.951734", "exception": false, "start_time": "2021-02-02T22:30:31.927723", "status": "completed"} tags=[]
# # QA queries on new CDR_deid COPE Survey
#
# Quality checks performed on a new CDR_deid dataset using QA queries
# + papermill={"duration": 0.709639, "end_time": "2021-02-02T22:30:32.661373", "exception": false, "start_time": "2021-02-02T22:30:31.951734", "status": "completed"} tags=[]
import urllib
import pandas as pd
pd.options.display.max_rows = 120
# + papermill={"duration": 0.023643, "end_time": "2021-02-02T22:30:31.880820", "exception": false, "start_time": "2021-02-02T22:30:31.857177", "status": "completed"} tags=["parameters"]
# Parameters
project_id = ""
com_cdr = ""
deid_cdr = ""
deid_sandbox=""
# deid_base_cdr=""
# -
# df will have a summary in the end
df = pd.DataFrame(columns = ['query', 'result'])
# + [markdown] papermill={"duration": 0.02327, "end_time": "2021-02-02T22:30:32.708257", "exception": false, "start_time": "2021-02-02T22:30:32.684987", "status": "completed"} tags=[]
# # 1 Verify that the COPE Survey data identified to be suppressed as a de-identification action in the OBSERVATION table has been removed from the de-id dataset.
#
# See the spreadsheet "COPE - All Surveys Privacy Rules" for details:
#
# https://docs.google.com/spreadsheets/d/1UuUVcRdlp2HkBaVdROFsM4ZX_bfffg6ZoEbqj94MlXU/edit#gid=0
#
# Related tickets [DC-892] [DC-1752]
#
# [DC-1752] Refactor analysis 1 so that it provides the observation_source_concept_id, concept_code, concept_name, vocabulary_id, row count per cope survey concept (example query below). Reword the title text to read: Verify that the COPE Survey concepts identified to be suppressed as de-identification action have been removed.
#
# [DC-1784] 1310144, 1310145, 1310148, 715725, 715724
#
# The following concepts should be suppressed
#
# 715711, 1333327, 1333326, 1333014, 1333118, 1332742,1333324 ,1333012 ,1333234,
#
# 903632,702686,715714, 715724, 715725, 715726, 1310054, 1310058, 1310066, 1310146, 1310147, 1333234, 1310065,
#
# 596884, 596885, 596886, 596887, 596888, 596889, 1310137,1333016,1310148,1310145,1310144
# +
query = f'''
SELECT observation_source_concept_id, concept_name,concept_code,vocabulary_id,observation_concept_id,COUNT(1) AS n_row_not_pass FROM
`{project_id}.{deid_cdr}.observation` ob
JOIN `{project_id}.{deid_cdr}.concept` c
ON ob.observation_source_concept_id=c.concept_id
WHERE observation_source_concept_id IN
(715711, 1333327, 1333326, 1333014, 1333118, 1332742,1333324 ,1333012 ,1333234,
903632,702686,715714, 715724, 715725, 715726, 1310054, 1310058, 1310066, 1310146, 1310147, 1333234, 1310065,
596884, 596885, 596886, 596887, 596888, 596889, 1310137,1333016,1310148,1310145,1310144)
OR observation_concept_id IN
(715711, 1333327, 1333326, 1333014, 1333118, 1332742,1333324 ,1333012 ,1333234,
903632,702686,715714, 715724, 715725, 715726, 1310054, 1310058, 1310066, 1310146, 1310147, 1333234, 1310065,
596884, 596885, 596886, 596887, 596888, 596889, 1310137,1333016,1310148,1310145,1310144)
GROUP BY 1,2,3,4,5
ORDER BY n_row_not_pass DESC
'''
df1=pd.read_gbq(query, dialect='standard')
if df1['n_row_not_pass'].sum()==0:
df = df.append({'query' : 'Query1 No COPE in deid_observation table', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query1 No COPE in deid_observation table' , 'result' : ''},
ignore_index = True)
df1
# + [markdown] papermill={"duration": 0.023633, "end_time": "2021-02-02T22:30:36.860798", "exception": false, "start_time": "2021-02-02T22:30:36.837165", "status": "completed"} tags=[]
# # 2 Verify if a survey version is provided for the COPE survey.
#
# [DC-1040]
#
# Expected results: every person_id and questionnaire_response_id has a survey_version_concept_id.
# (The original SQL missed some of these cases.)
#
# These survey version concepts should be generalized: 2100000002, 2100000003, 2100000004.
# -
query = f'''
WITH df1 as (
SELECT distinct survey_version_concept_id
FROM `{project_id}.{deid_cdr}.concept` c1
LEFT JOIN `{project_id}.{deid_cdr}.concept_relationship` cr ON cr.concept_id_2 = c1.concept_id
JOIN `{project_id}.{deid_cdr}.observation` ob on ob.observation_concept_id=c1.concept_id
LEFT JOIN `{project_id}.{deid_cdr}.observation_ext` ext USING(observation_id)
WHERE
cr.concept_id_1 IN (1333174,1333343,1333207,1333310,1332811,1332812,1332715,1332813,1333101,1332814,1332815,1332816,1332817,1332818)
AND cr.relationship_id = "PPI parent code of"
)
SELECT COUNT (*) AS n_row_not_pass FROM df1
WHERE survey_version_concept_id=0 or survey_version_concept_id IS NULL
'''
df1=pd.read_gbq(query, dialect='standard')
if df1.loc[0].sum()==0:
df = df.append({'query' : 'Query2 survey version provided', 'result' : 'PASS'},
ignore_index = True)
else:
df = df.append({'query' : 'Query2 survey version provided', 'result' : ''},
ignore_index = True)
df1
# +
# new cdr
query = f'''
SELECT
distinct survey_version_concept_id
FROM `{project_id}.{deid_cdr}.observation` d
JOIN `{project_id}.{deid_cdr}.observation_ext` e
ON e.observation_id = d.observation_id
'''
df1=pd.read_gbq(query, dialect='standard')
df1.style.format("{:.0f}")
# + [markdown] papermill={"duration": 0.023649, "end_time": "2021-02-02T22:30:39.115495", "exception": false, "start_time": "2021-02-02T22:30:39.091846", "status": "completed"} tags=[]
# # 3 Verify that all structured concepts related to COVID are NOT suppressed in EHR tables
#
# DC-891
#
# 756055,4100065,37311061,439676,37311060,45763724
#
# Update: Remove analyses 3, 4, and 5, as suppression of COVID concepts is no longer part of RT privacy requirements [DC-1752].
# +
query = f'''
SELECT measurement_concept_id, concept_name,concept_code,vocabulary_id,COUNT(1) AS n_row_not_pass FROM
`{project_id}.{deid_cdr}.measurement` ob
JOIN `{project_id}.{deid_cdr}.concept` c
ON ob.measurement_concept_id=c.concept_id
WHERE measurement_concept_id=756055
GROUP BY 1,2,3,4
ORDER BY n_row_not_pass DESC
'''
df1=pd.read_gbq(query, dialect='standard')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 11 12:08:22 2017
@author: yazar
"""
import numpy as np
import pandas as pd
from scipy import linalg
from sklearn import preprocessing
from matplotlib import pyplot as plt
from scipy import optimize
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def _RSS(theta, X, y):
# number of training examples
m = len(y)
theta = theta.reshape(-1, 1)
y = y.reshape(-1, 1)
prediction = np.dot(X, theta)
mean_error = prediction - y
return 1/(2*m) * np.sum(np.power(mean_error, 2))
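# A small worked check of _RSS with illustrative numbers: here X.dot(theta) = [1, 2],
# so a perfect fit costs 0, and unit errors on both of the m=2 samples cost
# 1/(2*2) * (1 + 1) = 0.5.
#
#   >>> X = np.array([[1.0, 1.0], [1.0, 2.0]])
#   >>> theta = np.array([0.0, 1.0])
#   >>> _RSS(theta, X, np.array([1.0, 2.0]))
#   0.0
#   >>> _RSS(theta, X, np.array([2.0, 3.0]))
#   0.5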
def _logisticCostFunc(theta, X, y):
""" compute cost for logistic regression
Parameters:
-----------
theta : ndarray, shape (n_features,)
Regression coefficients
X : {array-like}, shape (n_samples, n_features)
Training data. Should include intercept.
y : ndarray, shape (n_samples,)
Target values
Returns
-------
cost : float
cost evaluation using logistic cost function
"""
# number of training examples
m = len(y)
y = y.reshape(-1, 1)
theta = theta.reshape(-1, 1)
J = 1/m * (np.dot(-y.T, np.log(sigmoid(np.dot(X, theta)))) -
np.dot((1-y).T, np.log(1 - sigmoid(np.dot(X, theta)))))
return np.asscalar(J)
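# A quick worked check of _logisticCostFunc with illustrative numbers: at theta = 0
# every prediction is sigmoid(0) = 0.5, so the cost is -log(0.5) = log(2) ~ 0.6931
# regardless of the labels.
#
#   >>> X = np.array([[1.0, 0.0], [1.0, 1.0]])
#   >>> y = np.array([0.0, 1.0])
#   >>> round(_logisticCostFunc(np.zeros(2), X, y), 4)
#   0.6931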
def compute_gradient(theta, X, y):
""" Compute gradient. This will be passed to minimization functions
Parameters:
-----------
theta : ndarray, shape (n_features,)
Regression coefficients
X : {array-like}, shape (n_samples, n_features)
Training data. Should include intercept.
y : ndarray, shape (n_samples,)
Target values
Returns
-------
gradient : ndarray, shape (n_features,)
"""
m = len(y)
y = y.reshape(-1, 1)
theta = theta.reshape(-1, 1)
grad = 1/m * np.dot(X.T, sigmoid(np.dot(X, theta)) - y)
return grad.ravel()
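# A small worked check of compute_gradient with illustrative numbers: at theta = 0
# the sigmoid outputs are all 0.5, so the gradient reduces to (1/m) * X.T.dot(0.5 - y).
#
#   >>> X = np.array([[1.0, 0.0], [1.0, 1.0]])
#   >>> y = np.array([0.0, 1.0])
#   >>> np.allclose(compute_gradient(np.zeros(2), X, y), [0.0, -0.25])
#   True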
def compute_cost(theta, X, y, method='RSS'):
""" Compute cost to be used in gradient descent
Parameters:
-----------
X : {array-like}, shape (n_samples, n_features)
Training data. Should include intercept.
y : ndarray, shape (n_samples,)
Target values
theta : ndarray, shape (n_features,)
Regression coefficients
    method : str, default 'RSS'
             Cost calculation method; 'RSS' and 'logistic' are supported
Returns
-------
cost : float
"""
print("cost method is {0}".format(method))
if method == 'RSS':
return _RSS(theta, X, y)
elif method == 'logistic':
return _logisticCostFunc(theta, X, y)
else:
raise ValueError("only 'RSS' and 'Logistic' methods are supported.")
def normalEqn(X, y):
""" Computes the closed-form solution to linear regression
using the normal equations.
Parameters:
-----------
X : {array-like}, shape (n_samples, n_features)
Training data. Should include intercept.
y : ndarray, shape (n_samples,)
Target values
Returns
-------
theta : {array-like}, shape (n_features,)
"""
theta = np.dot(np.dot(linalg.inv(np.dot(X.T, X)), X.T), y)
return theta
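# A minimal sketch of normalEqn() on a toy problem, assuming X already carries an
# intercept column (the numbers are illustrative): the exact fit y = 1 + 2*x is recovered.
#
#   >>> X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
#   >>> y = np.array([1.0, 3.0, 5.0])
#   >>> np.allclose(normalEqn(X, y), [1.0, 2.0])
#   True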
def gradient_descent(X, y, theta, learning_rate, num_iters, cost_func='RSS',
):
""" Performs gradient descent to learn theta
theta = gradient_descent(x, y, theta, alpha, num_iters) updates theta
by taking num_iters gradient steps with learning rate alpha
Parameters:
-----------
X : {array-like}, shape (n_samples, n_features)
Training data. Should include intercept.
y : ndarray, shape (n_samples,)
Target values
theta : ndarray, shape (n_features,)
Regression coefficients
learning_rate : float
Controls the speed of convergence, a.k.a alpha
    cost_func : str, default 'RSS'
        Cost calculation method; 'RSS' and 'logistic' are supported
num_iters : int
Number of iterations
Returns
-------
calculated theta : ndarray, shape (n_features,)
Regression coefficients that minimize the cost function
cost : ndarray, shape (num_iters,)
Cost calculated for each iteration
"""
print("running gradient descent algorithm...")
# Initialize some useful values
m = len(y) # number of training examples
cost = np.zeros((num_iters,))
y = y.reshape(-1, 1)
for i in range(num_iters):
# Perform a single gradient step on the parameter vector
prediction = np.dot(X, theta) # m size vector
mean_error = prediction - y # m size vector
theta = theta - learning_rate/m * np.dot(X.T, mean_error)
# Save the cost J in every iteration
        cost[i] = compute_cost(theta, X, y, method=cost_func)
return theta, cost
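# A minimal usage sketch of gradient_descent(), assuming features are already scaled
# and X includes an intercept column; the learning rate and iteration count below are
# illustrative rather than tuned values.
#
#   >>> X = np.c_[np.ones(3), np.array([0.0, 1.0, 2.0])]
#   >>> y = np.array([1.0, 3.0, 5.0])
#   >>> theta, cost = gradient_descent(X, y, np.zeros((2, 1)), 0.1, 500)
#   >>> bool(cost[-1] < cost[0])   # the RSS cost decreases over the iterations
#   True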
def plotData(X, y, ax1=None):
train_y = pd.DataFrame(y, columns=['y'])
    train = pd.concat((pd.DataFrame(X), train_y), axis=1)
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
map, raise_with_traceback, range, string_types, u, unichr, zip)
from pandas.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
bdate_range)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import pandas.core.common as com
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
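# A minimal usage sketch of round_trip_pickle(); the frame below is illustrative. The
# pickle is written to a temporary path that ensure_clean removes afterwards.
#
#   >>> df = pd.DataFrame({"a": [1, 2, 3]})
#   >>> round_trip_pickle(df).equals(df)
#   True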
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
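# A minimal usage sketch of decompress_file(); the path is illustrative and must point
# at an existing gzip-compressed CSV for this to run.
#
#   >>> with decompress_file("data.csv.gz", compression="gzip") as f:
#   ...     df = pd.read_csv(f)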
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
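# Editor-added illustrative sketch (not part of the original module):
# assert_almost_equal dispatches on the type of ``left`` and, with
# check_less_precise=True, tolerates small floating-point differences.
def _example_assert_almost_equal():
    assert_almost_equal(0.123456, 0.123457, check_less_precise=True)
    assert_almost_equal(pd.Series([0.1, 0.2]), pd.Series([0.1, 0.2]))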
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
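# Editor-added illustrative sketch (not part of the original module): the
# rands*/randu* helpers above generate throwaway string data, e.g. for
# building test indexes or columns.
def _example_random_strings():
    s = rands(10)                        # one random 10-character string
    arr = rands_array(nchars=5, size=3)  # object array of three 5-char strings
    assert len(s) == 10
    assert arr.shape == (3,) and all(len(x) == 5 for x in arr)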
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
            locale.Error):  # horrible name for an Exception subclass
return False
else:
return True
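# Editor-added illustrative sketch (not part of the original module): only
# switch locales when the target locale is actually available on the system.
# "en_US.UTF-8" is an arbitrary example, not a guaranteed locale.
def _example_set_locale():
    if can_set_locale('en_US.UTF-8'):
        with set_locale('en_US.UTF-8') as normalized:
            assert 'en_US' in normalized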
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
    locales : list of str
        The locale strings to validate, e.g. the split output of ``locale -a``.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
r"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
r"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
        If None, creates a temporary file which is then removed when out of
        scope. If passed, creates a temporary file with ``filename`` as its suffix.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
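# Editor-added illustrative sketch (not part of the original module): write a
# DataFrame to a temporary CSV path that is removed when the block exits. The
# file suffix and frame contents are arbitrary.
def _example_ensure_clean():
    df = pd.DataFrame({'a': [1, 2, 3]})
    with ensure_clean('__example__.csv') as path:
        df.to_csv(path, index=False)
        result = pd.read_csv(path)
    assert_frame_equal(result, df)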
@contextmanager
def ensure_clean_dir():
"""
    Gets a temporary directory path and agrees to remove it on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix='')
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except Exception:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
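# Editor-added illustrative sketch (not part of the original module):
# environment changes made inside the block do not leak outside it. The
# variable name is arbitrary.
def _example_ensure_safe_environment_variables():
    with ensure_safe_environment_variables():
        os.environ['__EXAMPLE_ENV_VAR__'] = '1'
    assert '__EXAMPLE_ENV_VAR__' not in os.environ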
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
        Whether to compare numbers exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for more user-friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
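# Editor-added illustrative sketch (not part of the original module): with the
# default exact='equiv', a RangeIndex compares equal to the equivalent
# Int64Index; with exact=True the class mismatch would raise.
def _example_assert_index_equal():
    assert_index_equal(pd.RangeIndex(3), pd.Index([0, 1, 2]))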
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {'Int64Index', 'RangeIndex'}):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ("one of 'objs' is not a matplotlib Axes instance, type "
"encountered {name!r}").format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), (
'objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
.format(name=objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
def assert_interval_array_equal(left, right, exact='equiv',
obj='IntervalArray'):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_index_equal(left.right, right.right, exact=exact,
                       obj='{obj}.right'.format(obj=obj))
assert_attr_equal('closed', left, right, obj=obj)
def assert_period_array_equal(left, right, obj='PeriodArray'):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj='DatetimeArray'):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
assert_attr_equal('tz', left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if PY2 and isinstance(left, string_types):
# left needs to be printable in native text type in python2
left = left.encode('utf-8')
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
if PY2 and isinstance(right, string_types):
# right needs to be printable in native text type in python2
right = right.encode('utf-8')
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
check_same=None, obj='numpy array'):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
    # both objects must be np.ndarray instances
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
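# Editor-added illustrative sketch (not part of the original module): identical
# arrays pass, and check_same can additionally require (or forbid) that both
# arrays share the same underlying memory.
def _example_assert_numpy_array_equal():
    arr = np.array([1, 2, 3])
    assert_numpy_array_equal(arr, arr.copy())
    assert_numpy_array_equal(arr, arr.view(), check_same='same')
    assert_numpy_array_equal(arr, arr.copy(), check_same='copy')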
def assert_extension_array_equal(left, right, check_dtype=True,
check_less_precise=False,
check_exact=False):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
        Whether to compare numbers exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), 'left is not an ExtensionArray'
assert isinstance(right, ExtensionArray), 'right is not an ExtensionArray'
if check_dtype:
assert_attr_equal('dtype', left, right, obj='ExtensionArray')
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj='ExtensionArray NA mask')
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj='ExtensionArray')
else:
_testing.assert_almost_equal(left_valid, right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj='ExtensionArray')
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
        Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (is_categorical_dtype(left) and is_categorical_dtype(right) and
not check_categorical):
pass
else:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = ('[datetimelike_compat=True] {left} is not equal to '
'{right}.').format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and
is_extension_array_dtype(right) and not is_categorical_dtype(right)):
return assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
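# Editor-added illustrative sketch (not part of the original module):
# check_dtype=False lets an int64 Series compare equal to the float64 version
# of the same data; with the default check_dtype=True this would raise.
def _example_assert_series_equal():
    assert_series_equal(Series([1, 2, 3]), Series([1.0, 2.0, 3.0]),
                        check_dtype=False)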
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool / string {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical, i.e.
* left.index.names == right.index.names
* left.columns.names == right.columns.names
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
        Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas.util.testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
AssertionError: Attributes are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'{shape!r}'.format(shape=left.shape),
'{shape!r}'.format(shape=right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.columns'.format(obj=obj))
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {idx}]'.format(idx=i))
def assert_panel_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
    check_names : bool, default False
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
msg = "non-matching item (right) '{item}'".format(item=item)
assert item in right, msg
litem = left.iloc[i]
ritem = right.iloc[i]
assert_frame_equal(litem, ritem,
check_less_precise=check_less_precise,
check_names=check_names)
for i, item in enumerate(right._get_axis(0)):
msg = "non-matching item (left) '{item}'".format(item=item)
assert item in left, msg
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
**kwargs
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
else:
raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
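# Editor-added illustrative sketch (not part of the original module): wrap the
# same expected values in different containers, as parametrized tests
# typically do. The input data are arbitrary.
def _example_box_expected():
    data = [1, 2, 3]
    assert isinstance(box_expected(data, pd.Index), pd.Index)
    assert isinstance(box_expected(data, pd.Series), pd.Series)
    assert box_expected(data, pd.DataFrame, transpose=True).shape == (1, 3)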
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True, check_kind=True,
check_fill_value=True,
consolidate_block_indices=False):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not check_kind:
left_index = left.sp_index.to_block_index()
right_index = right.sp_index.to_block_index()
else:
left_index = left.sp_index
right_index = right.sp_index
if consolidate_block_indices and left.kind == 'block':
# we'll probably remove this hack...
left_index = left_index.to_int_index().to_block_index()
right_index = right_index.to_int_index().to_block_index()
if not left_index.equals(right_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left_index, right_index)
else:
        # indices are equal; nothing to do
pass
if check_fill_value:
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_sp_array_equal(left.values, right.values,
check_kind=check_kind,
check_fill_value=check_fill_value,
consolidate_block_indices=consolidate_block_indices)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(np.asarray(left.values),
np.asarray(right.values))
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
check_kind : bool, default True
        Whether to just compare the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_index_equal(left.columns, right.columns,
obj='{obj}.columns'.format(obj=obj))
if check_fill_value:
assert_attr_equal('default_fill_value', left, right, obj=obj)
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(
series, right[col],
check_dtype=check_dtype,
check_kind=check_kind,
check_fill_value=check_fill_value,
consolidate_block_indices=consolidate_block_indices
)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '{key!r}'".format(key=k)
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = ("Expected object {obj1!r} and object {obj2!r} to be "
"different objects, but they were the same object."
).format(obj1=type(elem1), obj2=type(elem2))
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2**63 + i for i in lrange(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq='D', name=None, **kwargs):
return pd.timedelta_range(start='1 day', periods=k, freq=freq,
name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product(
(('foo', 'bar'), (1, 2)), names=names, **kwargs)
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex, makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
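# Editor-added illustrative sketch (not part of the original module): iterate
# over one instance of each index class, e.g. to smoke-test a helper against
# all of them.
def _example_all_index_generator():
    for index in all_index_generator(k=5):
        assert isinstance(index, Index)
        assert len(index) == 5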
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeRangeIndex,
makeIntervalIndex, makeCategoricalIndex,
makeMultiIndex
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
    which represent time series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return {c: Series(randn(N), index=index) for c in getCols(K)}
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
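# Editor-added illustrative sketch (not part of the original module): the
# make* helpers build small throwaway frames; the shape comes from the
# module-level N and K constants assumed to be defined above.
def _example_make_frames():
    df = makeDataFrame()
    assert df.shape == (N, K)
    mixed = makeMixedDataFrame()
    assert list(mixed.columns) == ['A', 'B', 'C', 'D']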
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
with warnings.catch_warnings(record=True):
warnings.filterwarnings("ignore", "\\nPanel", FutureWarning)
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = {c: makeTimeDataFrame(nper) for c in cols}
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
with warnings.catch_warnings(record=True):
warnings.filterwarnings("ignore", "\\nPanel", FutureWarning)
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = {c: makePeriodFrame(nper) for c in cols}
return Panel.fromDict(data)
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
assert (names is None or names is False or
names is True or len(names) is nlevels)
assert idx_type is None or (idx_type in ('i', 'f', 's', 'u',
'dt', 'p', 'td')
and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
    # make singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex,
s=makeStringIndex, u=makeUnicodeIndex,
dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"{idx_type}" is not a legal value for `idx_type`, '
                         'use "i"/"f"/"s"/"u"/"dt"/"p"/"td".'
.format(idx_type=idx_type))
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = '{prefix}_l{i}_g{j}'.format(prefix=prefix, i=i, j=j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = lzip(*tuples)
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
        index = Index(tuples[0], name=names[0])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import sys
import traceback
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time
import pandas as pd
import scrape_common as sc
def load_with_selenium(url):
options = Options()
options.headless = True
driver = webdriver.Firefox(options=options)
driver.get(url)
wait = WebDriverWait(driver, 10)
wait.until(EC.presence_of_element_located((By.CLASS_NAME, "columnHeaders")))
wait.until(EC.presence_of_element_located((By.XPATH, "//input[contains(@class, 'date-slicer-input')]")))
# select the complete date range by setting 2020-02-24 as start date
begin = driver.find_element(By.XPATH, "//input[contains(@class, 'date-slicer-input')]")
begin.click()
begin.send_keys(Keys.CONTROL + "a")
begin.send_keys(Keys.DELETE)
begin.clear()
begin.send_keys("2/24/2020") # 2020-02-24 is the date of the earliest data from JU
begin.send_keys(Keys.ENTER)
driver.find_element(By.XPATH, "//div[contains(@class, 'slicer-header')]").click()
time.sleep(5)
return driver
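# Editor-added illustrative sketch (not part of the original script): how the
# loader above might be driven from other code. The URL is a placeholder
# assumption, not taken from this script.
def _example_load(url='https://example.org/covid-dashboard'):
    driver = load_with_selenium(url)
    try:
        html = driver.page_source
    finally:
        driver.quit()
    return html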
def scrape_page_part(html):
table = BeautifulSoup(html, 'html.parser')
headers = [" ".join(cell.stripped_strings) for cell in table.find(class_='columnHeaders').find_all('div', class_='pivotTableCellWrap')]
assert len(headers) == 6, f"Number of headers changed: {len(headers)} != 6"
assert headers[0] == 'Date', f"Header changed to {headers[0]}"
assert headers[1] == 'Nouveaux cas', f"Header changed to {headers[1]}"
assert headers[2] == 'Cumul des cas confirmés', f"Header changed to {headers[2]}"
assert headers[3] == 'Cas actuellement hospitalisés', f"Header changed to {headers[3]}"
assert headers[4] == 'Cas actuellement en soins intensifs', f"Header changed to {headers[4]}"
assert headers[5] == 'Nouveaux décès', f"Header changed to {headers[5]}"
columns = table.find(class_='bodyCells').find('div', recursive=False).find('div', recursive=False).findChildren('div', recursive=False)
assert len(columns) == 6, f"Number of columns changed: {len(columns)} != 6"
cols = {}
for i, col in enumerate(columns):
values = []
for cell in col.find_all('div'):
values.append(" ".join(cell.stripped_strings).strip())
cols[headers[i]] = values
    rows = pd.DataFrame.from_dict(cols)
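    # A minimal sketch of how the helpers above might be wired together (the URL
    # is a placeholder, and it assumes scrape_page_part goes on to return the
    # DataFrame it builds):
    #
    #   driver = load_with_selenium("https://example.com/ju-covid-dashboard")
    #   page_df = scrape_page_part(driver.page_source)
    #   driver.quit()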
'''
Naming Conventions for Features:
c_ = categorical
i_ = categoricals as indexes
n_ = numerical
b_ = binary
d_ = date
'''
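# For illustration, hypothetical column names following the conventions above
# might look like: c_country, i_country, n_income, b_is_active, d_signup_date.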
from . import series, dataframe,\
dataframe_engineer, dataframe_format_convert
import pandas as pd
from . import misc
def _extend_df(name, function):
    df = pd.DataFrame({})
import requests
import pandas as pd
import holoviews as hv
# Instead of using hv.extension, grab a bokeh renderer
renderer = hv.renderer('bokeh')
data = requests.get("https://squash-api.lsst.codes/measurements").json()
meas_df = pd.DataFrame.from_dict(data)
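# A small sketch of how the bokeh renderer grabbed above can be used without
# hv.extension (the 'metric'/'value' column names are assumptions about the
# squash API payload, not guaranteed):
#
#   curve = hv.Curve(meas_df, 'metric', 'value')
#   renderer.save(curve, 'measurements')  # writes measurements.html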
import copy
import re
from textwrap import dedent
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
jinja2 = pytest.importorskip("jinja2")
from pandas.io.formats.style import ( # isort:skip
Styler,
)
from pandas.io.formats.style_render import (
_get_level_lengths,
_get_trimming_maximums,
maybe_convert_css_to_tuples,
non_reducing_slice,
)
@pytest.fixture
def mi_df():
return DataFrame(
[[1, 2], [3, 4]],
index=MultiIndex.from_product([["i0"], ["i1_a", "i1_b"]]),
columns=MultiIndex.from_product([["c0"], ["c1_a", "c1_b"]]),
dtype=int,
)
@pytest.fixture
def mi_styler(mi_df):
return Styler(mi_df, uuid_len=0)
@pytest.fixture
def mi_styler_comp(mi_styler):
# comprehensively add features to mi_styler
mi_styler = mi_styler._copy(deepcopy=True)
mi_styler.css = {**mi_styler.css, **{"row": "ROW", "col": "COL"}}
mi_styler.uuid_len = 5
mi_styler.uuid = "abcde"
mi_styler.set_caption("capt")
mi_styler.set_table_styles([{"selector": "a", "props": "a:v;"}])
mi_styler.hide(axis="columns")
mi_styler.hide([("c0", "c1_a")], axis="columns", names=True)
mi_styler.hide(axis="index")
mi_styler.hide([("i0", "i1_a")], axis="index", names=True)
mi_styler.set_table_attributes('class="box"')
mi_styler.format(na_rep="MISSING", precision=3)
mi_styler.format_index(precision=2, axis=0)
mi_styler.format_index(precision=4, axis=1)
mi_styler.highlight_max(axis=None)
mi_styler.applymap_index(lambda x: "color: white;", axis=0)
mi_styler.applymap_index(lambda x: "color: black;", axis=1)
mi_styler.set_td_classes(
DataFrame(
[["a", "b"], ["a", "c"]], index=mi_styler.index, columns=mi_styler.columns
)
)
mi_styler.set_tooltips(
DataFrame(
[["a2", "b2"], ["a2", "c2"]],
index=mi_styler.index,
columns=mi_styler.columns,
)
)
return mi_styler
@pytest.mark.parametrize(
"sparse_columns, exp_cols",
[
(
True,
[
{"is_visible": True, "attributes": 'colspan="2"', "value": "c0"},
{"is_visible": False, "attributes": "", "value": "c0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "c0"},
{"is_visible": True, "attributes": "", "value": "c0"},
],
),
],
)
def test_mi_styler_sparsify_columns(mi_styler, sparse_columns, exp_cols):
exp_l1_c0 = {"is_visible": True, "attributes": "", "display_value": "c1_a"}
exp_l1_c1 = {"is_visible": True, "attributes": "", "display_value": "c1_b"}
ctx = mi_styler._translate(True, sparse_columns)
assert exp_cols[0].items() <= ctx["head"][0][2].items()
assert exp_cols[1].items() <= ctx["head"][0][3].items()
assert exp_l1_c0.items() <= ctx["head"][1][2].items()
assert exp_l1_c1.items() <= ctx["head"][1][3].items()
@pytest.mark.parametrize(
"sparse_index, exp_rows",
[
(
True,
[
{"is_visible": True, "attributes": 'rowspan="2"', "value": "i0"},
{"is_visible": False, "attributes": "", "value": "i0"},
],
),
(
False,
[
{"is_visible": True, "attributes": "", "value": "i0"},
{"is_visible": True, "attributes": "", "value": "i0"},
],
),
],
)
def test_mi_styler_sparsify_index(mi_styler, sparse_index, exp_rows):
exp_l1_r0 = {"is_visible": True, "attributes": "", "display_value": "i1_a"}
exp_l1_r1 = {"is_visible": True, "attributes": "", "display_value": "i1_b"}
ctx = mi_styler._translate(sparse_index, True)
assert exp_rows[0].items() <= ctx["body"][0][0].items()
assert exp_rows[1].items() <= ctx["body"][1][0].items()
assert exp_l1_r0.items() <= ctx["body"][0][1].items()
assert exp_l1_r1.items() <= ctx["body"][1][1].items()
def test_mi_styler_sparsify_options(mi_styler):
with pd.option_context("styler.sparse.index", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.index", True):
html2 = mi_styler.to_html()
assert html1 != html2
with pd.option_context("styler.sparse.columns", False):
html1 = mi_styler.to_html()
with pd.option_context("styler.sparse.columns", True):
html2 = mi_styler.to_html()
assert html1 != html2
@pytest.mark.parametrize(
"rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn",
[
(100, 100, 100, None, None, 12, 6), # reduce to (12, 6) < 100 elements
(1000, 3, 750, None, None, 250, 3), # dynamically reduce rows to 250, keep cols
(4, 1000, 500, None, None, 4, 125), # dynamically reduce cols to 125, keep rows
(1000, 3, 750, 10, None, 10, 3), # overwrite above dynamics with max_row
(4, 1000, 500, None, 5, 4, 5), # overwrite above dynamics with max_col
(100, 100, 700, 50, 50, 25, 25), # rows cols below given maxes so < 700 elmts
],
)
def test_trimming_maximum(rn, cn, max_els, max_rows, max_cols, exp_rn, exp_cn):
rn, cn = _get_trimming_maximums(
rn, cn, max_els, max_rows, max_cols, scaling_factor=0.5
)
assert (rn, cn) == (exp_rn, exp_cn)
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_rows", 3),
],
)
def test_render_trimming_rows(option, val):
# test auto and specific trimming of rows
df = DataFrame(np.arange(120).reshape(60, 2))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 3 # index + 2 data cols
assert len(ctx["body"]) == 4 # 3 data rows + trimming row
assert len(ctx["body"][0]) == 3 # index + 2 data cols
@pytest.mark.parametrize(
"option, val",
[
("styler.render.max_elements", 6),
("styler.render.max_columns", 2),
],
)
def test_render_trimming_cols(option, val):
# test auto and specific trimming of cols
df = DataFrame(np.arange(30).reshape(3, 10))
with pd.option_context(option, val):
ctx = df.style._translate(True, True)
assert len(ctx["head"][0]) == 4 # index + 2 data cols + trimming col
assert len(ctx["body"]) == 3 # 3 data rows
assert len(ctx["body"][0]) == 4 # index + 2 data cols + trimming col
def test_render_trimming_mi():
midx = MultiIndex.from_product([[1, 2], [1, 2, 3]])
df = DataFrame(np.arange(36).reshape(6, 6), columns=midx, index=midx)
with pd.option_context("styler.render.max_elements", 4):
ctx = df.style._translate(True, True)
assert len(ctx["body"][0]) == 5 # 2 indexes + 2 data cols + trimming row
assert {"attributes": 'rowspan="2"'}.items() <= ctx["body"][0][0].items()
assert {"class": "data row0 col_trim"}.items() <= ctx["body"][0][4].items()
assert {"class": "data row_trim col_trim"}.items() <= ctx["body"][2][4].items()
assert len(ctx["body"]) == 3 # 2 data rows + trimming row
assert len(ctx["head"][0]) == 5 # 2 indexes + 2 column headers + trimming col
assert {"attributes": 'colspan="2"'}.items() <= ctx["head"][0][2].items()
def test_render_empty_mi():
# GH 43305
df = DataFrame(index=MultiIndex.from_product([["A"], [0, 1]], names=[None, "one"]))
expected = dedent(
"""\
>
<thead>
<tr>
          <th class="index_name level0" >&nbsp;</th>
<th class="index_name level1" >one</th>
</tr>
</thead>
"""
)
assert expected in df.style.to_html()
@pytest.mark.parametrize("comprehensive", [True, False])
@pytest.mark.parametrize("render", [True, False])
@pytest.mark.parametrize("deepcopy", [True, False])
def test_copy(comprehensive, render, deepcopy, mi_styler, mi_styler_comp):
styler = mi_styler_comp if comprehensive else mi_styler
styler.uuid_len = 5
s2 = copy.deepcopy(styler) if deepcopy else copy.copy(styler) # make copy and check
assert s2 is not styler
if render:
styler.to_html()
excl = [
"na_rep", # deprecated
"precision", # deprecated
"cellstyle_map", # render time vars..
"cellstyle_map_columns",
"cellstyle_map_index",
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
if not deepcopy: # check memory locations are equal for all included attributes
for attr in [a for a in styler.__dict__ if (not callable(a) and a not in excl)]:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else: # check memory locations are different for nested or mutable vars
shallow = [
"data",
"columns",
"index",
"uuid_len",
"uuid",
"caption",
"cell_ids",
"hide_index_",
"hide_columns_",
"hide_index_names",
"hide_column_names",
"table_attributes",
]
for attr in shallow:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
for attr in [
a
for a in styler.__dict__
if (not callable(a) and a not in excl and a not in shallow)
]:
if getattr(s2, attr) is None:
assert id(getattr(s2, attr)) == id(getattr(styler, attr))
else:
assert id(getattr(s2, attr)) != id(getattr(styler, attr))
def test_clear(mi_styler_comp):
# NOTE: if this test fails for new features then 'mi_styler_comp' should be updated
# to ensure proper testing of the 'copy', 'clear', 'export' methods with new feature
# GH 40675
styler = mi_styler_comp
styler._compute() # execute applied methods
clean_copy = Styler(styler.data, uuid=styler.uuid)
excl = [
"data",
"index",
"columns",
"uuid",
"uuid_len", # uuid is set to be the same on styler and clean_copy
"cell_ids",
"cellstyle_map", # execution time only
"cellstyle_map_columns", # execution time only
"cellstyle_map_index", # execution time only
"precision", # deprecated
"na_rep", # deprecated
"template_latex", # render templates are class level
"template_html",
"template_html_style",
"template_html_table",
]
# tests vars are not same vals on obj and clean copy before clear (except for excl)
for attr in [a for a in styler.__dict__ if not (callable(a) or a in excl)]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert not (all(res) if (hasattr(res, "__iter__") and len(res) > 0) else res)
    # test vars have same values on obj and clean copy after clearing
styler.clear()
for attr in [a for a in styler.__dict__ if not (callable(a))]:
res = getattr(styler, attr) == getattr(clean_copy, attr)
assert all(res) if hasattr(res, "__iter__") else res
def test_export(mi_styler_comp, mi_styler):
exp_attrs = [
"_todo",
"hide_index_",
"hide_index_names",
"hide_columns_",
"hide_column_names",
"table_attributes",
"table_styles",
"css",
]
for attr in exp_attrs:
check = getattr(mi_styler, attr) == getattr(mi_styler_comp, attr)
assert not (
all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
)
export = mi_styler_comp.export()
used = mi_styler.use(export)
for attr in exp_attrs:
check = getattr(used, attr) == getattr(mi_styler_comp, attr)
assert all(check) if (hasattr(check, "__iter__") and len(check) > 0) else check
used.to_html()
def test_hide_raises(mi_styler):
msg = "`subset` and `level` cannot be passed simultaneously"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", subset="something", level="something else")
msg = "`level` must be of type `int`, `str` or list of such"
with pytest.raises(ValueError, match=msg):
mi_styler.hide(axis="index", level={"bad": 1, "type": 2})
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
def test_hide_index_level(mi_styler, level):
mi_styler.index.names, mi_styler.columns.names = ["zero", "one"], ["zero", "one"]
ctx = mi_styler.hide(axis="index", level=level)._translate(False, True)
assert len(ctx["head"][0]) == 3
assert len(ctx["head"][1]) == 3
assert len(ctx["head"][2]) == 4
assert ctx["head"][2][0]["is_visible"]
assert not ctx["head"][2][1]["is_visible"]
assert ctx["body"][0][0]["is_visible"]
assert not ctx["body"][0][1]["is_visible"]
assert ctx["body"][1][0]["is_visible"]
assert not ctx["body"][1][1]["is_visible"]
@pytest.mark.parametrize("level", [1, "one", [1], ["one"]])
@pytest.mark.parametrize("names", [True, False])
def test_hide_columns_level(mi_styler, level, names):
mi_styler.columns.names = ["zero", "one"]
if names:
mi_styler.index.names = ["zero", "one"]
ctx = mi_styler.hide(axis="columns", level=level)._translate(True, False)
assert len(ctx["head"]) == (2 if names else 1)
@pytest.mark.parametrize("method", ["applymap", "apply"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header(method, axis):
# GH 41893
df = DataFrame({"A": [0, 0], "B": [1, 1]}, index=["C", "D"])
func = {
"apply": lambda s: ["attr: val" if ("A" in v or "C" in v) else "" for v in s],
"applymap": lambda v: "attr: val" if ("A" in v or "C" in v) else "",
}
# test execution added to todo
result = getattr(df.style, f"{method}_index")(func[method], axis=axis)
assert len(result._todo) == 1
assert len(getattr(result, f"ctx_{axis}")) == 0
# test ctx object on compute
result._compute()
expected = {
(0, 0): [("attr", "val")],
}
assert getattr(result, f"ctx_{axis}") == expected
@pytest.mark.parametrize("method", ["apply", "applymap"])
@pytest.mark.parametrize("axis", ["index", "columns"])
def test_apply_map_header_mi(mi_styler, method, axis):
# GH 41893
func = {
"apply": lambda s: ["attr: val;" if "b" in v else "" for v in s],
"applymap": lambda v: "attr: val" if "b" in v else "",
}
result = getattr(mi_styler, f"{method}_index")(func[method], axis=axis)._compute()
expected = {(1, 1): [("attr", "val")]}
assert getattr(result, f"ctx_{axis}") == expected
def test_apply_map_header_raises(mi_styler):
# GH 41893
with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"):
mi_styler.applymap_index(lambda v: "attr: val;", axis="bad")._compute()
class TestStyler:
def setup_method(self, method):
np.random.seed(24)
self.s = DataFrame({"A": np.random.permutation(range(6))})
self.df = DataFrame({"A": [0, 1], "B": np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo="bar"):
return pd.Series(f"color: {foo}", index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = DataFrame({"A": ["color: red", "color: blue"]})
self.dataframes = [
self.df,
DataFrame(
{"f": [1.0, 2.0], "o": ["a", "b"], "c": pd.Categorical(["a", "b"])}
),
]
        self.blank_value = "&nbsp;"
def test_init_non_pandas(self):
msg = "``data`` must be a Series or DataFrame"
with pytest.raises(TypeError, match=msg):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(pd.Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_repr_html_mathjax(self):
# gh-19824 / 41395
assert "tex2jax_ignore" not in self.styler._repr_html_()
with pd.option_context("styler.html.mathjax", False):
assert "tex2jax_ignore" in self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): [("color", "red")], (1, 0): [("color", "blue")]}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi_and_trailing_semi(self):
attrs = DataFrame({"A": ["color: red; foo: bar", "color:blue ; foo: baz;"]})
self.styler._update_ctx(attrs)
expected = {
(0, 0): [("color", "red"), ("foo", "bar")],
(1, 0): [("color", "blue"), ("foo", "baz")],
}
assert self.styler.ctx == expected
def test_render(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_multiple_render(self):
# GH 39396
s = Styler(self.df, uuid_len=0).applymap(lambda x: "color: red;", subset=["A"])
s.to_html() # do 2 renders to ensure css styles not duplicated
assert (
'<style type="text/css">\n#T__row0_col0, #T__row1_col0 {\n'
" color: red;\n}\n</style>" in s.to_html()
)
def test_render_empty_dfs(self):
empty_df = DataFrame()
es = Styler(empty_df)
es.to_html()
# An index but no columns
DataFrame(columns=["a"]).style.to_html()
# A column but no index
DataFrame(index=["a"]).style.to_html()
# No IndexError raised?
def test_render_double(self):
df = DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(
["color: red; border: 1px", "color: blue; border: 2px"], name=x.name
)
s = Styler(df, uuid="AB").apply(style)
s.to_html()
# it worked?
def test_set_properties(self):
df = DataFrame({"A": [0, 1]})
result = df.style.set_properties(color="white", size="10px")._compute().ctx
# order is deterministic
v = [("color", "white"), ("size", "10px")]
expected = {(0, 0): v, (1, 0): v}
assert result.keys() == expected.keys()
for v1, v2 in zip(result.values(), expected.values()):
assert sorted(v1) == sorted(v2)
def test_set_properties_subset(self):
df = DataFrame({"A": [0, 1]})
result = (
df.style.set_properties(subset=pd.IndexSlice[0, "A"], color="white")
._compute()
.ctx
)
expected = {(0, 0): [("color", "white")]}
assert result == expected
def test_empty_index_name_doesnt_display(self):
# https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.style._translate(True, True)
assert len(result["head"]) == 1
expected = {
"class": "blank level0",
"type": "th",
"value": self.blank_value,
"is_visible": True,
"display_value": self.blank_value,
}
assert expected.items() <= result["head"][0][0].items()
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index("A").style._translate(True, True)
expected = {
"class": "index_name level0",
"type": "th",
"value": "A",
"is_visible": True,
"display_value": "A",
}
assert expected.items() <= result["head"][1][0].items()
def test_multiindex_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
result = df.set_index(["A", "B"]).style._translate(True, True)
expected = [
{
"class": "index_name level0",
"type": "th",
"value": "A",
"is_visible": True,
"display_value": "A",
},
{
"class": "index_name level1",
"type": "th",
"value": "B",
"is_visible": True,
"display_value": "B",
},
{
"class": "blank col0",
"type": "th",
"value": self.blank_value,
"is_visible": True,
"display_value": self.blank_value,
},
]
assert result["head"][1] == expected
def test_numeric_columns(self):
# https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = DataFrame({0: [1, 2, 3]})
df.style._translate(True, True)
def test_apply_axis(self):
df = DataFrame({"A": [0, 0], "B": [1, 1]})
f = lambda x: [f"val: {x.max()}" for v in x]
result = df.style.apply(f, axis=1)
assert len(result._todo) == 1
assert len(result.ctx) == 0
result._compute()
expected = {
(0, 0): [("val", "1")],
(0, 1): [("val", "1")],
(1, 0): [("val", "1")],
(1, 1): [("val", "1")],
}
assert result.ctx == expected
result = df.style.apply(f, axis=0)
expected = {
(0, 0): [("val", "0")],
(0, 1): [("val", "1")],
(1, 0): [("val", "0")],
(1, 1): [("val", "1")],
}
result._compute()
assert result.ctx == expected
result = df.style.apply(f) # default
result._compute()
assert result.ctx == expected
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_series_return(self, axis):
# GH 42014
df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
# test Series return where len(Series) < df.index or df.columns but labels OK
func = lambda s: pd.Series(["color: red;"], index=["Y"])
result = df.style.apply(func, axis=axis)._compute().ctx
assert result[(1, 1)] == [("color", "red")]
assert result[(1 - axis, axis)] == [("color", "red")]
# test Series return where labels align but different order
func = lambda s: pd.Series(["color: red;", "color: blue;"], index=["Y", "X"])
result = df.style.apply(func, axis=axis)._compute().ctx
assert result[(0, 0)] == [("color", "blue")]
assert result[(1, 1)] == [("color", "red")]
assert result[(1 - axis, axis)] == [("color", "red")]
assert result[(axis, 1 - axis)] == [("color", "blue")]
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("columns", [False, True])
def test_apply_dataframe_return(self, index, columns):
# GH 42014
df = DataFrame([[1, 2], [3, 4]], index=["X", "Y"], columns=["X", "Y"])
idxs = ["X", "Y"] if index else ["Y"]
cols = ["X", "Y"] if columns else ["Y"]
df_styles = DataFrame("color: red;", index=idxs, columns=cols)
result = df.style.apply(lambda x: df_styles, axis=None)._compute().ctx
assert result[(1, 1)] == [("color", "red")] # (Y,Y) styles always present
assert (result[(0, 1)] == [("color", "red")]) is index # (X,Y) only if index
assert (result[(1, 0)] == [("color", "red")]) is columns # (Y,X) only if cols
assert (result[(0, 0)] == [("color", "red")]) is (index and columns) # (X,X)
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:],
pd.IndexSlice[:, ["A"]],
pd.IndexSlice[[1], :],
pd.IndexSlice[[1], ["A"]],
pd.IndexSlice[:2, ["A", "B"]],
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_apply_subset(self, slice_, axis):
result = (
self.df.style.apply(self.h, axis=axis, subset=slice_, foo="baz")
._compute()
.ctx
)
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns
}
assert result == expected
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:],
pd.IndexSlice[:, ["A"]],
pd.IndexSlice[[1], :],
pd.IndexSlice[[1], ["A"]],
pd.IndexSlice[:2, ["A", "B"]],
],
)
def test_applymap_subset(self, slice_):
result = (
self.df.style.applymap(lambda x: "color:baz;", subset=slice_)._compute().ctx
)
expected = {
(r, c): [("color", "baz")]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and col in self.df.loc[slice_].columns
}
assert result == expected
@pytest.mark.parametrize(
"slice_",
[
pd.IndexSlice[:, pd.IndexSlice["x", "A"]],
pd.IndexSlice[:, pd.IndexSlice[:, "A"]],
pd.IndexSlice[:, pd.IndexSlice[:, ["A", "C"]]], # missing col element
pd.IndexSlice[pd.IndexSlice["a", 1], :],
pd.IndexSlice[pd.IndexSlice[:, 1], :],
pd.IndexSlice[pd.IndexSlice[:, [1, 3]], :], # missing row element
pd.IndexSlice[:, ("x", "A")],
pd.IndexSlice[("a", 1), :],
],
)
def test_applymap_subset_multiindex(self, slice_):
# GH 19861
# edited for GH 33562
warn = None
msg = "indexing on a MultiIndex with a nested sequence of labels"
if (
isinstance(slice_[-1], tuple)
and isinstance(slice_[-1][-1], list)
and "C" in slice_[-1][-1]
):
warn = FutureWarning
elif (
isinstance(slice_[0], tuple)
and isinstance(slice_[0][1], list)
and 3 in slice_[0][1]
):
warn = FutureWarning
idx = MultiIndex.from_product([["a", "b"], [1, 2]])
col = MultiIndex.from_product([["x", "y"], ["A", "B"]])
df = DataFrame(np.random.rand(4, 4), columns=col, index=idx)
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
df.style.applymap(lambda x: "color: red;", subset=slice_).to_html()
def test_applymap_subset_multiindex_code(self):
# https://github.com/pandas-dev/pandas/issues/25858
        # Checks styler.applymap works with multiindex when codes are provided
codes = np.array([[0, 0, 1, 1], [0, 1, 0, 1]])
columns = MultiIndex(
levels=[["a", "b"], ["%", "#"]], codes=codes, names=["", ""]
)
df = DataFrame(
[[1, -1, 1, 1], [-1, 1, 1, 1]], index=["hello", "world"], columns=columns
)
pct_subset = pd.IndexSlice[:, pd.IndexSlice[:, "%":"%"]]
def color_negative_red(val):
color = "red" if val < 0 else "black"
return f"color: {color}"
df.loc[pct_subset]
df.style.applymap(color_negative_red, subset=pct_subset)
def test_empty(self):
df = DataFrame({"A": [1, 0]})
s = df.style
s.ctx = {(0, 0): [("color", "red")], (1, 0): [("", "")]}
result = s._translate(True, True)["cellstyle"]
expected = [
{"props": [("color", "red")], "selectors": ["row0_col0"]},
{"props": [("", "")], "selectors": ["row1_col0"]},
]
assert result == expected
def test_duplicate(self):
df = DataFrame({"A": [1, 0]})
s = df.style
s.ctx = {(0, 0): [("color", "red")], (1, 0): [("color", "red")]}
result = s._translate(True, True)["cellstyle"]
expected = [
{"props": [("color", "red")], "selectors": ["row0_col0", "row1_col0"]}
]
assert result == expected
def test_init_with_na_rep(self):
# GH 21527 28358
df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
ctx = Styler(df, na_rep="NA")._translate(True, True)
assert ctx["body"][0][1]["display_value"] == "NA"
assert ctx["body"][0][2]["display_value"] == "NA"
def test_caption(self):
styler = Styler(self.df, caption="foo")
result = styler.to_html()
assert all(["caption" in result, "foo" in result])
styler = self.df.style
result = styler.set_caption("baz")
assert styler is result
assert styler.caption == "baz"
def test_uuid(self):
styler = Styler(self.df, uuid="abc123")
result = styler.to_html()
assert "abc123" in result
styler = self.df.style
result = styler.set_uuid("aaa")
assert result is styler
assert result.uuid == "aaa"
def test_unique_id(self):
# See https://github.com/pandas-dev/pandas/issues/16780
df = DataFrame({"a": [1, 3, 5, 6], "b": [2, 4, 12, 21]})
result = df.style.to_html(uuid="test")
assert "test" in result
ids = re.findall('id="(.*?)"', result)
assert np.unique(ids).size == len(ids)
def test_table_styles(self):
style = [{"selector": "th", "props": [("foo", "bar")]}] # default format
styler = Styler(self.df, table_styles=style)
result = " ".join(styler.to_html().split())
assert "th { foo: bar; }" in result
styler = self.df.style
result = styler.set_table_styles(style)
assert styler is result
assert styler.table_styles == style
# GH 39563
style = [{"selector": "th", "props": "foo:bar;"}] # css string format
styler = self.df.style.set_table_styles(style)
result = " ".join(styler.to_html().split())
assert "th { foo: bar; }" in result
def test_table_styles_multiple(self):
ctx = self.df.style.set_table_styles(
[
{"selector": "th,td", "props": "color:red;"},
{"selector": "tr", "props": "color:green;"},
]
)._translate(True, True)["table_styles"]
assert ctx == [
{"selector": "th", "props": [("color", "red")]},
{"selector": "td", "props": [("color", "red")]},
{"selector": "tr", "props": [("color", "green")]},
]
def test_table_styles_dict_multiple_selectors(self):
# GH 44011
result = self.df.style.set_table_styles(
[{"selector": "th,td", "props": [("border-left", "2px solid black")]}]
)._translate(True, True)["table_styles"]
expected = [
{"selector": "th", "props": [("border-left", "2px solid black")]},
{"selector": "td", "props": [("border-left", "2px solid black")]},
]
assert result == expected
def test_maybe_convert_css_to_tuples(self):
expected = [("a", "b"), ("c", "d e")]
assert maybe_convert_css_to_tuples("a:b;c:d e;") == expected
assert maybe_convert_css_to_tuples("a: b ;c: d e ") == expected
expected = []
assert maybe_convert_css_to_tuples("") == expected
def test_maybe_convert_css_to_tuples_err(self):
msg = "Styles supplied as string must follow CSS rule formats"
with pytest.raises(ValueError, match=msg):
maybe_convert_css_to_tuples("err")
def test_table_attributes(self):
attributes = 'class="foo" data-bar'
styler = Styler(self.df, table_attributes=attributes)
result = styler.to_html()
assert 'class="foo" data-bar' in result
result = self.df.style.set_table_attributes(attributes).to_html()
assert 'class="foo" data-bar' in result
def test_apply_none(self):
def f(x):
return DataFrame(
np.where(x == x.max(), "color: red", ""),
index=x.index,
columns=x.columns,
)
result = DataFrame([[1, 2], [3, 4]]).style.apply(f, axis=None)._compute().ctx
assert result[(1, 1)] == [("color", "red")]
def test_trim(self):
result = self.df.style.to_html() # trim=True
assert result.count("#") == 0
result = self.df.style.highlight_max().to_html()
assert result.count("#") == len(self.df.columns)
def test_export(self):
f = lambda x: "color: red" if x > 0 else "color: blue"
g = lambda x, z: f"color: {z}" if x > 0 else f"color: {z}"
style1 = self.styler
style1.applymap(f).applymap(g, z="b").highlight_max()._compute() # = render
result = style1.export()
style2 = self.df.style
style2.use(result)
assert style1._todo == style2._todo
style2.to_html()
def test_bad_apply_shape(self):
df = DataFrame([[1, 2], [3, 4]], index=["A", "B"], columns=["X", "Y"])
msg = "resulted in the apply method collapsing to a Series."
with pytest.raises(ValueError, match=msg):
df.style._apply(lambda x: "x")
msg = "created invalid {} labels"
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: [""])
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: ["", "", "", ""])
with pytest.raises(ValueError, match=msg.format("index")):
df.style._apply(lambda x: pd.Series(["a:v;", ""], index=["A", "C"]), axis=0)
with pytest.raises(ValueError, match=msg.format("columns")):
df.style._apply(lambda x: ["", "", ""], axis=1)
with pytest.raises(ValueError, match=msg.format("columns")):
            df.style._apply(lambda x: pd.Series(["a:v;", ""], index=["X", "Z"]), axis=1)
from __future__ import annotations
import pandas as pd
import geopandas as gpd
from pathlib import Path
from tqdm import tqdm
import pg_data_etl as pg
from network_routing import pg_db_connection
from network_routing.accessibility.logic_analyze import get_unique_ids
class IsochroneGenerator:
"""
    - This class consumes the outputs from `access eta-individual COUNTY`
- It takes the node-level access analysis results and generates isochrones
around each POI for both networks, sized to the specified distance threshold
and walking speed
Attributes:
db (pg.Database): analysis database
poi_table (str): name of the POI table
poi_col (str): name of the unique ID column in the POI table
network_a_edges (str): name of network 'A's edge table
network_a_nodes (str): name of network 'A's node table
network_a_node_id_col (str): name of ID column in network 'A's node table
network_b_edges (str): name of network 'B's edge table
network_b_nodes (str): name of network 'B's node table
network_b_node_id_col (str): name of ID column in network 'B's node table
distance_threshold_miles (float): distance to use for isochrones. Defaults to 1.0
        walking_speed_mph (float): Assumed walking speed of pedestrians, defaults to 2.5 mph
data_dir (str): folder where outputs from earlier process were stored. Defaults to "./data"
"""
def __init__(
self,
db: pg.Database,
poi_table: str,
poi_col: str,
network_a_edges: str,
network_a_nodes: str,
network_a_node_id_col: str,
network_b_edges: str,
network_b_nodes: str,
network_b_node_id_col: str,
distance_threshold_miles: float = 1.0,
walking_speed_mph: float = 2.5,
data_dir: str = "./data",
):
self.db = db
self.data_dir = Path(data_dir)
self.minutes_cutoff = distance_threshold_miles * 60 / walking_speed_mph
self.data_names = {
"a": {
"edges": network_a_edges,
"nodes": network_a_nodes,
"node_id_col": network_a_node_id_col,
},
"b": {
"edges": network_b_edges,
"nodes": network_b_nodes,
"node_id_col": network_b_node_id_col,
},
"poi": {"table": poi_table, "id_col": poi_col},
}
# Read all filenames for networks A and B
tables = {
"a": [x for x in self.data_dir.rglob(f"{network_a_edges}_*.csv")],
"b": [x for x in self.data_dir.rglob(f"{network_b_edges}_*.csv")],
}
# For each POI ID, record A and B filepaths if they exist
uids = get_unique_ids(db, poi_table, poi_col)
self.uid_results = {
raw_id: {"clean_id": clean_id, "a": None, "b": None}
for raw_id, clean_id in uids.items()
}
for uid in self.uid_results:
clean_id = uids[uid]
a_path = self.data_dir / f"{network_a_edges}_{clean_id}.csv"
b_path = self.data_dir / f"{network_b_edges}_{clean_id}.csv"
if a_path in tables["a"]:
self.uid_results[uid]["a"] = a_path
if b_path in tables["b"]:
self.uid_results[uid]["b"] = b_path
# For each ID, gather node lists or None
self.data = {
k: {"a": self.load_data(v["a"]), "b": self.load_data(v["b"])}
for k, v in self.uid_results.items()
}
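    # A minimal usage sketch (the table and column names below are hypothetical;
    # the real ones come from the project's database schema):
    #
    #   db = pg_db_connection()
    #   generator = IsochroneGenerator(
    #       db=db,
    #       poi_table="points_of_interest",
    #       poi_col="poi_uid",
    #       network_a_edges="sidewalk_edges",
    #       network_a_nodes="sidewalk_nodes",
    #       network_a_node_id_col="node_id",
    #       network_b_edges="osm_edges",
    #       network_b_nodes="osm_nodes",
    #       network_b_node_id_col="node_id",
    #   )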
def load_data(self, filepath: Path | None) -> tuple | None:
"""
- Read CSV file from disk
- Filter out rows beyond `self.minutes_cutoff`
Arguments:
filepath (Path | None): filepath to CSV file or None value
Returns:
            tuple: if filepath is not None, read the CSV with pandas and return a tuple of node IDs that meet the `minutes_cutoff`
"""
if filepath:
# Read CSV
df = pd.read_csv(filepath)
# Filter to only include rows that are at or below the
# defined cutoff time in minutes
df = df[df["n_1"] <= self.minutes_cutoff]
# Get a list of the remaining node id values as a tuple
nodes_as_tuple = tuple(df["node_id"].unique())
return nodes_as_tuple
else:
return None
def make_concave_hull(self, eta_uid: str) -> gpd.GeoDataFrame | None:
"""
- Generate a set of concave hulls for a single UID, using networks A and B
Arguments:
eta_uid (str): ID of the POI
Returns:
gpd.GeoDataFrame: polygons for both networks, if there are results
"""
# Only load node lists that exist. Skip 'None' values
node_lists = {}
for network_id in ["a", "b"]:
data = self.data[eta_uid][network_id]
if data:
node_lists[network_id] = data
gdfs = []
for network_id, node_filter in node_lists.items():
edge_table = self.data_names[network_id]["edges"]
node_table = self.data_names[network_id]["nodes"]
node_id_col = self.data_names[network_id]["node_id_col"]
num_nodes = len(node_filter)
if num_nodes > 0:
# See: https://postgis.net/docs/ST_CollectionExtract.html
if num_nodes == 1:
geom_idx = 1
node_filter = f"({node_filter[0]})"
elif num_nodes == 2:
geom_idx = 2
else:
geom_idx = 3
query = f"""
select
'{eta_uid}' as eta_uid,
'{edge_table}' as src_network,
st_buffer(
st_collectionextract(
st_concavehull(
st_collect(geom),
0.99
),
{geom_idx}),
45) as geom
from {node_table}
where {node_id_col} in {node_filter}
"""
gdfs.append(self.db.gdf(query))
if len(gdfs) == 0:
return None
else:
            return pd.concat(gdfs)
#!/usr/bin/env python
# coding: utf-8
# # Generate Generative Model Figures
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('matplotlib', 'inline')
import os
import glob
from collections import OrderedDict
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotnine as p9
import seaborn as sns
import pdb
import scipy.stats as ss
# In[2]:
def get_dataframes(
result_dir, file_path,
starting_point=0,
ending_point=30,
step=5, num_of_points=4
):
"""
This function grabs the result tsv files
    and loads them into a dictionary structure
[relationship] -> dataframe
Args:
result_dir - the directory containing all the results
file_path - the path to extract the result files
starting_point - the point to start each subgraph with in plot_graph function
ending_point - the point to end each subgraph with
step - the number to increase the middle points with
num_of_points - the number of points to plot between the start and end points
"""
    # Build up X axis by gathering relatively evenly spaced points
query_points = [starting_point]
query_points += [1 + step*index for index in range(num_of_points)]
query_points += [ending_point]
return {
# Get the head word of each file that will be parsed
os.path.splitext(os.path.basename(file))[0].split("_")[0]:
pd.read_csv(file, sep="\t")
.query("lf_num in @query_points", engine="python", local_dict={"query_points":query_points})
.assign(
lf_num=lambda x:x['lf_num'].map(lambda y: str(y) if y != ending_point else 'All')
)
for file in glob.glob(f"{result_dir}/{file_path}")
}
# In[3]:
file_tree = OrderedDict({
"DaG":
{
"DaG": "../../../disease_gene/disease_associates_gene/label_sampling_experiment/results/DaG/results",
"CtD": "../../../disease_gene/disease_associates_gene/label_sampling_experiment/results/CtD/results",
"CbG": "../../../disease_gene/disease_associates_gene/label_sampling_experiment/results/CbG/results",
"GiG": "../../../disease_gene/disease_associates_gene/label_sampling_experiment/results/GiG/results",
"All": "../../../disease_gene/disease_associates_gene/label_sampling_experiment/results/all/results",
},
"CtD":
{
"DaG": "../../../compound_disease/compound_treats_disease/label_sampling_experiment/results/DaG/results",
"CtD": "../../../compound_disease/compound_treats_disease/label_sampling_experiment/results/CtD/results",
"CbG": "../../../compound_disease/compound_treats_disease/label_sampling_experiment/results/CbG/results",
"GiG": "../../../compound_disease/compound_treats_disease/label_sampling_experiment/results/GiG/results",
"All": "../../../compound_disease/compound_treats_disease/label_sampling_experiment/results/all/results",
},
"CbG":
{
"DaG": "../../../compound_gene/compound_binds_gene/label_sampling_experiment/results/DaG/results",
"CtD": "../../../compound_gene/compound_binds_gene/label_sampling_experiment/results/CtD/results",
"CbG": "../../../compound_gene/compound_binds_gene/label_sampling_experiment/results/CbG/results",
"GiG": "../../../compound_gene/compound_binds_gene/label_sampling_experiment/results/GiG/results",
"All": "../../../compound_gene/compound_binds_gene/label_sampling_experiment/results/all/results",
},
"GiG":
{
"DaG": "../../../gene_gene/gene_interacts_gene/label_sampling_experiment/results/DaG/results",
"CtD": "../../../gene_gene/gene_interacts_gene/label_sampling_experiment/results/CtD/results",
"CbG": "../../../gene_gene/gene_interacts_gene/label_sampling_experiment/results/CbG/results",
"GiG": "../../../gene_gene/gene_interacts_gene/label_sampling_experiment/results/GiG/results",
"All": "../../../gene_gene/gene_interacts_gene/label_sampling_experiment/results/all/results",
}
})
# In[4]:
# End total of label functions for each point
end_points = {
"DaG": 30,
"CtD": 22,
"CbG": 20,
"GiG": 28,
"All": 100
}
# In[5]:
critical_val = ss.norm.ppf(0.975)
# In[6]:
color_names = {
    "turquoise": np.array([27, 158, 119, 255])/255,
    "orange": np.array([217, 95, 2, 255])/255,
    "purple": np.array([117, 112, 179, 255])/255,
    "pink": np.array([231, 41, 138, 255])/255,
    "light-green": np.array([102, 166, 30, 255])/255
}
# In[7]:
color_map = {
"DaG": color_names["turquoise"],
"CtD": color_names["orange"],
"CbG": color_names["purple"],
"GiG": color_names["pink"],
"All": color_names["light-green"]
}
# In[8]:
# Use the file tree above and graph the appropriate files
performance_data_tree = OrderedDict({
key: {
sub_key: get_dataframes(
file_tree[key][sub_key], "*sampled_results.tsv",
ending_point=end_points[sub_key],
# if using all the label functions step by 32 instead of 5
step=5 if sub_key != "All" else 32
)
for sub_key in file_tree[key]
}
for key in file_tree
})
# In[9]:
dev_performance_df = pd.DataFrame([], columns=['aupr', 'auroc', 'lf_num', 'predicted', 'lf_source'])
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 14 17:14:29 2020
@author: p000526841
"""
from pathlib import Path
import numpy as np
import pandas as pd
from datetime import datetime
import inspect
#from matplotlib_venn import venn2
import os
import gc
from contextlib import contextmanager
import matplotlib.pyplot as plt
import seaborn as sns
# utils is expected to provide project paths such as PATH_TO_GRAPH_DIR
# (and may also re-export some of the modules imported explicitly above).
from utils import *
plt.rcParams['font.family'] = 'IPAexGothic'
@contextmanager
def save_fig(path_to_save=PATH_TO_GRAPH_DIR/f"tmp.png"):
plt.figure()
yield
plt.savefig(path_to_save)
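# Example use of the save_fig context manager above (the filename and the
# df_train/seaborn call are illustrative, not part of this module):
#
#   with save_fig(PATH_TO_GRAPH_DIR / "age_hist.png"):
#       sns.histplot(df_train["age"])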
def showCorr(df, str_value_name, show_percentage=0.6):
corrmat = df.corr()
num_of_col = len(corrmat.columns)
cols = corrmat.nlargest(num_of_col, str_value_name)[str_value_name]
tmp = cols[(cols >= show_percentage) | (cols <= -show_percentage)]
print("*****[ corr : " + str_value_name + " ]*****")
print(tmp)
print("*****[" + str_value_name + "]*****")
print("\n")
#print(tmp[0])
def showBoxPlot(df, str_val1, str_va2):
plt.figure(figsize=(15, 8))
plt.xticks(rotation=90, size='small')
#neigh_median = df.groupby([str_val1],as_index=False)[str_va2].median().sort_values(str_va2)
#print(neigh_median)
#col_order = neigh_median[str_val1].values
#sns.boxplot(x=df[str_val1], y =df[str_va2], order=col_order)
sns.boxplot(x=df[str_val1], y =df[str_va2])
plt.tight_layout()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_box_plot_{}.png".format(str_val1))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
def createVenn(train_set, test_set, title_str, path_to_save, train_label="train", test_label="test"):
plt.figure()
#venn2(subsets=[train_set,test_set],set_labels=(train_label,test_label))
plt.title(f'{title_str}',fontsize=20)
#path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_venn_{}.png".format(title_str))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
def showValueCount(df_train, df_test, str_value, str_target, debug=True, regression_flag=1, _fig_size=(20,10)):
if str_value == str_target:
df_test[str_value] = np.nan
df = pd.concat([df_train, df_test])
if not str_value in df.columns:
print(str_value, " is not inside columns")
return
se_all = df[str_value]
se_train = df_train[str_value]
se_test = df_test[str_value]
all_desc = se_all.describe()
train_desc = se_train.describe()
test_desc = se_test.describe()
df_concat_desc = pd.concat([train_desc, test_desc, all_desc], axis=1, keys=['train', 'test', "all"])
if debug:
print("***[" + str_value + "]***")
print("describe :")
print(df_concat_desc)
num_nan_all = se_all.isna().sum()
num_nan_train = se_train.isna().sum()
num_nan_test = se_test.isna().sum()
df_concat_num_nan = pd.DataFrame([num_nan_train, num_nan_test, num_nan_all], columns=["num_of_nan"], index=['train', 'test', "all"]).transpose()
if debug:
print("Num of Nan : ")
print(df_concat_num_nan)
df_value = se_all.value_counts(dropna=False)
df_value_percentage = (df_value / df_value.sum()) * 100
df_value_train = se_train.value_counts(dropna=False)
df_value_train_percentage = (df_value_train / df_value_train.sum()) * 100
df_value_test = se_test.value_counts(dropna=False)
df_value_test_percentage = (df_value_test / df_value_test.sum()) * 100
df_concat = pd.concat([df_value_train, df_value_train_percentage, df_value_test, df_value_test_percentage, df_value, df_value_percentage], axis=1, keys=['train', "train rate", 'test', "test rate", "all", "all rate"], sort=True)
train_values = set(se_train.unique())
test_values = set(se_test.unique())
xor_values = test_values - train_values
if xor_values:
#print(f'Replace {len(xor_values)} in {col} column')
print(f'{xor_values} is only found in test, not train!!!')
#full_data.loc[full_data[col].isin(xor_values), col] = 'xor'
xor_values_train = train_values - test_values
if xor_values_train:
#print(f'Replace {len(xor_values)} in {col} column')
print(f'{xor_values_train} is only found in train, not test!!!' )
#full_data.loc[full_data[col].isin(xor_values), col] = 'xor'
if debug:
# plt.figure()
# venn2(subsets=[train_values,test_values],set_labels=('train','test'))
# plt.title(f'{str_value}',fontsize=20)
# path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_venn_{}.png".format(str_value))
# #print("save: ", path_to_save)
# plt.savefig(path_to_save)
# plt.show(block=False)
# plt.close()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_venn_{}.png".format(str_value))
createVenn(train_set=train_values, test_set=test_values, title_str=str_value, path_to_save=path_to_save, train_label="train", test_label="test")
print("value_counts :")
print(df_concat)
plt.figure(figsize=_fig_size)
df_graph = df_concat[['train', 'test', "all"]].reset_index()
df_graph = pd.melt(df_graph, id_vars=["index"], value_vars=['train', 'test', "all"])
sns.barplot(x='index', y='value', hue='variable', data=df_graph)
#sns.despine(fig)
#df_concat[['train', 'test', "all"]].dropna().plot.bar(figsize=_fig_size)
plt.ylabel('Number of each element', fontsize=12)
plt.xlabel(str_value, fontsize=12)
plt.xticks(rotation=90, size='small')
plt.tight_layout()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_num_each_elments_{}.png".format(str_value))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
plt.figure(figsize=_fig_size)
df_graph = df_concat[['train rate', 'test rate', "all rate"]].reset_index()
df_graph = pd.melt(df_graph, id_vars=["index"], value_vars=['train rate', 'test rate', "all rate"])
sns.barplot(x='index', y='value', hue='variable', data=df_graph)
#df_concat[['train rate', 'test rate', "all rate"]].plot.bar(figsize=_fig_size)
plt.ylabel('rate of each element', fontsize=12)
plt.xlabel(str_value, fontsize=12)
plt.xticks(rotation=90, size='small')
plt.tight_layout()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_rate_each_elments_{}.png".format(str_value))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
if str_value != str_target and str_target in df.columns:
if regression_flag == 1:
if debug:
showBoxPlot(df_train, str_value, str_target)
else:
df_train_small = df.loc[df[str_target].isnull() == False, [str_value, str_target]]
df_stack = df_train_small.groupby(str_value)[str_target].value_counts().unstack()
if debug:
print("---")
col_list = []
df_list = []
if debug:
plt.figure(figsize=_fig_size)
g = sns.countplot(x=str_value, hue = str_target, data=df, order=df_stack.index)
plt.xticks(rotation=90, size='small')
ax1 = g.axes
ax2 = ax1.twinx()
for col in df_stack.columns:
col_list += [str(col), str(col)+"_percent"]
df_percent = (df_stack.loc[:, col] / df_stack.sum(axis=1))
df_list += [df_stack.loc[:, col], df_percent]
if debug:
#print(df_percent.index)
xn = range(len(df_percent.index))
sns.lineplot(x=xn, y=df_percent.values, ax=ax2)
#sns.lineplot(data=df_percent, ax=ax2)
#sns.lineplot(data=df_percent, y=(str(col)+"_percent"), x=df_percent.index)
df_conc = pd.concat(df_list, axis=1, keys=col_list)
if debug:
print(df_conc.T)
#print(df_stack.columns)
#print(df_stack.index)
#plt.tight_layout()
path_to_save = os.path.join(str(PATH_TO_GRAPH_DIR), datetime.now().strftime("%Y%m%d%H%M%S") + "_count_line_{}.png".format(str_value))
#print("save: ", path_to_save)
plt.savefig(path_to_save)
plt.show(block=False)
plt.close()
if debug:
print("******\n")
del df
gc.collect()
return df_concat
def showJointPlot(_df_train, _df_test, str_value, str_target, debug=True, regression_flag=1, corr_flag=False, empty_nums=[], log_flag=1, _fig_size=(20, 10)):
print("now in function : ", inspect.getframeinfo(inspect.currentframe())[2])
df_train = _df_train.copy()
df_test = _df_test.copy()
if str_value == str_target:
df_test[str_target] = np.nan
if len(empty_nums) >0:
for e in empty_nums:
df_train[str_value] = df_train[str_value].replace(e, np.nan)
df_test[str_value] = df_test[str_value].replace(e, np.nan)
if log_flag==1:
df_train[str_value] = np.log1p(df_train[str_value])
df_test[str_value] = np.log1p(df_test[str_value])
    df = pd.concat([df_train, df_test])
import numpy as np
import os
import pandas as pd
import pyro
import torch
from pyro.distributions import Gamma, Normal
from tqdm import tqdm
from deepscm.datasets.morphomnist import load_morphomnist_like, save_morphomnist_like
from deepscm.datasets.morphomnist.transforms import SetThickness, SetSlant, ImageMorphology
def model_(n_samples=None):
with pyro.plate('observations', n_samples):
thickness = pyro.sample('thickness', Gamma(10., 5.))
loc = thickness * 6.
slant = pyro.sample('slant', Normal(loc, 1.))
return slant, thickness
def model(n_samples=None):
with pyro.plate('observations', n_samples):
thickness = pyro.sample('thickness', Gamma(10., 5.))
loc = (thickness - 2.5) * 20
slant = pyro.sample('slant', Normal(loc, 1.))
return slant, thickness
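# Illustrative sanity check of the prior defined in model() above (not part of
# the original script): a few draws should show slant centred near
# (thickness - 2.5) * 20.
#
#   slant, thickness = model(n_samples=3)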
def gen_dataset(args, train=True):
pyro.clear_param_store()
images, labels, _ = load_morphomnist_like(args.data_dir, train=train)
mask = (labels == args.digit_class)
images = images[mask]
labels = labels[mask]
n_samples = len(images)
with torch.no_grad():
slant, thickness = model(n_samples)
    metrics = pd.DataFrame(data={'thickness': thickness, 'slant': slant})
from evalutils.exceptions import ValidationError
from evalutils.io import CSVLoader, FileLoader, ImageLoader
import json
import nibabel as nib
import numpy as np
import os.path
from pathlib import Path
from pandas import DataFrame, MultiIndex
import scipy.ndimage
from scipy.ndimage.interpolation import map_coordinates, zoom
from surface_distance import *
##### paths #####
DEFAULT_INPUT_PATH = Path("/input/")
DEFAULT_GROUND_TRUTH_PATH = Path("/opt/evaluation/ground-truth/")
DEFAULT_EVALUATION_OUTPUT_FILE_PATH = Path("/output/metrics.json")
##### metrics #####
def jacobian_determinant(disp):
_, _, H, W, D = disp.shape
gradx = np.array([-0.5, 0, 0.5]).reshape(1, 3, 1, 1)
grady = np.array([-0.5, 0, 0.5]).reshape(1, 1, 3, 1)
gradz = np.array([-0.5, 0, 0.5]).reshape(1, 1, 1, 3)
gradx_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], gradx, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], gradx, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], gradx, mode='constant', cval=0.0)], axis=1)
grady_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], grady, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], grady, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], grady, mode='constant', cval=0.0)], axis=1)
gradz_disp = np.stack([scipy.ndimage.correlate(disp[:, 0, :, :, :], gradz, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 1, :, :, :], gradz, mode='constant', cval=0.0),
scipy.ndimage.correlate(disp[:, 2, :, :, :], gradz, mode='constant', cval=0.0)], axis=1)
grad_disp = np.concatenate([gradx_disp, grady_disp, gradz_disp], 0)
jacobian = grad_disp + np.eye(3, 3).reshape(3, 3, 1, 1, 1)
jacobian = jacobian[:, :, 2:-2, 2:-2, 2:-2]
jacdet = jacobian[0, 0, :, :, :] * (jacobian[1, 1, :, :, :] * jacobian[2, 2, :, :, :] - jacobian[1, 2, :, :, :] * jacobian[2, 1, :, :, :]) -\
jacobian[1, 0, :, :, :] * (jacobian[0, 1, :, :, :] * jacobian[2, 2, :, :, :] - jacobian[0, 2, :, :, :] * jacobian[2, 1, :, :, :]) +\
jacobian[2, 0, :, :, :] * (jacobian[0, 1, :, :, :] * jacobian[1, 2, :, :, :] - jacobian[0, 2, :, :, :] * jacobian[1, 1, :, :, :])
return jacdet
def compute_tre(x, y, spacing):
return np.linalg.norm((x - y) * spacing, axis=1)
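# Worked example for compute_tre (synthetic landmarks, 1.5 mm isotropic
# spacing): a one-voxel offset along x gives a TRE of 1.5 mm.
#
#   compute_tre(np.array([[10, 20, 30]]), np.array([[11, 20, 30]]),
#               np.array([1.5, 1.5, 1.5]))
#   # -> array([1.5])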
##### file loader #####
class NiftiLoader(ImageLoader):
@staticmethod
def load_image(fname):
return nib.load(str(fname))
@staticmethod
def hash_image(image):
return hash(image.get_fdata().tostring())
class NumpyLoader(ImageLoader):
@staticmethod
def load_image(fname):
return np.load(str(fname))['arr_0']
@staticmethod
def hash_image(image):
return hash(image.tostring())
class CURIOUSLmsLoader(FileLoader):
def load(self, fname):
lms_fixed = []
lms_moving = []
f = open(fname, 'r')
for line in f.readlines()[5:]:
lms = [float(lm) for lm in line.split(' ')[1:-1]]
lms_fixed.append(lms[:3])
lms_moving.append(lms[3:])
return {'lms_fixed': lms_fixed, 'lms_moving': lms_moving}
class L2RLmsLoader(FileLoader):
def load(self, fname):
lms_fixed = []
lms_moving = []
f = open(fname, 'r')
for line in f.readlines():
lms = [float(lm) for lm in line.split(',')]
lms_fixed.append(lms[:3])
lms_moving.append(lms[3:])
return {'lms_fixed': lms_fixed, 'lms_moving': lms_moving}
##### validation errors #####
def raise_missing_file_error(fname):
message = (
f"The displacement field {fname} is missing. "
f"Please provide all required displacement fields."
)
raise ValidationError(message)
def raise_dtype_error(fname, dtype):
message = (
f"The displacement field {fname} has a wrong dtype ('{dtype}'). "
f"All displacement fields should have dtype 'float16'."
)
raise ValidationError(message)
def raise_shape_error(fname, shape, expected_shape):
message = (
f"The displacement field {fname} has a wrong shape ('{shape[0]}x{shape[1]}x{shape[2]}x{shape[3]}'). "
f"The expected shape of displacement fields for this task is {expected_shape[0]}x{expected_shape[1]}x{expected_shape[2]}x{expected_shape[3]}."
)
raise ValidationError(message)
##### eval val #####
class EvalVal():
def __init__(self):
self.ground_truth_path = DEFAULT_GROUND_TRUTH_PATH
self.predictions_path = DEFAULT_INPUT_PATH
self.output_file = DEFAULT_EVALUATION_OUTPUT_FILE_PATH
self.csv_loader = CSVLoader()
self.nifti_loader = NiftiLoader()
self.numpy_loader = NumpyLoader()
self.curious_lms_loader = CURIOUSLmsLoader()
self.l2r_lms_loader = L2RLmsLoader()
self.pairs_task_01 = DataFrame()
self.imgs_task_01 = DataFrame()
self.lms_task_01 = DataFrame()
self.disp_fields_task_01 = DataFrame()
self.cases_task_01 = DataFrame()
self.pairs_task_02 = DataFrame()
self.imgs_task_02 = DataFrame()
self.lms_task_02 = DataFrame()
self.disp_fields_task_02 = DataFrame()
self.cases_task_02 = DataFrame()
self.pairs_task_03 = DataFrame()
self.segs_task_03 = DataFrame()
self.disp_fields_task_03 = DataFrame()
self.cases_task_03 = DataFrame()
self.pairs_task_04 = DataFrame()
self.segs_task_04 = DataFrame()
self.disp_fields_task_04 = DataFrame()
self.cases_task_04 = DataFrame()
def evaluate(self):
self.load_task_01()
self.merge_ground_truth_and_predictions_task_01()
self.score_task_01()
self.load_task_02()
self.merge_ground_truth_and_predictions_task_02()
self.score_task_02()
self.load_task_03()
self.merge_ground_truth_and_predictions_task_03()
self.score_task_03()
self.load_task_04()
self.merge_ground_truth_and_predictions_task_04()
self.score_task_04()
self.save()
def load_task_01(self):
self.pairs_task_01 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_01' / 'pairs_val.csv')
self.imgs_task_01 = self.load_imgs_task_01()
self.lms_task_01 = self.load_lms_task_01()
self.disp_fields_task_01 = self.load_disp_fields(self.pairs_task_01, DEFAULT_INPUT_PATH / 'task_01', np.array([3, 128, 128, 144]))
def load_task_02(self):
self.pairs_task_02 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_02' / 'pairs_val.csv')
self.imgs_task_02 = self.load_imgs_task_02()
self.lms_task_02 = self.load_lms_task_02()
self.disp_fields_task_02 = self.load_disp_fields(self.pairs_task_02, DEFAULT_INPUT_PATH / 'task_02', np.array([3, 96, 96, 104]))
def load_task_03(self):
self.pairs_task_03 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_03' / 'pairs_val.csv')
self.segs_task_03 = self.load_segs_task_03()
self.disp_fields_task_03 = self.load_disp_fields(self.pairs_task_03, DEFAULT_INPUT_PATH / 'task_03', np.array([3, 96, 80, 128]))
def load_task_04(self):
self.pairs_task_04 = self.load_pairs(DEFAULT_GROUND_TRUTH_PATH / 'task_04' / 'pairs_val.csv')
self.segs_task_04 = self.load_segs_task_04()
self.disp_fields_task_04 = self.load_disp_fields(self.pairs_task_04, DEFAULT_INPUT_PATH / 'task_04', np.array([3, 64, 64, 64]))
def load_imgs_task_01(self):
cases = None
for _, row in self.pairs_task_01.iterrows():
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_01' / 'EASY-RESECT' / 'NIFTI' / 'Case{}'.format(row['fixed']) / 'Case{}-FLAIR-resize.nii'.format(row['fixed']))
if cases is None:
cases = case
index = [row['fixed']]
else:
cases += case
index += [row['fixed']]
return DataFrame(cases, index=index)
def load_imgs_task_02(self):
cases = None
for _, row in self.pairs_task_02.iterrows():
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_02' / 'training' / 'lungMasks' / 'case_{:03d}_exp.nii.gz'.format(row['fixed']))
if cases is None:
cases = case
index = [row['fixed']]
else:
cases += case
index += [row['fixed']]
return DataFrame(cases, index=index)
def load_segs_task_03(self):
cases = None
indices = []
for _, row in self.pairs_task_03.iterrows():
indices.append(row['fixed'])
indices.append(row['moving'])
indices = np.array(indices)
for i in np.unique(indices):
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_03' / 'Training' / 'label' / 'label{:04d}.nii.gz'.format(i))
if cases is None:
cases = case
index = [i]
else:
cases += case
index += [i]
return DataFrame(cases, index=index)
def load_segs_task_04(self):
cases = None
indices = []
for _, row in self.pairs_task_04.iterrows():
indices.append(row['fixed'])
indices.append(row['moving'])
indices = np.array(indices)
for i in np.unique(indices):
case = self.nifti_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_04' / 'Training' / 'label' / 'hippocampus_{}.nii.gz'.format(i))
if cases is None:
cases = case
index = [i]
else:
cases += case
index += [i]
return DataFrame(cases, index=index)
def load_lms_task_01(self):
cases = None
for _, row in self.pairs_task_01.iterrows():
case = self.curious_lms_loader.load(fname=DEFAULT_GROUND_TRUTH_PATH / 'task_01' / 'EASY-RESECT' / 'landmarks' / 'Coordinates' / 'Case{}-MRI-beforeUS.tag'.format(row['fixed']))
if cases is None:
cases = [case]
index = [row['fixed']]
else:
cases += [case]
index += [row['fixed']]
        return DataFrame(cases, index=index)
import os
import sys
import argparse
import pandas as pd
import numpy as np
### Version 3, created 12 August 2020 by <NAME> ###
### Reformats concatenated, headerless MELT vcf files into the relevant information columns, with extraneous information/columns removed, ready for use in the duplicate-removal scripts
### This includes renaming MELT SPLIT hits to match the original TE names from RepeatMasker and the original TE library
def get_args():
#What this script does
parser = argparse.ArgumentParser(description="General removal of most MELT duplicate calls and overlapping calls", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
required = parser.add_argument_group('required arguments')
#Give input coordinate file of formatted MELT SPLIT hits
parser.add_argument('-s', '--split', help='filtered, unique, concatenated MELT SPLIT file', required=True)
#Give input coordinate file of formatted MELT DELETION hits
parser.add_argument('-del', '--deletion', help='filtered, unique, concatenated MELT DELETION file', required=True)
#Argument of directory containing the list of formatted MELT hits (need full path)
parser.add_argument('-d', '--directory', type=str, help='Path to the directory of the input file', default=".")
#Argument of the output directory (need full path)
parser.add_argument('-od', '--outdir', type=str, help='Location of directory for the output file', default=".")
#Argument of the list of MELT ZIP basenames / MELT-compatible TE names (need full path)
parser.add_argument('-mt', '--melttes', type=str, help='Path to list of MELT ZIP basenames (MELT compatible TE names = zip_te_names.txt)', required=True)
#Argument of the list of RepeatMasker TE names (need full path)
parser.add_argument('-lib', '--telibrary', type=str, help='Path to the list of RepeatMasker TE names (te_list.txt)', required=True)
#Argument of the list of RepeatMasker TE names with TE family categories (need full path)
parser.add_argument('-tecat', '--tecategories', type=str, help='Path to the list of RepeatMasker TE names, with TE family category (te_lib_categories.txt)', required=True)
args = parser.parse_args()
SPLIT = args.split
DELETION = args.deletion
DIR = args.directory
OUTDIR = args.outdir
SPLIT_TE_NAMES = args.melttes
TE_NAMES = args.telibrary
CAT_TE_NAMES = args.tecategories
return SPLIT, DELETION, DIR, OUTDIR, SPLIT_TE_NAMES, TE_NAMES, CAT_TE_NAMES
SPLIT, DELETION, DIR, OUTDIR, SPLIT_TE_NAMES, TE_NAMES, CAT_TE_NAMES = get_args()
if DIR == ".":
DIR = os.getcwd()
if OUTDIR == ".":
OUTDIR = os.getcwd()
BASENAME = os.path.basename(SPLIT).split("_SPLIT")[0]
SPLIT_HITS = os.path.join(DIR, SPLIT)
DEL_HITS = os.path.join(DIR, DELETION)
OUTBASE = os.path.join(OUTDIR, BASENAME)
OUTPUT1 = OUTBASE + "_cat_assess_dups_headers.bed"
OUTPUT2 = OUTBASE + "_cat_assess_dups.bed"
HEADERS1 = ['#CHROM', 'POS', 'END', 'ASSESS', 'SVTYPE', 'SVLENGTH', 'ORIENTATION', 'Austroriparius', 'Brandtii', 'Ciliolabrum', 'Davidii', 'Myotis', 'Occultus', 'Sept_TTU', 'Thysanodes', 'Velifer', 'Vivesi', 'Yumanensis', 'MODULE']
HEADERS2 = ['#CHROM', 'POS', 'END', 'SVTYPE', 'SVLENGTH', 'ORIENTATION', 'Austroriparius', 'Brandtii', 'Ciliolabrum', 'Davidii', 'Myotis', 'Occultus', 'Sept_TTU', 'Thysanodes', 'Velifer', 'Vivesi', 'Yumanensis', 'MODULE']
SPLIT_DF = pd.read_csv(SPLIT_HITS, sep='\t', names=HEADERS1)
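# Hedged sketch (not in the original, which is cut off here): the DELETION hits
# were concatenated without the ASSESS column, so HEADERS2 is assumed to match
# their layout; DEL_DF is an illustrative variable name only.
DEL_DF = pd.read_csv(DEL_HITS, sep='\t', names=HEADERS2)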
"""
plotting functions for N2 experiments.
"""
# std lib
import logging
logger = logging.getLogger(__name__)
## local
from elchempy.plotters.plot_helpers import PlotterMixin
## for developing and testing
# from elchempy.experiments._dev_datafiles._dev_fetcher import get_files
## constants
from elchempy.constants import EvRHE
### 3rd party
import pandas as pd
import matplotlib.pyplot as plt
#%%
class N2_Plotter(PlotterMixin):
# sweep_type_mapper = {'ls' : {'anodic' : '-.', 'cathodic' : '-', 'chrono' : ':'}}
def plot_all_scans_scanrate(
self,
plot_data=None,
xlabel=EvRHE,
ylabel="j_mA_cm2",
max_seg_only=True,
savepath=None,
):
if not isinstance(plot_data, pd.DataFrame):
plot_data = self.data_selection
fig, ax = plt.subplots(figsize=(8, 8))
for sr, sgr in plot_data.groupby("scanrate"):
max_seg = max(sgr["Segment #"].unique())
if max_seg_only:
sgr = sgr.loc[sgr["Segment #"] == max_seg]
for swp, swpgrp in sgr.groupby("SweepType"):
ax.plot(
swpgrp[xlabel],
swpgrp[ylabel],
label=f"{swp} {sr} mV, seg {max_seg}",
linestyle=self.sweep_type_mapper["ls"][swp],
)
# ax.legend(True)
fig.suptitle(
f"{self.filepath.parent.name}\n{self.filepath.name}\nData({len(plot_data)})"
)
ax.legend()
ax.set_ylabel("$j \//\/mA/cm^{2}$")
ax.set_xlabel("$E \//\/V_{RHE}$")
if savepath:
plt.savefig(savepath, dpi=100, bbox_inches="tight")
plt.close()
return
def plot_Cdl_sweeptype_scatter(Cdl_pars, **kwargs):
# SampleID, ScanRates, Cdl_fit, Cdl_cath_slice, Cdl_an_slice, N2_dest_dir, N2_fn
EvRHE = "E_AppV_RHE"
fig, ax = plt.subplots()
Cdl_cath_slice = Cdl_pars.query('SweepType == "cathodic"')
Cdl_an_slice = Cdl_pars.query('SweepType == "anodic"')
plt.title(
"%s made with linear fit of\n %s (R=%.3f)"
% (
kwargs.get("SampleID"),
kwargs.get("scanrates"),
Cdl_pars["lin_rvalue"].mean(),
)
)
ylim = (0, 1.2 * Cdl_pars.Cdl.max())
Cdl_cath_slice.plot(
x=EvRHE,
y="Cdl_corr",
kind="scatter",
ylim=ylim,
color="orange",
ax=ax,
label="Cdl_Cath_corr",
)
Cdl_cath_slice.plot(
x=EvRHE,
y="Cdl",
kind="scatter",
ylim=ylim,
color="r",
ax=ax,
label="Cdl_Cath",
)
Cdl_an_slice.plot(
x=EvRHE,
y="Cdl_corr",
kind="scatter",
ylim=ylim,
color="c",
ax=ax,
label="Cdl_Anod_corr",
)
Cdl_an_slice.plot(
x=EvRHE,
y="Cdl",
kind="scatter",
ylim=ylim,
ax=ax,
label="Cdl_Anod",
)
if kwargs.get("savepath"):
plt.savefig(
kwargs.get("savepath"), dpi=100, bbox_inches="tight"
)
return pd.concat([Cdl_an_slice, Cdl_cath_slice], sort=False, axis=0)
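# Hedged, self-contained usage sketch (not part of the original module): build a
# tiny synthetic Cdl_pars frame with the columns the plotter actually reads
# (E_AppV_RHE, Cdl, Cdl_corr, SweepType, lin_rvalue) and call the function.
# The sample values and kwargs below are assumptions for illustration only.
if __name__ == "__main__":
    _demo = pd.DataFrame(
        {
            "E_AppV_RHE": [0.1, 0.2, 0.3, 0.1, 0.2, 0.3],
            "Cdl": [1.0, 1.1, 1.2, 0.9, 1.0, 1.1],
            "Cdl_corr": [0.9, 1.0, 1.1, 0.8, 0.9, 1.0],
            "SweepType": ["anodic"] * 3 + ["cathodic"] * 3,
            "lin_rvalue": [0.99] * 6,
        }
    )
    # savepath is omitted, so no file is written; the combined frame is returned.
    plot_Cdl_sweeptype_scatter(_demo, SampleID="demo", scanrates=[100])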
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 10:16:42 2021
@author: tungbioinfo
"""
import argparse
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import time
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import pickle
import os, sys
from joblib import Parallel, delayed
import PCA_Analysis as pca
import RF_Analysis_Multiclass as rfc
import RF_Analysis_Binary as rfb
from Auto_ML_Multiclass import AutoML_classification
###############################################################################
############################## Read data set ##################################
###############################################################################
rumi = pd.read_csv("rumi.csv")
rumi = rumi.drop(rumi[rumi["Depressiongroup"]==1].index, axis=0).reset_index(drop=True)
depre_gr = rumi["Depressiongroup"].apply(lambda x: "BPD"
if x == 2 else "H"
if x == 0 else "MDD")
sex = rumi["Gender_1_male"].apply(lambda x: 0 if x == 2 else 1)
rumi = rumi.drop(columns = ["Depressiongroup", "Gender_1_male"])
rumi = pd.concat([depre_gr, sex, rumi], axis = 1)
rumi = shuffle(rumi).reset_index(drop=True)
rumi_meta = rumi[['MRI_expID', 'MRI_ordID', 'CurrentDepression', 'Depressiongroup', 'TIV',
'Age', 'Gender_1_male', 'BDI_Total', 'RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination',
'RRS_Total', 'Dep_PastEpisodes', 'Dep_Duration']]
rumi_meta = rumi_meta.set_index('MRI_expID')
sns.pairplot(rumi_meta[['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination', 'RRS_Total', 'Depressiongroup']],
hue="Depressiongroup")
rumi_meta_bdp = rumi_meta.loc[rumi_meta['Depressiongroup'] == "BPD"]
rumi_meta_mdd = rumi_meta.loc[rumi_meta['Depressiongroup'] == 'MDD']
sns.pairplot(rumi_meta_bdp[['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination', 'RRS_Total', 'CurrentDepression']],
hue="CurrentDepression")
sns.pairplot(rumi_meta_mdd[['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination', 'RRS_Total', 'CurrentDepression']],
hue="CurrentDepression")
rumi_region = rumi.drop(columns = ['MRI_ordID', 'CurrentDepression', 'Depressiongroup', 'TIV',
'Age', 'Gender_1_male', 'BDI_Total', 'RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination',
'RRS_Total', 'Dep_PastEpisodes', 'Dep_Duration'])
rumi_region = rumi_region.set_index('MRI_expID')
rumi_region_T = rumi_region.T
rumi_region_bdp = rumi_region.loc[rumi_meta_bdp.index]
rumi_region_mdd = rumi_region.loc[rumi_meta_mdd.index]
y = rumi_meta["Depressiongroup"].apply(lambda x: 0
if x == "MDD" else 1
if x == "BPD" else 2)
class_name = ["MDD", "BPD", 'Healthy']
X_train, X_test, y_train, y_test = train_test_split(rumi_region, y, test_size=0.3, random_state=42)
###############################################################################
######################## Step 1 - Run Auto_ML #################################
###############################################################################
automl = AutoML_classification()
result = automl.fit(X_train, y_train, X_test, y_test)
###############################################################################
################### Step 2 - Run selected models ##############################
###############################################################################
log_best, _, _, _, _ = automl.LogisticRegression(X_train, y_train, X_test, y_test)
evaluate_dt = automl.evaluate_multiclass(log_best, X_train, y_train, X_test, y_test,
model = "Logistics_regression", num_class=3, class_name = class_name)
sgd_best, _, _, _, _ = automl.Stochastic_Gradient_Descent(X_train, y_train, X_test, y_test)
evaluate_dt = automl.evaluate_multiclass(sgd_best, X_train, y_train, X_test, y_test,
model = "Stochastic_Gradient_Descent", num_class=3, class_name = class_name)
rf_best, _, _, _, _ = automl.Random_Forest(X_train, y_train, X_test, y_test)
evaluate_rf = automl.evaluate_multiclass(rf_best, X_train, y_train, X_test, y_test,
model = "Random Forest", num_class=3, top_features=20, class_name = class_name)
###############################################################################
########## Step 3.1 - Run forward algorithm + Random Forest ###################
###############################################################################
import itertools
from scipy import interp
from itertools import cycle
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_validate
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold, RepeatedStratifiedKFold, RepeatedKFold
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import TimeSeriesSplit, GridSearchCV, RandomizedSearchCV
from xgboost import XGBClassifier
from datetime import datetime as dt
import warnings
warnings.filterwarnings("ignore")
st_t = dt.now()
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_samples_leaf = [1, 2, 4, 8, 10]
min_samples_split = [2, 4, 6, 8, 10]
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features,
}
base_model_rf = RandomForestClassifier(criterion = "gini", random_state=42)
n_iter_search = 30
scoring = "accuracy"
n_selected_features = 240
# selected feature set, initialized to be empty
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
start = time.time()
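# Greedy sequential forward selection: in every outer pass, each feature not yet
# in F is tried in turn, a RandomForestClassifier is re-tuned on the candidate
# subset with RandomizedSearchCV, and the single feature giving the highest
# test-set accuracy is permanently added to F. Cost per added feature is roughly
# n_remaining_features * n_iter_search * cv model fits.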
while count < n_selected_features:
max_acc = 0
time_loop = time.time()
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=cv_timeSeries,
cv=2,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if acc > max_acc:
max_acc = acc
idx = i
best_model = best_estimator
F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
print("Time for computation: {}".format(time.time() - time_loop))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = pd.concat([c, a, f["All"]], axis=1)
all_info.columns = ['Num_feature', 'Accuracy', 'Feature']
all_info = all_info.sort_values(by='Accuracy', ascending=False).reset_index(drop=True)
all_info.to_csv("CDI_subset_accuracy.csv", index=False)
f.to_csv("CDI_subset.csv")
with open("CDI_models.txt", "wb") as fp:
pickle.dump(all_model, fp)
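# Hedged sketch (not in the original): immediately reload the pickled models and
# pull out the estimator tuned on the best-scoring feature subset.
with open("CDI_models.txt", "rb") as fp:
    reloaded_models = pickle.load(fp)
best_idx = int(np.argmax(all_acc))
best_subset_model = reloaded_models[best_idx]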
###############################################################################
################# Step 3.1 - Run forward algorithm + SGD ######################
###############################################################################
from sklearn.linear_model import SGDClassifier
st_t = dt.now()
n_samples, n_features = X_train.shape
# Loss function
loss = ["hinge", "log", "modified_huber", "squared_hinge", "perceptron"]
penalty = ["l2", "l1", "elasticnet"]
# The higher the value, the stronger the regularization
alpha = np.logspace(-7, -1, 100)
# The Elastic Net mixing parameter
l1_ratio = np.linspace(0, 1, 100)
epsilon = np.logspace(-5, -1, 100)
learning_rate = ["constant", "optimal", "invscaling", "adaptive"]
eta0 = np.logspace(-7, -1, 100)
hyperparameter = {"loss": loss,
"penalty": penalty,
"alpha": alpha,
"l1_ratio": l1_ratio,
"epsilon": epsilon,
"learning_rate": learning_rate,
"eta0": eta0}
model = SGDClassifier(n_jobs = -1)
n_iter_search = 30
scoring = "accuracy"
n_selected_features = 240
# selected feature set, initialized to be empty
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_model = []
start = time.time()
while count < n_selected_features:
max_acc = 0
time_loop = time.time()
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator = model,
param_distributions = hyperparameter,
cv = 2,
scoring = scoring,
n_iter = n_iter_search,
n_jobs = -1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if acc > max_acc:
max_acc = acc
idx = i
best_model = best_estimator
F.append(idx)
count += 1
print("The current number of features: {} - Accuracy: {}%".format(count, round(max_acc*100, 2)))
print("Time for computation: {}".format(time.time() - time_loop))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
###############################################################################
######## Step 4.1 - Run forward algorithm + Random_Forest_regression ##########
###############################################################################
from Auto_ML_Regression import AutoML_Regression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_percentage_error
import math
y = rumi_meta["RRS_Brooding"]
rumi_region_plus = pd.concat([rumi_meta[['CurrentDepression', 'TIV', 'Age','Gender_1_male']],
rumi_region], axis=1)
#-------
y = rumi_meta_bdp["BDI_Total"]
rumi_region_bdp_plus = pd.concat([rumi_meta_bdp[['CurrentDepression', 'TIV', 'Age','Gender_1_male']],
rumi_region_bdp], axis=1)
X_train, X_test, y_train, y_test = train_test_split(rumi_region_bdp_plus, y, test_size=0.3, random_state=42)
# ------
y = rumi_meta_mdd["BDI_Total"]
rumi_region_mdd_plus = pd.concat([rumi_meta_mdd[['CurrentDepression', 'TIV', 'Age','Gender_1_male']],
rumi_region_mdd], axis=1)
X_train, X_test, y_train, y_test = train_test_split(rumi_region_mdd_plus, y, test_size=0.3, random_state=42)
# ------
ress_BPD_brain = pd.read_csv("BPD_brain.csv", header=None)
ress_BPD_brain.columns = rumi_region.columns
ress_BPD_meta = pd.read_csv("BPD_rrs.csv", header=None)
ress_BPD_meta.columns = ['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination','RRS_Total']
y = ress_BPD_meta["RRS_Brooding"]
X_train, X_test, y_train, y_test = train_test_split(ress_BPD_brain, y, test_size=0.3, random_state=42)
# ------
ress_MDD_brain = pd.read_csv("MDD_brain.csv", header=None)
ress_MDD_brain.columns = rumi_region.columns
ress_MDD_meta = pd.read_csv("MDD_rrs.csv", header=None)
ress_MDD_meta.columns = ['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination','RRS_Total']
y = ress_MDD_meta["RRS_Brooding"]
X_train, X_test, y_train, y_test = train_test_split(ress_MDD_brain, y, test_size=0.3, random_state=42)
# ------
ress_HC_brain = pd.read_csv("Health_brain.csv", header=None)
ress_HC_brain.columns = rumi_region.columns
ress_HC_meta = pd.read_csv("Health_rrs.csv", header=None)
ress_HC_meta.columns = ['RRS_Brooding', 'RRS_Reflection', 'RRS_DepressiveRumination','RRS_Total']
y = ress_HC_meta["RRS_Brooding"]
X_train, X_test, y_train, y_test = train_test_split(ress_HC_brain, y, test_size=0.3, random_state=42)
automl = AutoML_Regression()
result = automl.fit(X_train, y_train, X_test, y_test)
result.to_csv("AutoML_RRS_total_rumi_region_plus.csv", index = False)
ress_BPD_meta["Label"] = "BPD"
ress_MDD_meta["Label"] = "MDD"
ress_HC_meta["Label"] = "HC"
ress = pd.concat([ress_BPD_meta, ress_MDD_meta, ress_HC_meta]).reset_index(drop=True)
sns.pairplot(ress, hue="Label")
#------------------------------------------------------------------------------
automl = AutoML_Regression()
lasso_best, _, _, _ = automl.Random_Forest(X_train, y_train, X_test, y_test)
lasso_best.fit(X_train, y_train)
y_pred = lasso_best.predict(X_test)
plt.scatter(y_pred, y_test, s=8)
plt.plot([min(y_pred), max(y_pred)], [min(y_test), max(y_test)], '--k')
plt.ylabel('True RRS_total')
plt.xlabel('Predicted RRS_total')
#plt.text(s='Random Forest without Forward varible', x=1,
# y=2, fontsize=12, multialignment='center')
plt.text(min(y_pred), max(y_test) - 5, r'$R^2$ = %.2f' % (r2_score(y_test, y_pred)))
plt.text(min(y_pred), max(y_test) - 10, r'MSE = %.2f' % (mean_squared_error(y_test, y_pred)))
plt.text(min(y_pred), max(y_test) - 15, r'Accuracy = %.2f%%' % (100 - 100*mean_absolute_percentage_error(y_test, y_pred)))
#plt.ticklabel_format(axis="both", style="sci", scilimits=(0, 0))
errors = abs(y_pred - y_test)
mean_err = np.stack(errors/y_test)
mean_err = mean_err[np.isfinite(mean_err)]
mape = 100 * np.mean(mean_err)
acc = 100 - mape
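# i.e. acc = 100 - MAPE, with MAPE = (100 / n) * sum(|y_pred_i - y_test_i| / y_test_i)
# taken only over finite ratios, so zero-valued targets are dropped instead of
# producing inf.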
#------------------------------------------------------------------------------
n_samples, n_features = X_train.shape
n_estimators = [5, 10, 50, 100, 150, 200, 250, 300]
max_depth = [5, 10, 25, 50, 75, 100]
min_samples_leaf = [1, 2, 4, 8, 10]
min_samples_split = [2, 4, 6, 8, 10]
max_features = ["auto", "sqrt", "log2", None]
hyperparameter = {'n_estimators': n_estimators,
'max_depth': max_depth,
'min_samples_leaf': min_samples_leaf,
'min_samples_split': min_samples_split,
'max_features': max_features,
}
my_cv = RepeatedKFold(n_splits=10, n_repeats=10, random_state=42)
base_model_rf = RandomForestRegressor(criterion = "mse", random_state=42)
n_iter_search = 30
scoring = "neg_mean_squared_error"
n_selected_features = 240
F = []
count = 0
ddict = {}
all_F = []
all_c = []
all_acc = []
all_mse = []
all_model = []
start = time.time()
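# Same greedy forward selection as the classification loop above, except that a
# RandomForestRegressor is tuned and the feature minimizing test-set MSE is the
# one added at each step; accuracy is tracked as 100 - MAPE for reporting only.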
while count < n_selected_features:
max_acc = 0
min_err = np.inf
time_loop = time.time()
for i in X_train.columns:
if i not in F:
F.append(i)
X_train_tmp = X_train[F]
acc = 0
rsearch_cv = RandomizedSearchCV(estimator=base_model_rf,
random_state=42,
param_distributions=hyperparameter,
n_iter=n_iter_search,
#cv=my_cv,
cv=5,
scoring=scoring,
n_jobs=-1)
rsearch_cv.fit(X_train_tmp, y_train)
best_estimator = rsearch_cv.best_estimator_
y_pred = best_estimator.predict(X_test[F])
mse = mean_squared_error(y_test, y_pred)
#acc = metrics.accuracy_score(y_test, y_pred)
F.pop()
if mse < min_err:
min_err = mse
idx = i
best_model = best_estimator
#errors = abs(y_pred - y_test)
#mean_err = np.stack(errors/y_test)
#mean_err = mean_err[np.isfinite(mean_err)]
mape = mean_absolute_percentage_error(y_test, y_pred)
max_acc = 100 - (100*mape)
F.append(idx)
count += 1
print("The current number of features: {} - MSE: {}".format(count, round(min_err, 2)))
print("Time for computation: {}".format(time.time() - time_loop))
all_F.append(np.array(F))
all_c.append(count)
all_acc.append(max_acc)
all_model.append(best_model)
all_mse.append(min_err)
c = pd.DataFrame(all_c)
a = pd.DataFrame(all_acc)
f = pd.DataFrame(all_F)
e = pd.DataFrame(all_mse)
f["All"] = f[f.columns[0:]].apply(
lambda x: ', '.join(x.dropna().astype(str)), axis=1)
all_info = pd.concat([c, e, a, f["All"]], axis=1)
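# Hedged sketch (not in the original): persist the regression sweep the same way
# the classification branch does above; the output file name is an assumption.
all_info.columns = ['Num_feature', 'MSE', 'Accuracy', 'Feature']
all_info = all_info.sort_values(by='MSE', ascending=True).reset_index(drop=True)
all_info.to_csv("RRS_region_subset_mse.csv", index=False)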
import matplotlib
matplotlib.use('Agg')
from Swing.util.BoxPlot import BoxPlot
from matplotlib.backends.backend_pdf import PdfPages
from scipy import stats
import pdb
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
import os
import time
from Swing.util.mplstyle import style1
import seaborn as sns
from palettable.colorbrewer.qualitative import Set1_3
def get_df(df, fp, min_lag, max_lag, td_window, inftype = "RandomForest"):
new_df = df[(df['file_path'] == fp) & (df['min_lag'] == min_lag) & (df['max_lag'] == max_lag) & (df['td_window'] == td_window) & (df['InfType'] == inftype)]
return(new_df)
def load_data():
input_folder_list = ["/projects/p20519/roller_output/gnw/RandomForest/"]
agg_df_RF = read_tdr_results(input_folder_list, folder_str = "2017-09")
agg_df_RF['InfType'] = 'RandomForest'
input_folder_list = ["/projects/p20519/roller_output/gnw/Dionesus/"]
agg_df_P = read_tdr_results(input_folder_list, folder_str = "2017-09")
agg_df_P['InfType'] = 'PLSR'
input_folder_list = ["/projects/p20519/roller_output/gnw/Lasso/"]
agg_df_L = read_tdr_results(input_folder_list, folder_str = "2017-09")
agg_df_L['InfType'] = 'Lasso'
all_dfs = [agg_df_RF, agg_df_P, agg_df_L]
merged_df = pd.concat(all_dfs)
return(merged_df)
def read_tdr_results(folder_list, folder_str):
agg_df = pd.DataFrame()
for input_folder in folder_list:
for file_path in os.listdir(input_folder):
if folder_str in file_path:
df = pd.read_csv(input_folder+file_path,sep='\t', engine='python')
# check if the columns are misaligned.
if type(df['permutation_n'].iloc[0]) is str:
new_col = df.columns.tolist()
new_col.pop(0)
new_df = df.iloc[:,0:len(df.iloc[0])-1]
new_df.columns = new_col
df=new_df
agg_df = agg_df.append(df)
return(agg_df)
def get_inf_df(network_1, inf_type):
RFnet1 = network_1[network_1['InfType'] == inf_type]
RFn1 = RFnet1.groupby('td_window').mean()
return(RFn1)
def get_comparisons(merged_df, inftypes, window_sizes, network_list):
overall_df = pd.DataFrame()
network_1_df = pd.DataFrame()
for inftype in inftypes:
for td_window in window_sizes:
for network in network_list:
baseline = get_df(merged_df, network, 0, 0, 21, inftype = inftype)
if len(baseline) == 0:
continue
if 21-td_window > 2:
max_lag = 3
else:
max_lag = 21-td_window
if (td_window == 21):
min_lag = 0
max_lag = 0
else:
min_lag = 1
comparisons = get_df(merged_df, network, min_lag, max_lag, td_window, inftype = inftype)
if len(comparisons) == 0:
continue
# for each statistic, get the percent difference to the baseline comparison.
stat = 'aupr'
baseline_mean=baseline[stat].mean()
comparisons['percent_{}'.format(stat)] = ((comparisons[stat]-baseline_mean)/baseline_mean)*100
stat = 'auroc'
baseline_mean=baseline[stat].mean()
comparisons['percent_{}'.format(stat)] = ((comparisons[stat]-baseline_mean)/baseline_mean)*100
overall_df = overall_df.append(comparisons.iloc[0:20,:], ignore_index = True)
if network == network_list[6]:
network_1_df = network_1_df.append(comparisons.iloc[0:20,:], ignore_index = True)
print(comparisons,len(comparisons))
return(overall_df, network_1_df)
test_statistic = ['aupr', 'auroc']
save_tag = "window_scan"
n_trials = 100
#merged_df = load_data()
#merged_df.to_pickle("merged_window_scan.pkl")
#merged_df = pd.read_pickle("merged_window_scan.pkl")
#network_list = merged_df['file_path'].unique().tolist()
#window_sizes = range(2,22)
#inftypes = ['RandomForest', 'Lasso', 'PLSR']
#overall_df, network_1 = get_comparisons(merged_df, inftypes, window_sizes, network_list)
#overall_df.to_pickle("merged_window_scan_comparisons.pkl")
#network_1.to_pickle("merged_window_scan_comparisons_network1.pkl")
overall_df = pd.read_pickle("merged_window_scan_comparisons.pkl")
network_1 = pd.read_pickle("merged_window_scan_comparisons_network1.pkl")
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="reason platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize == size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize on A promotes it to an additional data_column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize keyed on 'values' applies to the data column and the values block
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
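# "\ud800" is a lone surrogate that plain utf-8 encoding would reject;
# errors="surrogatepass" lets it survive the round trip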
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize tests for append
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store an empty frame directly with put
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# ndarray directly
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently unsupported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# GH 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
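# 946706400000000000 ns == 2000-01-01 06:00 UTC, i.e. 01:00 US/Eastern,
# so the tz-aware timestamp round-tripped exactly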
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
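# note that the bare string '-3D' above is coerced to a Timedelta, so it is
# equivalent to the pd.Timedelta('-3D') form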
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that we can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
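# by_blocks=True compares the underlying dtype blocks rather than aligning
# column by column, which avoids ambiguity with the duplicate labels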
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# NaN not in the first position; float with NaN is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
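# np_zero looks unused to linters (hence the noqa) but is resolved by name
# from this scope when the where string below is evaluated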
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self, setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be exactly 1 chunk
assert len(results) == 1
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom this should return [],
# e.g. `for e in []: print(True)` never prints True.
where = "index <= '{beg_dt}' & index >= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self, setup_path):
# GH 3499, losing frequency info on index recreation
df = DataFrame(
dict(A=Series(range(3), index=date_range("2000-1-1", periods=3, freq="H")))
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "data")
store.put("data", df, format="table")
result = store.get("data")
tm.assert_frame_equal(df, result)
for attr in ["freq", "tz", "name"]:
for idx in ["index", "columns"]:
assert getattr(getattr(df, idx), attr, None) == getattr(
getattr(result, idx), attr, None
)
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("data", df2)
assert store.get_storer("data").info["index"]["freq"] is None
# this is ok
_maybe_remove(store, "df2")
df2 = DataFrame(
dict(
A=Series(
range(3),
index=[
Timestamp("20010101"),
Timestamp("20010102"),
Timestamp("20020101"),
],
)
)
)
store.append("df2", df2)
df3 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
store.append("df2", df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self, setup_path):
with ensure_clean_path(setup_path) as path:
with catch_warnings(record=True):
df = DataFrame(
dict(
A=Series(
range(3), index=date_range("2000-1-1", periods=3, freq="H")
)
)
)
df.to_hdf(path, "data", mode="w", append=True)
df2 = DataFrame(
dict(
A=Series(
range(3), index=date_range("2002-1-1", periods=3, freq="D")
)
)
)
df2.to_hdf(path, "data", append=True)
idx = date_range("2000-1-1", periods=3, freq="H")
idx.name = "foo"
df = DataFrame(dict(A=Series(range(3), index=idx)))
df.to_hdf(path, "data", mode="w", append=True)
assert read_hdf(path, "data").index.name == "foo"
with catch_warnings(record=True):
idx2 = date_range("2001-1-1", periods=3, freq="H")
idx2.name = "bar"
df2 = DataFrame(dict(A=Series(range(3), index=idx2)))
df2.to_hdf(path, "data", append=True)
assert read_hdf(path, "data").index.name is None
def test_frame_select(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
with pytest.raises(ValueError):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(self, setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
tm.assert_frame_equal(result, expected)
# or
result = store.select("df", 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select(
"df", "(index>df.index[3] & " 'index<=df.index[6]) | string="bar"'
)
expected = df.loc[
((df.index > df.index[3]) & (df.index <= df.index[6]))
| (df.string == "bar")
]
tm.assert_frame_equal(result, expected)
# invert
result = store.select("df", 'string!="bar"')
expected = df.loc[df.string != "bar"]
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
with pytest.raises(NotImplementedError):
store.select("df", '~(string="bar")')
# invert ok for filters
result = store.select("df", "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(["A", "B"])]
tm.assert_frame_equal(result, expected)
# in
result = store.select("df", "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self, setup_path):
with ensure_clean_path(["parms.hdf", "hist.hdf"]) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({"A": [1, 1, 2, 2, 3]})
parms.to_hdf(pp, "df", mode="w", format="table", data_columns=["A"])
selection = read_hdf(pp, "df", where="A=[2,3]")
hist = DataFrame(
np.random.randn(25, 1),
columns=["data"],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5) for j in range(5)], names=["l1", "l2"]
),
)
hist.to_hdf(hh, "df", mode="w", format="table")
expected = read_hdf(hh, "df", where="l1=[2, 3, 4]")
# scope with a list-like
l = selection.index.tolist() # noqa
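# `l` looks unused to the linter (hence the noqa) but is resolved by name when
# the where strings below are evaluated; same for `index` further down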
store = HDFStore(hh)
result = store.select("df", where="l1=l")
tm.assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, "df", where="l1=l")
tm.assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, "df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = read_hdf(hh, "df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select("df", where="l1=index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=selection.index.tolist()")
tm.assert_frame_equal(result, expected)
result = store.select("df", where="l1=list(selection.index)")
tm.assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self, setup_path):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
# not implemented
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A'] | columns=['B']")
# in theory we could deal with this
with pytest.raises(NotImplementedError):
store.select("df", "columns=['A','B'] & columns=['C']")
def test_string_select(self, setup_path):
# GH 2973
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df["x"] = "none"
df.loc[2:7, "x"] = ""
store.append("df", df, data_columns=["x"])
result = store.select("df", "x=none")
expected = df[df.x == "none"]
tm.assert_frame_equal(result, expected)
result = store.select("df", "x!=none")
expected = df[df.x != "none"]
tm.assert_frame_equal(result, expected)
df2 = df.copy()
df2.loc[df2.x == "", "x"] = np.nan
store.append("df2", df2, data_columns=["x"])
result = store.select("df2", "x!=none")
expected = df2[isna(df2.x)]
tm.assert_frame_equal(result, expected)
# int ==/!=
df["int"] = 1
df.loc[2:7, "int"] = 2
store.append("df3", df, data_columns=["int"])
result = store.select("df3", "int=2")
expected = df[df.int == 2]
tm.assert_frame_equal(result, expected)
result = store.select("df3", "int!=2")
expected = df[df.int != 2]
tm.assert_frame_equal(result, expected)
def test_read_column(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError, match="No object named df in the file"):
store.select_column("df", "index")
store.append("df", df)
# error
with pytest.raises(
KeyError, match=re.escape("'column [foo] not found in the table'")
):
store.select_column("df", "foo")
with pytest.raises(Exception):
store.select_column("df", "index", where=["index>5"])
# valid
result = store.select_column("df", "index")
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
with pytest.raises(ValueError):
store.select_column("df", "values_block_0")
# a data column
df2 = df.copy()
df2["string"] = "foo"
store.append("df2", df2, data_columns=["string"])
result = store.select_column("df2", "string")
tm.assert_almost_equal(result.values, df2["string"].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3["string"] = "foo"
df3.loc[4:6, "string"] = np.nan
store.append("df3", df3, data_columns=["string"])
result = store.select_column("df3", "string")
tm.assert_almost_equal(result.values, df3["string"].values)
# start/stop
result = store.select_column("df3", "string", start=2)
tm.assert_almost_equal(result.values, df3["string"].values[2:])
result = store.select_column("df3", "string", start=-2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:])
result = store.select_column("df3", "string", stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[:2])
result = store.select_column("df3", "string", stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[:-2])
result = store.select_column("df3", "string", start=2, stop=-2)
tm.assert_almost_equal(result.values, df3["string"].values[2:-2])
result = store.select_column("df3", "string", start=-2, stop=2)
tm.assert_almost_equal(result.values, df3["string"].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({"A": np.random.randn(10), "B": "foo"})
store.append("df4", df4, data_columns=True)
expected = df4["B"]
result = store.select_column("df4", "B")
tm.assert_series_equal(result, expected)
def test_coordinates(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append("df", df)
# all
c = store.select_as_coordinates("df")
assert (c.values == np.arange(len(df.index))).all()
# get coordinates back & test vs frame
_maybe_remove(store, "df")
df = DataFrame(dict(A=range(5), B=range(5)))
store.append("df", df)
c = store.select_as_coordinates("df", ["index<3"])
assert (c.values == np.arange(3)).all()
result = store.select("df", where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates("df", ["index>=3", "index<=4"])
assert (c.values == np.arange(2) + 3).all()
result = store.select("df", where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, "df1")
_maybe_remove(store, "df2")
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
c = store.select_as_coordinates("df1", ["A>0", "B>0"])
df1_result = store.select("df1", c)
df2_result = store.select("df2", c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(setup_path) as store:
df = DataFrame(
np.random.randn(1000, 2), index=date_range("20000101", periods=1000)
)
store.append("df", df)
c = store.select_column("df", "index")
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select("df", where=where)
tm.assert_frame_equal(result, expected)
# invalid
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df), dtype="float64"))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df) + 1))
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5)
with pytest.raises(ValueError):
store.select("df", where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range("20000101", periods=500)
result = store.select("df", where="index in selection")
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append("df2", df)
result = store.select("df2", where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select("df2", where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select("df2", start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# exceptions
with pytest.raises(ValueError):
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df3"
)
with pytest.raises(ValueError):
store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")
with pytest.raises(ValueError):
store.append_to_multiple("df1", df, "df1")
# regular operation
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1"
)
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
)
result = store.select_as_multiple(["df1", "df2"])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select("df1").index, store.select("df2").index)
@pytest.mark.xfail(
run=False, reason="append_to_multiple_dropna_false is not raising as failed"
)
def test_append_to_multiple_dropna_false(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(setup_path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
)
with pytest.raises(ValueError):
store.select_as_multiple(["df1a", "df2a"])
assert not store.select("df1a").index.equals(store.select("df2a").index)
def test_select_as_multiple(self, setup_path):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns="{}_2".format)
df2["foo"] = "bar"
with ensure_clean_store(setup_path) as store:
# no tables stored
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
store.append("df1", df1, data_columns=["A", "B"])
store.append("df2", df2)
# exceptions
with pytest.raises(Exception):
store.select_as_multiple(None, where=["A>0", "B>0"], selector="df1")
with pytest.raises(Exception):
store.select_as_multiple([None], where=["A>0", "B>0"], selector="df1")
msg = "'No object named df3 in the file'"
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
with pytest.raises(KeyError, match=msg):
store.select_as_multiple(["df3"], where=["A>0", "B>0"], selector="df1")
with pytest.raises(KeyError, match="'No object named df4 in the file'"):
store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df4"
)
# default select
result = store.select("df1", ["A>0", "B>0"])
expected = store.select_as_multiple(
["df1"], where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
"df1", where=["A>0", "B>0"], selector="df1"
)
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
["df1", "df2"], where=["A>0", "B>0"], selector="df1"
)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
["df1", "df2"], where="index>df2.index[4]", selector="df2"
)
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append("df3", tm.makeTimeDataFrame(nper=50))
with pytest.raises(ValueError):
store.select_as_multiple(
["df1", "df3"], where=["A>0", "B>0"], selector="df1"
)
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion("3.1.0"),
reason=("tables version does not support fix for nan selection bug: GH 4858"),
)
def test_nan_selection_bug_4858(self, setup_path):
with | ensure_clean_store(setup_path) | pandas.tests.io.pytables.common.ensure_clean_store |
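# Illustrative sketch, not part of the test suite above: the basic where-clause
# selection pattern these tests exercise, written against a plain local file.
# The function name and the 'example.h5' file name are assumptions for
# illustration only.
def _example_where_selection():
    df = DataFrame(
        {"A": np.random.randn(10), "B": np.random.randn(10)},
        index=date_range("20000101", periods=10),
    )
    with HDFStore("example.h5", mode="w") as store:
        # data_columns makes A and B queryable in where clauses
        store.append("df", df, data_columns=["A", "B"])
        positive = store.select("df", where="A>0")
        recent = store.select("df", where="index>df.index[5] & B<0")
        first_rows = store.select("df", start=0, stop=5)
    return positive, recent, first_rows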
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import multiprocessing
import random
from threading import Thread
import botocore
from django.contrib import auth
from django.contrib.auth import authenticate
from django.shortcuts import render
from django.template import RequestContext
from django.utils.datetime_safe import datetime
from django.views.decorators.csrf import csrf_exempt
import json
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer as wn
from django.http import HttpResponse, JsonResponse
import re
from openpyxl import load_workbook
from openpyxl.writer.excel import save_virtual_workbook
from apollo.Lib.collections import Counter
from apollo4.ComputeOptimalParameters import getOptimalParameterForMNB_alpha, getOptimalParameterForLR_alpha, \
getOptimalParameterForSVM_alpha, getOptimalParameterForOVRMNB_alpha, getOptimalParameterForOVRLR_alpha, \
getOptimalParameterForOVRSVM_alpha, getBestModelAndHyperParameters
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import KFold
from sklearn import preprocessing
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import numpy as np
import glob
import copy
from dateutil.relativedelta import relativedelta
import os, sys, signal
from os.path import splitext
import uuid
import apollo4.globals
import boto3
from elasticsearch import Elasticsearch
from apollo4.connection import MyConnection
import pickle
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import TruncatedSVD
from zipfile import ZipFile
import io
from django.contrib.auth.decorators import login_required
from django.conf import settings
from array import array
import copy
stopwords = ["i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "you're", "you've", "you'll", "you'd",
"your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she", "she's", "her", "hers",
"herself", "it", "it's", "its", "itself", "they", "them", "their", "theirs", "themselves", "what", "which",
"who", "whom", "this", "that", "that'll", "these", "those", "am", "is", "are", "was", "were", "be", "been",
"being",
"have", "has", "had", "having", "do", "does", "did", "doing", "a", "an", "the", "and", "but", "if", "or",
"because", "as", "until", "while", "of", "at", "by", "for", "with", "about", "against", "between", "into",
"through",
"during", "before", "after", "above", "below", "to", "from", "up", "down", "in", "out", "on", "off",
"over", "under", "again", "further", "then", "once", "here", "there", "when", "where", "why", "how", "all",
"any", "both",
"each", "few", "more", "most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so",
"than", "too", "very", "s", "t", "can", "will", "just", "don", "don't", "should", "should've", "now", "d",
"ll", "m",
"o", "re", "ve", "y", "ain", "aren", "aren't", "couldn", "couldn't", "didn", "didn't", "doesn", "doesn't",
"hadn", "hadn't", "hasn", "hasn't", "haven", "haven't", "isn", "isn't", "ma", "mightn", "mightn't",
"mustn", "mustn't",
"needn", "needn't", "shan", "shan't", "shouldn", "shouldn't", "wasn", "wasn't", "weren", "weren't", "won",
"won't", "wouldn", "wouldn't"]
@csrf_exempt
@login_required(login_url='/login/')
def sl(request):
return render(request, 'apollo4/spotlight.html')
def Spotlight_Top_Papers(df):
try:
# if request.method == 'POST':
# userName = request.user.username;
# inputData = request.FILES.getlist('file')
# inputDataArray = []
# altogether = None
# titleRecords = []
# for files in inputData:
# inputrecords = pd.read_csv(files, header=0, encoding='unicode_escape')
# title = inputrecords.get('Title')
# titleRecords.append(title)
# inputDataArray.append(inputrecords)
#
# print(titleRecords)
# df = pd.read_csv()
# df.head(5)
# # Get topk from GUI (e.g., top 5, top 10, top 15, or top 20)
topk = 10
num_references = df['Cited by']
titles = df['Title']
years = df['Year']
authors = df['Authors']
affiliations = df['Affiliations']
topkindices = np.argsort(list(num_references))[::-1]
topkindices_cleaned = []
counter = 0
while len(topkindices_cleaned) < topk:
if not np.isnan(num_references[topkindices[counter]]):
topkindices_cleaned.append(topkindices[counter])
counter += 1
num_references[topkindices_cleaned]
# authors_without_commas = authors[topkindices_cleaned]
#
# authors_to_return = []
# for author in authors_without_commas:
# authors_to_return.append(author.replace(',', ';'))
topPapers = {
'titles': titles[topkindices_cleaned].to_json(),
'authors': authors[topkindices_cleaned].to_json(),
'affiliations':affiliations[topkindices_cleaned].to_json(),
'years':years[topkindices_cleaned].to_json(),
'citations': num_references[topkindices_cleaned].to_json()
}
# return titles, years, authors, and affiliations for top papers
return topPapers
except Exception as e:
return HttpResponse(
"Error running the program. Please contact the IP Group Analytics Team (<EMAIL>) to resolve the issue. Please provide the error details below in your email. \nPlease provide all the steps to reproduce this issue. \n" + "-" * 40 + "\n" + str(
e) + "\n" + "-" * 40)
def Spotlight_Top_Patents(df):
try:
# df = pd.read_csv(datafile, sep=',', encoding='ISO-8859-1')
# print(df.head(5))
# Get topk from GUI (e.g., top 5, top 10, top 15, or top 20)
topk = 10
references = df['Domestic References-By']
num_references = []
for ref in references:
if type(ref) == str:
if ref != '':
num_ref = len(ref.split(','))
num_references.append(num_ref)
else:
num_references.append(0)
elif type(ref) == float:
num_references.append(0)
else:
num_references.append(0)
titles = df['Title']
application_dates = df['Appl. Date']
assignees = df['Assignee']
topkindices = np.argsort(num_references)[::-1]
topkindices_cleaned = []
num_references = | pd.DataFrame(num_references, columns=['References']) | pandas.DataFrame |
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
sess = tf.Session(config=tf_config)
import sys
if not '../' in sys.path: sys.path.append('../')
import pandas as pd
from utils import data_utils
from model_config import config
from train_discriminator import get_label_vec
from ved_varAttnMultiTask import VarSeq2SeqVarAttnMultiTaskModel
def train_model(config):
print('[INFO] Preparing data for experiment: {}'.format(config['experiment']))
if config['experiment'] == 'qgen':
train_data = pd.read_csv(config['data_dir'] + 'df_qgen_train.csv')
val_data = pd.read_csv(config['data_dir'] + 'df_qgen_val.csv')
test_data = pd.read_csv(config['data_dir'] + 'df_qgen_test.csv')
input_sentences = | pd.concat([train_data['answer'], val_data['answer'], test_data['answer']]) | pandas.concat |
#!/usr/bin/env python3
import pytest
import os
import pathlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
import math
import torch
from neuralprophet import NeuralProphet, set_random_seed
from neuralprophet import df_utils
log = logging.getLogger("NP.test")
log.setLevel("WARNING")
log.parent.setLevel("WARNING")
DIR = pathlib.Path(__file__).parent.parent.absolute()
DATA_DIR = os.path.join(DIR, "tests", "test-data")
PEYTON_FILE = os.path.join(DATA_DIR, "wp_log_peyton_manning.csv")
AIR_FILE = os.path.join(DATA_DIR, "air_passengers.csv")
YOS_FILE = os.path.join(DATA_DIR, "yosemite_temps.csv")
NROWS = 256
EPOCHS = 2
BATCH_SIZE = 64
LR = 1.0
PLOT = False
def test_names():
log.info("testing: names")
m = NeuralProphet()
m._validate_column_name("hello_friend")
def test_train_eval_test():
log.info("testing: Train Eval Test")
m = NeuralProphet(
n_lags=10,
n_forecasts=3,
ar_sparsity=0.1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df = pd.read_csv(PEYTON_FILE, nrows=95)
df = df_utils.check_dataframe(df, check_y=False)
df = m._handle_missing_data(df, freq="D", predicting=False)
df_train, df_test = m.split_df(df, freq="D", valid_p=0.1)
metrics = m.fit(df_train, freq="D", validation_df=df_test)
val_metrics = m.test(df_test)
log.debug("Metrics: train/eval: \n {}".format(metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
log.debug("Metrics: test: \n {}".format(val_metrics.to_string(float_format=lambda x: "{:6.3f}".format(x))))
def test_df_utils_func():
log.info("testing: df_utils Test")
df = pd.read_csv(PEYTON_FILE, nrows=95)
df = df_utils.check_dataframe(df, check_y=False)
# test find_time_threshold
df_dict, _ = df_utils.prep_copy_df_dict(df)
time_threshold = df_utils.find_time_threshold(df_dict, n_lags=2, valid_p=0.2, inputs_overbleed=True)
df_train, df_val = df_utils.split_considering_timestamp(
df_dict, n_lags=2, n_forecasts=2, inputs_overbleed=True, threshold_time_stamp=time_threshold
)
# init data params with a list
global_data_params = df_utils.init_data_params(df_dict, normalize="soft")
global_data_params = df_utils.init_data_params(df_dict, normalize="soft1")
global_data_params = df_utils.init_data_params(df_dict, normalize="standardize")
log.debug("Time Threshold: \n {}".format(time_threshold))
log.debug("Df_train: \n {}".format(type(df_train)))
log.debug("Df_val: \n {}".format(type(df_val)))
def test_trend():
log.info("testing: Trend")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
growth="linear",
n_changepoints=10,
changepoints_range=0.9,
trend_reg=1,
trend_reg_threshold=False,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# print(m.config_trend)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_custom_changepoints():
log.info("testing: Custom Changepoints")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
dates = df["ds"][range(1, len(df) - 1, int(len(df) / 5.0))]
dates_list = [str(d) for d in dates]
dates_array = pd.to_datetime(dates_list).values
log.debug("dates: {}".format(dates))
log.debug("dates_list: {}".format(dates_list))
log.debug("dates_array: {} {}".format(dates_array.dtype, dates_array))
for cp in [dates_list, dates_array]:
m = NeuralProphet(
changepoints=cp,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# print(m.config_trend)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
# m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_no_trend():
log.info("testing: No-Trend")
df = pd.read_csv(PEYTON_FILE, nrows=512)
m = NeuralProphet(
growth="off",
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=60, n_historic_predictions=60)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_seasons():
log.info("testing: Seasonality: additive")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
yearly_seasonality=8,
weekly_seasonality=4,
seasonality_mode="additive",
seasonality_reg=1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
log.debug("SUM of yearly season params: {}".format(sum(abs(m.model.season_params["yearly"].data.numpy()))))
log.debug("SUM of weekly season params: {}".format(sum(abs(m.model.season_params["weekly"].data.numpy()))))
log.debug("season params: {}".format(m.model.season_params.items()))
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
log.info("testing: Seasonality: multiplicative")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
# m = NeuralProphet(n_lags=60, n_changepoints=10, n_forecasts=30, verbose=True)
m = NeuralProphet(
yearly_seasonality=8,
weekly_seasonality=4,
seasonality_mode="multiplicative",
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
def test_custom_seasons():
log.info("testing: Custom Seasonality")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
other_seasons = False
m = NeuralProphet(
yearly_seasonality=other_seasons,
weekly_seasonality=other_seasons,
daily_seasonality=other_seasons,
seasonality_mode="additive",
# seasonality_mode="multiplicative",
seasonality_reg=1,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m = m.add_seasonality(name="quarterly", period=90, fourier_order=5)
log.debug("seasonalities: {}".format(m.season_config.periods))
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=365, periods=365)
forecast = m.predict(df=future)
log.debug("season params: {}".format(m.model.season_params.items()))
if PLOT:
m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar():
log.info("testing: AR")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=7,
yearly_seasonality=False,
epochs=EPOCHS,
# batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar_sparse():
log.info("testing: AR (sparse")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=3,
n_lags=14,
ar_sparsity=0.5,
yearly_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_ar_deep():
log.info("testing: AR-Net (deep)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=14,
num_hidden_layers=2,
d_hidden=32,
yearly_seasonality=False,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=90)
forecast = m.predict(df=future)
if PLOT:
m.plot_last_forecast(forecast, include_previous_forecasts=3)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_lag_reg():
log.info("testing: Lagged Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=2,
n_lags=3,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(30, min_periods=1).mean()
m = m.add_lagged_regressor(names="A")
m = m.add_lagged_regressor(names="B", only_last_value=True)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, n_historic_predictions=10)
forecast = m.predict(future)
if PLOT:
print(forecast.to_string())
m.plot_last_forecast(forecast, include_previous_forecasts=5)
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_lag_reg_deep():
log.info("testing: List of Lagged Regressors (deep)")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=1,
n_lags=14,
num_hidden_layers=2,
d_hidden=32,
weekly_seasonality=False,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(15, min_periods=1).mean()
df["C"] = df["y"].rolling(30, min_periods=1).mean()
cols = [col for col in df.columns if col not in ["ds", "y"]]
m = m.add_lagged_regressor(names=cols)
m.highlight_nth_step_ahead_of_each_forecast(m.n_forecasts)
metrics_df = m.fit(df, freq="D")
forecast = m.predict(df)
if PLOT:
# print(forecast.to_string())
# m.plot_last_forecast(forecast, include_previous_forecasts=10)
# m.plot(forecast)
# m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_events():
log.info("testing: Events")
df = pd.read_csv(PEYTON_FILE)[-NROWS:]
playoffs = pd.DataFrame(
{
"event": "playoff",
"ds": pd.to_datetime(
[
"2008-01-13",
"2009-01-03",
"2010-01-16",
"2010-01-24",
"2010-02-07",
"2011-01-08",
"2013-01-12",
"2014-01-12",
"2014-01-19",
"2014-02-02",
"2015-01-11",
"2016-01-17",
"2016-01-24",
"2016-02-07",
]
),
}
)
superbowls = pd.DataFrame(
{
"event": "superbowl",
"ds": pd.to_datetime(["2010-02-07", "2014-02-02", "2016-02-07"]),
}
)
events_df = pd.concat((playoffs, superbowls))
m = NeuralProphet(
n_lags=2,
n_forecasts=30,
daily_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
# set event windows
m = m.add_events(
["superbowl", "playoff"], lower_window=-1, upper_window=1, mode="multiplicative", regularization=0.5
)
# add the country specific holidays
m = m.add_country_holidays("US", mode="additive", regularization=0.5)
m.add_country_holidays("Indonesia")
m.add_country_holidays("Thailand")
m.add_country_holidays("Philippines")
m.add_country_holidays("Pakistan")
m.add_country_holidays("Belarus")
history_df = m.create_df_with_events(df, events_df)
metrics_df = m.fit(history_df, freq="D")
future = m.make_future_dataframe(df=history_df, events_df=events_df, periods=30, n_historic_predictions=90)
forecast = m.predict(df=future)
log.debug("Event Parameters:: {}".format(m.model.event_params))
if PLOT:
m.plot_components(forecast)
m.plot(forecast)
m.plot_parameters()
plt.show()
def test_future_reg():
log.info("testing: Future Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS + 50)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
df["A"] = df["y"].rolling(7, min_periods=1).mean()
df["B"] = df["y"].rolling(30, min_periods=1).mean()
regressors_df_future = pd.DataFrame(data={"A": df["A"][-50:], "B": df["B"][-50:]})
df = df[:-50]
m = m.add_future_regressor(name="A")
m = m.add_future_regressor(name="B", mode="multiplicative")
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df=df, regressors_df=regressors_df_future, n_historic_predictions=10, periods=50)
forecast = m.predict(df=future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_plot():
log.info("testing: Plotting")
df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
m = NeuralProphet(
n_forecasts=7,
n_lags=14,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=m.n_forecasts, n_historic_predictions=10)
forecast = m.predict(future)
m.plot(forecast)
m.plot_last_forecast(forecast, include_previous_forecasts=10)
m.plot_components(forecast)
m.plot_parameters()
m.highlight_nth_step_ahead_of_each_forecast(7)
forecast = m.predict(df)
m.plot(forecast)
m.plot_last_forecast(forecast, include_previous_forecasts=10)
m.plot_components(forecast)
m.plot_parameters()
if PLOT:
plt.show()
def test_air_data():
log.info("TEST air_passengers.csv")
df = pd.read_csv(AIR_FILE)
m = NeuralProphet(
n_changepoints=0,
yearly_seasonality=2,
seasonality_mode="multiplicative",
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics = m.fit(df, freq="MS")
future = m.make_future_dataframe(df, periods=48, n_historic_predictions=len(df) - m.n_lags)
forecast = m.predict(future)
if PLOT:
m.plot(forecast)
m.plot_components(forecast)
m.plot_parameters()
plt.show()
def test_random_seed():
log.info("TEST random seed")
df = pd.read_csv(PEYTON_FILE, nrows=512)
set_random_seed(0)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
checksum1 = sum(forecast["yhat1"].values)
set_random_seed(0)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
checksum2 = sum(forecast["yhat1"].values)
set_random_seed(1)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
checksum3 = sum(forecast["yhat1"].values)
log.debug("should be same: {} and {}".format(checksum1, checksum2))
log.debug("should not be same: {} and {}".format(checksum1, checksum3))
assert math.isclose(checksum1, checksum2)
assert not math.isclose(checksum1, checksum3)
def test_yosemite():
log.info("TEST Yosemite Temps")
df = pd.read_csv(YOS_FILE, nrows=NROWS)
m = NeuralProphet(
changepoints_range=0.95,
n_changepoints=15,
weekly_seasonality=False,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics = m.fit(df, freq="5min")
future = m.make_future_dataframe(df, periods=12 * 24, n_historic_predictions=12 * 24)
forecast = m.predict(future)
if PLOT:
m.plot(forecast)
m.plot_parameters()
plt.show()
def test_model_cv():
log.info("CV from model")
def check_simple(df):
m = NeuralProphet(
learning_rate=LR,
)
folds = m.crossvalidation_split_df(df, freq="D", k=5, fold_pct=0.1, fold_overlap_pct=0.5)
assert all([70 + i * 5 == len(train) for i, (train, val) in enumerate(folds)])
assert all([10 == len(val) for (train, val) in folds])
def check_cv(df, freq, n_lags, n_forecasts, k, fold_pct, fold_overlap_pct):
m = NeuralProphet(
n_lags=n_lags,
n_forecasts=n_forecasts,
learning_rate=LR,
)
folds = m.crossvalidation_split_df(df, freq=freq, k=k, fold_pct=fold_pct, fold_overlap_pct=fold_overlap_pct)
total_samples = len(df) - m.n_lags + 2 - (2 * m.n_forecasts)
per_fold = int(fold_pct * total_samples)
not_overlap = per_fold - int(fold_overlap_pct * per_fold)
assert all([per_fold == len(val) - m.n_lags + 1 - m.n_forecasts for (train, val) in folds])
assert all(
[
total_samples - per_fold - (k - i - 1) * not_overlap == len(train) - m.n_lags + 1 - m.n_forecasts
for i, (train, val) in enumerate(folds)
]
)
check_simple(pd.DataFrame({"ds": pd.date_range(start="2017-01-01", periods=100), "y": np.arange(100)}))
check_cv(
df=pd.DataFrame({"ds": pd.date_range(start="2017-01-01", periods=100), "y": np.arange(100)}),
n_lags=10,
n_forecasts=5,
freq="D",
k=5,
fold_pct=0.1,
fold_overlap_pct=0,
)
check_cv(
df=pd.DataFrame({"ds": pd.date_range(start="2017-01-01", periods=100), "y": np.arange(100)}),
n_lags=10,
n_forecasts=15,
freq="D",
k=5,
fold_pct=0.1,
fold_overlap_pct=0.5,
)
def test_loss_func():
log.info("TEST setting torch.nn loss func")
df = pd.read_csv(PEYTON_FILE, nrows=512)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
loss_func="MSE",
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
def test_loss_func_torch():
log.info("TEST setting torch.nn loss func")
df = pd.read_csv(PEYTON_FILE, nrows=512)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
loss_func=torch.nn.MSELoss,
learning_rate=LR,
)
metrics_df = m.fit(df, freq="D")
future = m.make_future_dataframe(df, periods=10, n_historic_predictions=10)
forecast = m.predict(future)
def test_callable_loss():
log.info("TEST Callable Loss")
def my_loss(output, target):
assym_penalty = 1.25
beta = 1
e = target - output
me = torch.abs(e)
z = torch.where(me < beta, 0.5 * (me ** 2) / beta, me - 0.5 * beta)
z = torch.where(e < 0, z, assym_penalty * z)
return z
df = pd.read_csv(YOS_FILE, nrows=NROWS)
# auto-lr with range test
m = NeuralProphet(
seasonality_mode="multiplicative",
loss_func=my_loss,
)
with pytest.raises(ValueError):
        # find_learning_rate only supports normal torch Loss functions
metrics = m.fit(df, freq="5min")
df = pd.read_csv(YOS_FILE, nrows=NROWS)
m = NeuralProphet(
loss_func=my_loss,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=0.1, # bypasses find_learning_rate
)
metrics = m.fit(df, freq="5min")
future = m.make_future_dataframe(df, periods=12 * 24, n_historic_predictions=12 * 24)
forecast = m.predict(future)
def test_custom_torch_loss():
log.info("TEST PyTorch Custom Loss")
class MyLoss(torch.nn.modules.loss._Loss):
def forward(self, input, target):
alpha = 0.9
y_diff = target - input
yhat_diff = input - target
loss = (
(
alpha * torch.max(y_diff, torch.zeros_like(y_diff))
+ (1 - alpha) * torch.max(yhat_diff, torch.zeros_like(yhat_diff))
)
.sum()
.mean()
)
return loss
df = pd.read_csv(YOS_FILE, nrows=NROWS)
m = NeuralProphet(
loss_func=MyLoss, # auto-lr with range test
)
with pytest.raises(ValueError):
        # find_learning_rate only supports normal torch Loss functions
metrics = m.fit(df, freq="5min")
df = pd.read_csv(YOS_FILE, nrows=NROWS)
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
loss_func=MyLoss,
learning_rate=1, # bypasses find_learning_rate
)
metrics = m.fit(df, freq="5min")
future = m.make_future_dataframe(df, periods=12, n_historic_predictions=12)
forecast = m.predict(future)
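# Condensed, illustrative quickstart of the fit/predict pattern the tests above
# repeat (reuses the same PEYTON_FILE fixture and constants; not itself a test,
# and the helper name is invented).
def _example_quickstart():
    df = pd.read_csv(PEYTON_FILE, nrows=NROWS)
    m = NeuralProphet(
        n_lags=7,
        n_forecasts=3,
        epochs=EPOCHS,
        batch_size=BATCH_SIZE,
        learning_rate=LR,
    )
    m.fit(df, freq="D")
    future = m.make_future_dataframe(df, periods=3, n_historic_predictions=30)
    return m.predict(future)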
def test_global_modeling_split_df():
### GLOBAL MODELLING - SPLIT DF
log.info("Global Modeling - Split df")
df = pd.read_csv(PEYTON_FILE, nrows=512)
df1 = df.iloc[:128, :].copy(deep=True)
df2 = df.iloc[128:256, :].copy(deep=True)
df3 = df.iloc[256:384, :].copy(deep=True)
df_dict = {"dataset1": df1, "dataset2": df2, "dataset3": df3}
m = NeuralProphet(
n_forecasts=2,
n_lags=3,
learning_rate=LR,
)
log.info("split df with single df")
df_train, df_val = m.split_df(df1)
log.info("split df with dict of dataframes")
df_train, df_val = m.split_df(df_dict)
log.info("split df with dict of dataframes - local_split")
df_train, df_val = m.split_df(df_dict, local_split=True)
def test_global_modeling_no_exogenous_variable():
### GLOBAL MODELLING - NO EXOGENOUS VARIABLE
log.info("Global Modeling - No exogenous variables")
df = pd.read_csv(PEYTON_FILE, nrows=512)
df1_0 = df.iloc[:128, :].copy(deep=True)
df2_0 = df.iloc[128:256, :].copy(deep=True)
df3_0 = df.iloc[256:384, :].copy(deep=True)
df4_0 = df.iloc[384:, :].copy(deep=True)
train_input = {0: df1_0, 1: {"df1": df1_0, "df2": df2_0}, 2: {"df1": df1_0, "df2": df2_0}}
test_input = {0: df3_0, 1: {"df1": df3_0}, 2: {"df1": df3_0, "df2": df4_0}}
info_input = {
0: "Testing df train / df test - no events, no regressors",
1: "Testing dict df train / df test - no events, no regressors",
2: "Testing dict df train / dict df test - no events, no regressors",
}
for i in range(0, 3):
log.info(info_input[i])
m = NeuralProphet(
n_forecasts=2,
n_lags=10,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
metrics = m.fit(train_input[i], freq="D")
forecast = m.predict(df=test_input[i])
forecast_trend = m.predict_trend(df=test_input[i])
forecast_seasonal_componets = m.predict_seasonal_components(df=test_input[i])
if PLOT:
            forecast = forecast if isinstance(forecast, dict) else {"df1": forecast}
            for key in forecast:
                fig1 = m.plot(forecast[key])
                fig2 = m.plot(forecast[key])
with pytest.raises(ValueError):
forecast = m.predict({"df4": df4_0})
log.info("Error - dict with names not provided in the train dict (not in the data params dict)")
with pytest.raises(ValueError):
metrics = m.test({"df4": df4_0})
log.info("Error - dict with names not provided in the train dict (not in the data params dict)")
m = NeuralProphet(
n_forecasts=2,
n_lags=10,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.fit({"df1": df1_0, "df2": df2_0}, freq="D")
with pytest.raises(ValueError):
forecast = m.predict({"df4": df4_0})
# log.info("unknown_data_normalization was not set to True")
with pytest.raises(ValueError):
metrics = m.test({"df4": df4_0})
# log.info("unknown_data_normalization was not set to True")
with pytest.raises(ValueError):
forecast_trend = m.predict_trend({"df4": df4_0})
# log.info("unknown_data_normalization was not set to True")
with pytest.raises(ValueError):
forecast_seasonal_componets = m.predict_seasonal_components({"df4": df4_0})
# log.info("unknown_data_normalization was not set to True")
# Set unknown_data_normalization to True - now there should be no errors
m.config_normalization.unknown_data_normalization = True
forecast = m.predict({"df4": df4_0})
metrics = m.test({"df4": df4_0})
forecast_trend = m.predict_trend({"df4": df4_0})
forecast_seasonal_componets = m.predict_seasonal_components({"df4": df4_0})
m.plot_parameters(df_name="df1")
m.plot_parameters()
def test_global_modeling_validation_df():
log.info("Global Modeling + Local Normalization")
df = pd.read_csv(PEYTON_FILE, nrows=512)
df1_0 = df.iloc[:128, :].copy(deep=True)
df2_0 = df.iloc[128:256, :].copy(deep=True)
df_dict = {"df1": df1_0, "df2": df2_0}
m = NeuralProphet(
n_forecasts=2,
n_lags=10,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
with pytest.raises(ValueError):
m.fit(df_dict, freq="D", validation_df=df2_0)
log.info("Error - name of validation df was not provided")
m = NeuralProphet(
n_forecasts=2,
n_lags=10,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m.fit(df_dict, freq="D", validation_df={"df2": df2_0})
# Now it works because we provide the name of the validation_df
def test_global_modeling_global_normalization():
### GLOBAL MODELLING - NO EXOGENOUS VARIABLES - GLOBAL NORMALIZATION
log.info("Global Modeling + Global Normalization")
df = pd.read_csv(PEYTON_FILE, nrows=512)
df1_0 = df.iloc[:128, :].copy(deep=True)
df2_0 = df.iloc[128:256, :].copy(deep=True)
df3_0 = df.iloc[256:384, :].copy(deep=True)
m = NeuralProphet(
n_forecasts=2, n_lags=10, epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=LR, global_normalization=True
)
train_dict = {"df1": df1_0, "df2": df2_0}
test_dict = {"df3": df3_0}
m.fit(train_dict)
future = m.make_future_dataframe(test_dict)
forecast = m.predict(future)
metrics = m.test(test_dict)
forecast_trend = m.predict_trend(test_dict)
forecast_seasonal_componets = m.predict_seasonal_components(test_dict)
def test_global_modeling_with_future_regressors():
### GLOBAL MODELLING + REGRESSORS
log.info("Global Modeling + Regressors")
df = pd.read_csv(PEYTON_FILE, nrows=512)
df1 = df.iloc[:128, :].copy(deep=True)
df2 = df.iloc[128:256, :].copy(deep=True)
df3 = df.iloc[256:384, :].copy(deep=True)
df4 = df.iloc[384:, :].copy(deep=True)
df1["A"] = df1["y"].rolling(30, min_periods=1).mean()
df2["A"] = df2["y"].rolling(10, min_periods=1).mean()
df3["A"] = df3["y"].rolling(40, min_periods=1).mean()
df4["A"] = df4["y"].rolling(20, min_periods=1).mean()
future_regressors_df3 = pd.DataFrame(data={"A": df3["A"][:30]})
future_regressors_df4 = pd.DataFrame(data={"A": df4["A"][:40]})
train_input = {0: df1, 1: {"df1": df1, "df2": df2}, 2: {"df1": df1, "df2": df2}}
test_input = {0: df3, 1: {"df1": df3}, 2: {"df1": df3, "df2": df4}}
regressors_input = {
0: future_regressors_df3,
1: {"df1": future_regressors_df3},
2: {"df1": future_regressors_df3, "df2": future_regressors_df4},
}
info_input = {
0: "Testing df train / df test - df regressor, no events",
1: "Testing dict df train / df test - df regressors, no events",
2: "Testing dict df train / dict df test - dict regressors, no events",
}
for i in range(0, 3):
log.info(info_input[i])
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m = m.add_future_regressor(name="A")
metrics = m.fit(train_input[i], freq="D")
future = m.make_future_dataframe(test_input[i], n_historic_predictions=True, regressors_df=regressors_input[i])
forecast = m.predict(future)
# if PLOT: #fix plot_components
# forecast = forecast if isinstance(forecast, dict) else {'df1':forecast}
# for key in forecast:
# fig = m.plot(forecast[key])
# fig = m.plot_components(forecast[key])
# Possible errors with regressors
m = NeuralProphet(
epochs=EPOCHS,
batch_size=BATCH_SIZE,
learning_rate=LR,
)
m = m.add_future_regressor(name="A")
metrics = m.fit({"df1": df1, "df2": df2}, freq="D")
with pytest.raises(ValueError):
future = m.make_future_dataframe(
{"df1": df3, "df2": df4}, n_historic_predictions=True, regressors_df={"df1": future_regressors_df3}
)
log.info("Error - dict of regressors len is different than dict of dataframes len")
with pytest.raises(ValueError):
future = m.make_future_dataframe(
{"df1": df3}, n_historic_predictions=True, regressors_df={"dfn": future_regressors_df3}
)
log.info("Error - key for regressors not valid")
def test_global_modeling_with_lagged_regressors():
### GLOBAL MODELLING + REGRESSORS
log.info("Global Modeling + Regressors")
df = | pd.read_csv(PEYTON_FILE, nrows=512) | pandas.read_csv |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pandas as pd
from sqlalchemy import DateTime
from spotrix import db
from spotrix.utils import core as utils
from .helpers import get_example_data, get_table_connector_registry
def load_flights(only_metadata: bool = False, force: bool = False) -> None:
"""Loading random time series data from a zip file in the repo"""
tbl_name = "Flights"
database = utils.get_example_database()
table_exists = database.has_table_by_name(tbl_name)
if not only_metadata and (not table_exists or force):
data = get_example_data("flight_data.csv.gz", make_bytes=True)
pdf = | pd.read_csv(data, encoding="latin-1") | pandas.read_csv |
# This code extract the features from the raw joined dataset (data.csv)
# and save it in the LibSVM format.
# Usage: python construct_features.py
import pandas as pd
import numpy as np
from sklearn.datasets import dump_svmlight_file
df = pd.read_csv("data.csv", low_memory=False)
# NPU
NPU = df.NPU.copy()
NPU[NPU == ' '] = np.nan
NPU = pd.get_dummies(NPU, prefix="NPU")
# SiteZip
SiteZip = df.SiteZip.copy()
SiteZip = SiteZip.str.replace(',','')
SiteZip = SiteZip.str.replace(r'\.00', '', regex=True)
SiteZip = SiteZip.replace('0',np.nan)
SiteZip = pd.get_dummies(SiteZip, prefix="SiteZip")
# Submarket1
Submarket1 = df.Submarket1.copy()
Submarket1 = pd.get_dummies(Submarket1, prefix="Submarket1")
# TAX_DISTR
TAX_DISTR = df.TAX_DISTR.copy()
TAX_DISTR[TAX_DISTR == ' '] = np.nan
TAX_DISTR = pd.get_dummies(TAX_DISTR, prefix="TAX_DISTR")
# NBHD
NBHD = df.NBHD.copy()
NBHD[NBHD == ' '] = np.nan
NBHD = pd.get_dummies(NBHD, prefix="NBHD")
# ZONING_NUM
ZONING_NUM = df.ZONING_NUM.copy()
ZONING_NUM[ZONING_NUM == ' '] = np.nan
ZONING_NUM = pd.get_dummies(ZONING_NUM, prefix="ZONING_NUM")
# building_c
building_c = df.building_c.copy()
building_c[building_c == ' '] = np.nan
building_c = pd.get_dummies(building_c, prefix="building_c")
# PROP_CLASS
PROP_CLASS = df.PROP_CLASS.copy()
PROP_CLASS[PROP_CLASS == ' '] = np.nan
PROP_CLASS = pd.get_dummies(PROP_CLASS, prefix="PROP_CLASS")
# Existing_p
Existing_p = df.Existing_p.copy()
Existing_p[Existing_p == ' '] = np.nan
Existing_p = pd.get_dummies(Existing_p, prefix="Existing_p")
# PropertyTy
PropertyTy = df.PropertyTy.copy()
PropertyTy = pd.get_dummies(PropertyTy, prefix="PropertyTy")
# secondaryT
secondaryT = df.secondaryT.copy()
secondaryT[secondaryT == ' '] = np.nan
secondaryT = pd.get_dummies(secondaryT, prefix="secondaryT")
# LUC
LUC = df.LUC.copy()
LUC[LUC == ' '] = np.nan
LUC = pd.get_dummies(LUC, prefix="LUC")
# Taxes_Per_
Taxes_Per_ = df.Taxes_Per_.copy()
Taxes_Per_zero = (Taxes_Per_ == "0").apply(int)
Taxes_Per_zero.name = 'Taxes_Per_zero'
Taxes_Per_ = Taxes_Per_.str.replace(',','').astype(float)
Taxes_Per_ = np.log1p(Taxes_Per_)
Taxes_Per_ = Taxes_Per_ / Taxes_Per_.max()
Taxes_Per_ = pd.concat([Taxes_Per_, Taxes_Per_zero], axis=1)
# Taxes_Tota
Taxes_Tota = df.Taxes_Tota.copy()
Taxes_Tota_zero = (Taxes_Tota == "0").apply(int)
Taxes_Tota_zero.name = 'Taxes_Tota_zero'
Taxes_Tota = Taxes_Tota.str.replace(',','').astype(float)
Taxes_Tota = np.log1p(Taxes_Tota)
Taxes_Tota = Taxes_Tota / Taxes_Tota.max()
Taxes_Tota = pd.concat([Taxes_Tota, Taxes_Tota_zero], axis=1)
# TOT_APPR
TOT_APPR = df.TOT_APPR.copy()
TOT_APPR_zero = (TOT_APPR == "0").apply(int)
TOT_APPR_zero.name = 'TOT_APPR_zero'
TOT_APPR = TOT_APPR.str.replace(',','').astype(float)
TOT_APPR = np.log1p(TOT_APPR)
TOT_APPR = TOT_APPR / TOT_APPR.max()
TOT_APPR = pd.concat([TOT_APPR, TOT_APPR_zero], axis=1)
# VAL_ACRES
VAL_ACRES = df.VAL_ACRES.copy()
VAL_ACRES_zero = (VAL_ACRES == 0).apply(int)
VAL_ACRES_zero.name = 'VAL_ACRES_zero'
VAL_ACRES = np.log1p(VAL_ACRES)
VAL_ACRES = VAL_ACRES / VAL_ACRES.max()
VAL_ACRES = pd.concat([VAL_ACRES, VAL_ACRES_zero], axis=1)
# For_Sale_P
For_Sale_P = df.For_Sale_P.copy()
For_Sale_P_notNA = (For_Sale_P != " ").apply(int)
For_Sale_P_notNA.name = 'For_Sale_P_notNA'
For_Sale_P[For_Sale_P == ' '] = 0
For_Sale_P = For_Sale_P.astype(float)
For_Sale_P = np.log1p(For_Sale_P)
For_Sale_P = For_Sale_P / For_Sale_P.max()
For_Sale_P = pd.concat([For_Sale_P, For_Sale_P_notNA], axis=1)
# Last_Sale1
Last_Sale1 = df.Last_Sale1.copy()
Last_Sale1_zero = (Last_Sale1 == "0").apply(int)
Last_Sale1_zero.name = "Last_Sale1_zero"
Last_Sale1 = Last_Sale1.str.replace(',','').astype(float)
Last_Sale1 = np.log1p(Last_Sale1)
Last_Sale1 = (Last_Sale1 - Last_Sale1.min()) / (Last_Sale1.max() - Last_Sale1.min())
Last_Sale1 = pd.concat([Last_Sale1, Last_Sale1_zero], axis=1)
# yearbuilt
yearbuilt = df.yearbuilt.copy()
yearbuilt_zero = (yearbuilt == "0").apply(int)
yearbuilt_zero.name = "yearbuilt_zero"
yearbuilt[yearbuilt == "0"] = np.nan
yearbuilt = yearbuilt.str.replace(',','').astype(float)
yearbuilt = (yearbuilt - yearbuilt.min()) / (yearbuilt.max() - yearbuilt.min())
yearbuilt = yearbuilt.fillna(0)
yearbuilt = pd.concat([yearbuilt, yearbuilt_zero], axis=1)
# year_reno
year_reno = df.year_reno.copy()
reno = (year_reno != "0").apply(int)
reno.name = "reno"
year_reno[year_reno == "0"] = np.nan
year_reno = year_reno.str.replace(',','').astype(float)
year_reno = (year_reno - year_reno.min()) / (year_reno.max() - year_reno.min())
year_reno = year_reno.fillna(0)
year_reno = pd.concat([year_reno, reno], axis=1)
# Lot_Condition
Lot_Condition = df.Lot_Condition.copy()
Lot_Condition[Lot_Condition == ' '] = np.nan
Lot_Condition = pd.get_dummies(Lot_Condition, prefix="Lot_Condition")
# Structure_Condition
Structure_Condition = df.Structure_Condition.copy()
Structure_Condition[Structure_Condition == ' '] = np.nan
Structure_Condition = pd.get_dummies(Structure_Condition, prefix="Structure_Condition")
# Sidewalks
Sidewalks = df.Sidewalks.copy()
Sidewalks[Sidewalks == "YES"] = "Yes"
Sidewalks[Sidewalks == " "] = np.nan
Sidewalks = pd.get_dummies(Sidewalks, prefix="Sidewalks")
# Multiple_Violations
Multiple_Violations = df.Multiple_Violations.copy()
Multiple_Violations[Multiple_Violations == ' '] = np.nan
Multiple_Violations = | pd.get_dummies(Multiple_Violations, prefix="Multiple_Violations") | pandas.get_dummies |
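# Illustrative refactor sketch only (not used above): the two patterns repeated
# throughout this script could be factored into small helpers. The helper names
# are invented; the behaviour mirrors the inline code.
def _dummies_with_blank_as_nan(series, prefix):
    # one-hot encode a categorical column, treating ' ' as missing
    s = series.copy()
    s[s == ' '] = np.nan
    return pd.get_dummies(s, prefix=prefix)

def _log_scaled_with_zero_flag(series, name):
    # log-transform and max-scale a numeric string column, keeping a zero flag
    s = series.copy()
    zero_flag = (s == "0").apply(int)
    zero_flag.name = name + "_zero"
    s = s.str.replace(',', '').astype(float)
    s = np.log1p(s)
    s = s / s.max()
    return pd.concat([s, zero_flag], axis=1)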
from numpy import NaN, nan
import pandas as pd
from amparos.pesquisa import Pesquisa_Sem_Driver, Pesquisa_Com_Driver
# Checks whether the .xlsx file is a valid file
def VerificarXlsx(local):
"""
Parameters:
local: Arquivo .xlsx para ser analisado
Returns:
return 'ERRO: Caminho ou arquvio invalido': Mensagem
return 'ERRO: Arquivo inadequado': Mensagem
return 'ERRO: Arquivo .xlsx e fora dos padrões': Mensagem
return True: Se o caminho e o arquivo estiver correto
"""
    conter_na_tabela = [
        'sua pergunta',
        '1 pergunta similar',
        '1 pergunta similar 1 resposta',
        '1 pergunta similar 2 resposta',
        '2 pergunta similar',
        '2 pergunta similar 1 resposta',
        '2 pergunta similar 2 resposta',
    ]
contem = 0
try:
tabela = pd.read_excel(local)
except FileNotFoundError:
return 'ERRO: Caminho ou arquivo invalido'
except ValueError:
return 'ERRO: Arquivo inadequado'
    # Drop the 'Unnamed: 0' column; it is always created when the .xlsx file is opened
if 'Unnamed: 0' in tabela:
tabela.drop('Unnamed: 0', axis=1, inplace=True)
    # Check that the required columns are present
for c in conter_na_tabela:
if c in tabela.keys():
contem += 1
else:
return 'ERRO: Arquivo .xlsx fora dos padrões'
    # If the file has all the required columns
if contem == len(conter_na_tabela):
return True
# Builds a list with the questions from the .xlsx file
def CriarListaPerguntas(local_xlsx):
"""
Parameters:
local_xlsx: Local do arquivo .xlsx dentro dos padrões
Returns:
return lista_perguntas: Lista com todas as perguntas do arquivo .xlxs
"""
lista_perguntas = []
tabela_xlsx = | pd.read_excel(local_xlsx) | pandas.read_excel |
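# Hypothetical usage sketch: the file name 'perguntas.xlsx' and the helper name
# are invented for illustration; VerificarXlsx and CriarListaPerguntas are the
# functions defined above.
def _example_usage(local='perguntas.xlsx'):
    check = VerificarXlsx(local)
    if check is True:
        return CriarListaPerguntas(local)
    return check  # one of the 'ERRO: ...' messages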
import os
import pandas as pd
path = './csv'
files = os.listdir(path)
df1 = pd.read_csv(path + '/' + files[0], encoding='utf_8_sig')
for file in files[1:]:
df2 = pd.read_csv(path + '/' + file, encoding='utf_8_sig')
df1 = | pd.concat([df1, df2], axis=0, ignore_index=True) | pandas.concat |
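# Equivalent one-shot concatenation (assumes the same ./csv layout as above);
# shown only as an illustrative alternative to the incremental loop, with the
# df_all name invented here.
df_all = pd.concat(
    (pd.read_csv(path + '/' + f, encoding='utf_8_sig') for f in files),
    axis=0,
    ignore_index=True,
)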
"""GitHub Model"""
__docformat__ = "numpy"
# pylint: disable=C0201,W1401
import logging
from typing import Any, Dict
import math
from datetime import datetime
import requests
import pandas as pd
from openbb_terminal import config_terminal as cfg
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
def get_github_data(url: str, **kwargs):
"""Get repository stats
Parameters
----------
url: str
github api endpoint
params: dict
params to pass to api endpoint
Returns
-------
dict with data
"""
res = requests.get(
url,
headers={
"Authorization": f"token {cfg.API_GITHUB_KEY}",
"User-Agent": get_user_agent(),
"Accept": "application/vnd.github.v3.star+json",
},
**kwargs,
)
if res.status_code == 200:
return res.json()
if res.status_code in (401, 403):
console.print("[red]Rate limit reached, please provide a GitHub API key.[/red]")
elif res.status_code == 404:
console.print("[red]Repo not found.[/red]")
else:
console.print(f"[red]Error occurred {res.json()}[/red]")
return None
def search_repos(
sortby: str = "stars", page: int = 1, categories: str = ""
) -> pd.DataFrame:
"""Get repos sorted by stars or forks. Can be filtered by categories
Parameters
----------
sortby : str
Sort repos by {stars, forks}
categories : str
Check for repo categories. If more than one separate with a comma: e.g., finance,investment. Default: None
page : int
Page number to get repos
Returns
-------
pd.DataFrame with list of repos
"""
params: Dict[str, Any] = {"page": page}
if categories:
params["sort"] = sortby
params["q"] = categories.replace(",", "+")
else:
params["q"] = f"{sortby}:>1"
data = get_github_data("https://api.github.com/search/repositories", params=params)
if data and "items" in data:
return pd.DataFrame(data["items"])
return pd.DataFrame()
@log_start_end(log=logger)
def get_stars_history(repo: str):
"""Get repository star history
Parameters
----------
repo : str
Repo to search for Format: org/repo, e.g., openbb-finance/openbbterminal
Returns
-------
pd.DataFrame - Columns: Date, Stars
"""
data = get_github_data(f"https://api.github.com/repos/{repo}")
if data and "stargazers_count" in data:
stars_number = data["stargazers_count"]
stars: Dict[str, int] = {}
pages = math.ceil(stars_number / 100)
for page in range(0, pages):
data = get_github_data(
f"https://api.github.com/repos/{repo}/stargazers",
params={"per_page": 100, "page": page},
)
if data:
for star in data:
day = star["starred_at"].split("T")[0]
if day in stars:
stars[day] += 1
else:
stars[day] = 1
sorted_keys = sorted(stars.keys())
for i in range(1, len(sorted_keys)):
stars[sorted_keys[i]] += stars[sorted_keys[i - 1]]
df = pd.DataFrame(
{
"Date": [
datetime.strptime(date, "%Y-%m-%d").date() for date in stars.keys()
],
"Stars": stars.values(),
}
)
df.set_index("Date")
return df
return pd.DataFrame()
@log_start_end(log=logger)
def get_top_repos(sortby: str, top: int, categories: str) -> pd.DataFrame:
"""Get repos sorted by stars or forks. Can be filtered by categories
Parameters
----------
sortby : str
Sort repos by {stars, forks}
categories : str
Check for repo categories. If more than one separate with a comma: e.g., finance,investment. Default: None
top : int
Number of repos to search for
Returns
-------
pd.DataFrame with list of repos
"""
initial_top = top
df = pd.DataFrame(
columns=[
"full_name",
"open_issues",
"stargazers_count",
"forks_count",
"language",
"created_at",
"updated_at",
"html_url",
]
)
if top <= 100:
df2 = search_repos(sortby=sortby, page=1, categories=categories)
df = pd.concat([df, df2], ignore_index=True)
else:
        # Start at the first page so results are not skipped when top > 100
        p = 1
while top > 0:
df2 = search_repos(sortby=sortby, page=p, categories=categories)
df = pd.concat([df, df2], ignore_index=True)
top -= 100
p += 1
return df.head(initial_top)
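# Illustrative usage (example arguments): top 150 repos by forks across all categories;
# pages of 100 results are concatenated and trimmed to the requested number.
# df_top = get_top_repos(sortby="forks", top=150, categories="")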
@log_start_end(log=logger)
def get_repo_summary(repo: str):
"""Get repository summary
Parameters
----------
repo : str
Repo to search for Format: org/repo, e.g., openbb-finance/openbbterminal
Returns
-------
pd.DataFrame - Columns: Metric, Value
"""
data = get_github_data(f"https://api.github.com/repos/{repo}")
if not data:
return pd.DataFrame()
release_data = get_github_data(f"https://api.github.com/repos/{repo}/releases")
if not release_data:
return | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.metapartition import MetaPartition
from kartothek.serialization import DataFrameSerializer
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
# TODO: json -> msgpack
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/cluster_1.parquet",
"dataset_uuid/helper/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/cluster_1.parquet",
"dataset_uuid/core/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_helper = pd.DataFrame(
{
"P": [1, 2, 3, 1, 2, 3],
"L": [1, 1, 1, 2, 2, 2],
"info": string.ascii_lowercase[:2],
}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/P=1/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/P=1/L=1/cluster_1.parquet",
"dataset_uuid/core/P=1/L=1/cluster_2.parquet",
"dataset_uuid/core/P=1/L=2/cluster_1.parquet",
"dataset_uuid/core/P=1/L=2/cluster_2.parquet",
"dataset_uuid/core/P=2/L=1/cluster_1.parquet",
"dataset_uuid/core/P=2/L=1/cluster_2.parquet",
"dataset_uuid/core/P=2/L=2/cluster_1.parquet",
"dataset_uuid/core/P=2/L=2/cluster_2.parquet",
"dataset_uuid/core/P=3/L=1/cluster_1.parquet",
"dataset_uuid/core/P=3/L=1/cluster_2.parquet",
"dataset_uuid/core/P=3/L=2/cluster_1.parquet",
"dataset_uuid/core/P=3/L=2/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col(
store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col_simple_group(
store_factory, bound_store_dataframes
):
"""
Pandas seems to stop evaluating the groupby expression if the dataframes after the first column split
is of length 1. This seems to be an optimization which should, however, still raise a KeyError
"""
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
assert any([sorted(p) == ["cluster_1", "cluster_2"] for p in index_dct.values()])
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
def test_store_dataframes_as_dataset_empty_dataframe(
store_factory, metadata_version, df_all_types, bound_store_dataframes
):
"""
Test that writing an empty column succeeds.
In particular, this may fail due to too strict schema validation.
"""
df_empty = df_all_types.drop(0)
# Store a second table with shared columns. All shared columns must be of the same type
# This may fail in the presence of empty partitions if the schema validation doesn't account for it
df_shared_cols = df_all_types.loc[:, df_all_types.columns[:3]]
df_shared_cols["different_col"] = "a"
assert df_empty.empty
df_list = [
{
"label": "cluster_1",
"data": [("tableA", df_empty), ("tableB", df_shared_cols.copy(deep=True))],
},
{
"label": "cluster_2",
"data": [
("tableA", df_all_types),
("tableB", df_shared_cols.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableA"], store=store
)
pdt.assert_frame_equal(df_empty, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableA"], store=store
)
# Roundtrips for type date are not type preserving
df_stored["date"] = df_stored["date"].dt.date
pdt.assert_frame_equal(df_all_types, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
def test_store_dataframes_as_dataset_batch_mode(
store_factory, metadata_version, bound_store_dataframes
):
values_p1 = [1, 2, 3]
values_p2 = [4, 5, 6]
df = pd.DataFrame({"P": values_p1})
df2 = pd.DataFrame({"P": values_p2})
df_list = [
[
{
"label": "cluster_1",
"data": [("core", df)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_1"] for v in values_p1}
)
},
},
{
"label": "cluster_2",
"data": [("core", df2)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_2"] for v in values_p2}
)
},
},
]
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store(
"dataset_uuid", store
).load_all_indices(store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df2, df_stored)
assert stored_dataset.indices["P"].to_dict() == {
1: np.array(["cluster_1"], dtype=object),
2: np.array(["cluster_1"], dtype=object),
3: np.array(["cluster_1"], dtype=object),
4: np.array(["cluster_2"], dtype=object),
5: np.array(["cluster_2"], dtype=object),
6: np.array(["cluster_2"], dtype=object),
}
def test_store_dataframes_as_dataset_auto_uuid(
store_factory, metadata_version, mock_uuid, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
{
"label": "cluster_2",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, metadata_version=metadata_version
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store(
"auto_dataset_uuid", store_factory()
)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
def test_store_dataframes_as_dataset_list_input(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame(
{
"P": np.arange(100, 110),
"L": np.arange(100, 110),
"TARGET": np.arange(10, 20),
}
)
df_list = [df, df2]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store_factory())
assert dataset == stored_dataset
def test_store_dataframes_as_dataset_mp_partition_on_none(
metadata_version, store, store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame({"P": np.arange(0, 10), "info": np.arange(100, 110)})
mp = MetaPartition(
label=gen_uuid(),
data={"core": df, "helper": df2},
metadata_version=metadata_version,
)
df_list = [None, mp]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
partition_on=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P"]
assert len(dataset.partitions) == 10
assert dataset.metadata_version == metadata_version
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_partition_on(store_factory, bound_store_dataframes):
df = pd.DataFrame(
OrderedDict([("location", ["0", "1", "2"]), ("other", ["a", "a", "a"])])
)
# First partition is empty, test this edgecase
input_ = [
{
"label": "label",
"data": [("order_proposals", df.head(0))],
"indices": {"location": {}},
},
{
"label": "label",
"data": [("order_proposals", df)],
"indices": {"location": {k: ["label"] for k in df["location"].unique()}},
},
]
dataset = bound_store_dataframes(
input_,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=4,
partition_on=["other"],
)
assert len(dataset.partitions) == 1
assert len(dataset.indices) == 1
assert dataset.partition_keys == ["other"]
def _exception_str(exception):
"""
Extract the exception message, even if this is a re-throw of an exception
in distributed.
"""
if isinstance(exception, ValueError) and exception.args[0] == "Long error message":
return exception.args[1]
return str(exception)
@pytest.mark.parametrize(
"dfs,ok",
[
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int32),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int16),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int16),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int32),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.uint64),
}
),
],
False,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
"Y": pd.Series([2], dtype=np.int64),
}
),
],
False,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1, 2], dtype=np.int64),
"X": pd.Series([1, 2], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([3], dtype=np.int64),
"X": pd.Series([3], dtype=np.uint64),
}
),
],
False,
),
],
)
def test_schema_check_write(dfs, ok, store_factory, bound_store_dataframes):
df_list = [{"label": "cluster_1", "data": [("core", df)]} for df in dfs]
if ok:
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P"],
metadata_version=4,
)
else:
with pytest.raises(Exception) as exc:
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P"],
metadata_version=4,
)
assert (
"Schemas for table 'core' of dataset 'dataset_uuid' are not compatible!"
in _exception_str(exc.value)
)
def test_schema_check_write_shared(store_factory, bound_store_dataframes):
df1 = pd.DataFrame(
{"P": pd.Series([1], dtype=np.int64), "X": pd.Series([1], dtype=np.int64)}
)
df2 = pd.DataFrame(
{"P": pd.Series([1], dtype=np.uint64), "Y": pd.Series([1], dtype=np.int64)}
)
df_list = [
{"label": "cluster_1", "data": [("core", df1)]},
{"label": "cluster_2", "data": [("prediction", df2)]},
]
with pytest.raises(Exception) as exc:
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P"],
metadata_version=4,
)
assert 'Found incompatible entries for column "P"' in str(exc.value)
def test_schema_check_write_nice_error(store_factory, bound_store_dataframes):
df1 = pd.DataFrame(
{
"P": pd.Series([1, 1], dtype=np.int64),
"Q": pd.Series([1, 2], dtype=np.int64),
"X": pd.Series([1, 1], dtype=np.int64),
}
)
df2 = pd.DataFrame(
{
"P": pd.Series([2, 2], dtype=np.uint64),
"Q": pd.Series([1, 2], dtype=np.int64),
"X": pd.Series([1, 1], dtype=np.int64),
}
)
df_list = [
{"label": "uuid1", "data": [("core", df1)]},
{"label": "uuid2", "data": [("core", df2)]},
]
with pytest.raises(Exception) as exc:
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "Q"],
metadata_version=4,
)
assert _exception_str(exc.value).startswith(
"""Schemas for table 'core' of dataset 'dataset_uuid' are not compatible!
Schema violation
Origin schema: {core/P=2/Q=2/uuid2}
Origin reference: {core/P=1/Q=2/uuid1}
Diff:
"""
)
def test_schema_check_write_cut_error(store_factory, bound_store_dataframes):
df1 = pd.DataFrame(
{
"P": pd.Series([1] * 100, dtype=np.int64),
"Q": pd.Series(range(100), dtype=np.int64),
"X": pd.Series([1] * 100, dtype=np.int64),
}
)
df2 = pd.DataFrame(
{
"P": pd.Series([2] * 100, dtype=np.uint64),
"Q": pd.Series(range(100), dtype=np.int64),
"X": pd.Series([1] * 100, dtype=np.int64),
}
)
df_list = [
{"label": "uuid1", "data": [("core", df1)]},
{"label": "uuid2", "data": [("core", df2)]},
]
with pytest.raises(Exception) as exc:
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "Q"],
metadata_version=4,
)
assert _exception_str(exc.value).startswith(
"""Schemas for table 'core' of dataset 'dataset_uuid' are not compatible!
Schema violation
Origin schema: {core/P=2/Q=99/uuid2}
Origin reference: {core/P=1/Q=99/uuid1}
Diff:
"""
)
def test_metadata_consistency_errors_fails(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame({"W": np.arange(0, 10), "L": np.arange(0, 10)})
df_2 = pd.DataFrame(
{"P": np.arange(10, 20), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_list = [
{"label": "cluster_1", "data": [("core", df)]},
{"label": "cluster_2", "data": [("core", df_2)]},
]
# Also test `df_list` in reverse order, as this could lead to different results
for dfs in [df_list, list(reversed(df_list))]:
with pytest.raises(
Exception, match=r"Schemas for table .* of dataset .* are not compatible!"
):
return bound_store_dataframes(
dfs, store=store_factory, metadata_version=metadata_version
)
def test_table_consistency_resistance(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame({"P": np.arange(0, 10)})
df_helper = pd.DataFrame(
{"P": np.arange(15, 35), "info": string.ascii_lowercase[:10]}
)
df_list = [
{"label": "cluster_1", "data": [("core", df)]},
{"label": "cluster_2", "data": [("core", df), ("helper", df_helper)]},
]
store_kwargs = dict(store=store_factory, metadata_version=metadata_version)
metadata1 = bound_store_dataframes(df_list, **store_kwargs)
metadata2 = bound_store_dataframes(list(reversed(df_list)), **store_kwargs)
assert set(metadata1.tables) == set(metadata2.tables) == {"core", "helper"}
def test_store_dataframes_as_dataset_overwrite(
store_factory, dataset_function, bound_store_dataframes
):
with pytest.raises(RuntimeError):
bound_store_dataframes(
[pd.DataFrame()], store=store_factory, dataset_uuid=dataset_function.uuid
)
bound_store_dataframes(
[pd.DataFrame()],
store=store_factory,
dataset_uuid=dataset_function.uuid,
overwrite=True,
)
bound_store_dataframes(
[pd.DataFrame()], store=store_factory, dataset_uuid="new_dataset_uuid"
)
def test_store_empty_dataframes_partition_on(store_factory, bound_store_dataframes):
df1 = pd.DataFrame({"x": [1], "y": [1]}).iloc[[]]
md1 = bound_store_dataframes(
[df1], store=store_factory, dataset_uuid="uuid", partition_on=["x"]
)
assert md1.tables == ["table"]
assert set(md1.table_meta["table"].names) == set(df1.columns)
df2 = pd.DataFrame({"x": [1], "y": [1], "z": [1]}).iloc[[]]
md2 = bound_store_dataframes(
[df2],
store=store_factory,
dataset_uuid="uuid",
partition_on=["x"],
overwrite=True,
)
assert md2.tables == ["table"]
assert set(md2.table_meta["table"].names) == set(df2.columns)
df3 = pd.DataFrame({"x": [1], "y": [1], "a": [1]}).iloc[[]]
md3 = bound_store_dataframes(
[{"table2": df3}],
store=store_factory,
dataset_uuid="uuid",
partition_on=["x"],
overwrite=True,
)
assert md3.tables == ["table2"]
assert set(md3.table_meta["table2"].names) == set(df3.columns)
def test_store_overwrite_none(store_factory, bound_store_dataframes):
df1 = | pd.DataFrame({"x": [1], "y": [1]}) | pandas.DataFrame |
import os
import sys
import numpy as np
import pandas as pd
import time
import scipy.sparse
import scipy.sparse.linalg
from scipy import stats
from scipy.optimize import minimize
np.set_printoptions(threshold=sys.maxsize)
# Add lib to the python path.
from genTestDat import genTestData2D, prodMats2D
from est2d import *
from est3d import *
from npMatrix2d import *
from npMatrix3d import *
# ==================================================================================
#
# The below code runs multiple simulations in serial. It takes the following inputs:
#
# ----------------------------------------------------------------------------------
#
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
# - Design 2: nlevels=[50,25], nraneffs=[3,2]
# - Design 3: nlevels=[100,30,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - nsim: Number of simulations (default=1000)
# - mode: String indicating whether to run parameter estimation simulations (mode=
# 'param') or T statistic simulations (mode='Tstat').
# - REML: Boolean indicating whether to use ML or ReML estimation.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def sim2D(desInd, OutDir, nsim=1000, mode='param', REML=False):
# Loop through and run simulations
for simInd in range(1,nsim+1):
runSim(simInd, desInd, OutDir, mode, REML)
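# Illustrative call (output directory is an assumed example): run 100 parameter
# estimation simulations for Design 1 using ML estimation.
# sim2D(desInd=1, OutDir='./sim_results', nsim=100, mode='param', REML=False)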
# ==================================================================================
#
# The below simulates random test data and runs all methods described in the LMM
# paper on the simulated data. It requires the following inputs:
#
# ----------------------------------------------------------------------------------
#
# - SimInd: An index to represent the simulation. All output for this simulation will
# be saved in files with the index specified by this argument. The
# simulation with index 1 will also perform any necessary additional setup
# and should therefore be run before any others.
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
# - Design 2: nlevels=[50,25], nraneffs=[3,2]
# - Design 3: nlevels=[100,30,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - mode: String indicating whether to run parameter estimation simulations (mode=
# 'param') or T statistic simulations (mode='Tstat').
# - REML: Boolean indicating whether to use ML or ReML estimation.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def runSim(simInd, desInd, OutDir, mode='param', REML=False):
# Make sure simInd is an int
simInd = int(simInd)
#===============================================================================
# Setup
#===============================================================================
# Decide whether we wish to run T statistics/degrees of freedom estimation
if mode=='param':
runDF = False
else:
runDF = True
# Different designs
if desInd==1:
nlevels = np.array([50])
nraneffs = np.array([2])
if desInd==2:
nlevels = np.array([50,25])
nraneffs = np.array([3,2])
if desInd==3:
nlevels = np.array([100,30,10])
nraneffs = np.array([4,3,2])
# Number of observations
n = 1000
# If we are doing a degrees of freedom simulation, create the factor vectors, X and Z if
# this is the first run. These will then be used across all following simulations. If we
# are doing a simulation to look at parameter estimation, we recreate the design on every
# run as our focus is to stress test the performance of the algorithms, rather than compare
# performance of one specific model in particular.
if simInd == 1 or not runDF:
# Delete any factor vectors from a previous batch of simulations.
if runDF:
for i in range(len(nlevels)):
if os.path.isfile(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv')):
os.remove(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv'))
fvs = None
X = None
Z = None
# Otherwise read the factor vectors, X and Z in from file.
else:
# Initialize empty factor vectors dict
fvs = dict()
# Loop through factors and save factor vectors
for i in range(len(nlevels)):
fvs[i] = pd.io.parsers.read_csv(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv'), header=None).values
X = pd.io.parsers.read_csv(os.path.join(OutDir, 'X_' + str(desInd) + '.csv'), header=None).values
Z = pd.io.parsers.read_csv(os.path.join(OutDir, 'Z_' + str(desInd) + '.csv'), header=None).values
# Generate test data
Y,X,Z,nlevels,nraneffs,beta,sigma2,b,D, fvs = genTestData2D(n=n, p=5, nlevels=nlevels, nraneffs=nraneffs, save=True, simInd=simInd, desInd=desInd, OutDir=OutDir, factorVectors=fvs, X=X, Z=Z)
# Save the new factor vectors if this is the first run.
if simInd == 1 and runDF:
# Loop through the factors saving them
for i in range(len(nlevels)):
pd.DataFrame(fvs[i]).to_csv(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv'), index=False, header=None)
pd.DataFrame(X).to_csv(os.path.join(OutDir, 'X_' + str(desInd) + '.csv'), index=False, header=None)
pd.DataFrame(Z).to_csv(os.path.join(OutDir, 'Z_' + str(desInd) + '.csv'), index=False, header=None)
# Work out number of observations, parameters, random effects, etc
n = X.shape[0]
p = X.shape[1]
q = np.sum(nraneffs*nlevels)
qu = np.sum(nraneffs*(nraneffs+1)//2)
r = nlevels.shape[0]
# Tolerance
tol = 1e-6
# Work out factor indices.
facInds = np.cumsum(nraneffs*nlevels)
facInds = np.insert(facInds,0,0)
# Convert D to dict
Ddict=dict()
for k in np.arange(len(nlevels)):
Ddict[k] = D[facInds[k]:(facInds[k]+nraneffs[k]),facInds[k]:(facInds[k]+nraneffs[k])]
# Get the product matrices
XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ = prodMats2D(Y,Z,X)
# -----------------------------------------------------------------------------
# Create empty data frame for results:
# -----------------------------------------------------------------------------
# Row indices
indexVec = np.array(['Time', 'nit', 'llh'])
for i in np.arange(p):
indexVec = np.append(indexVec, 'beta'+str(i+1))
# Sigma2
indexVec = np.append(indexVec, 'sigma2')
# Dk
for k in np.arange(r):
for j in np.arange(nraneffs[k]*(nraneffs[k]+1)//2):
indexVec = np.append(indexVec, 'D'+str(k+1)+','+str(j+1))
# Sigma2*Dk
for k in np.arange(r):
for j in np.arange(nraneffs[k]*(nraneffs[k]+1)//2):
indexVec = np.append(indexVec, 'sigma2*D'+str(k+1)+','+str(j+1))
# If we're doing a T statistic simulation add the T statistics, p values and
# degrees of freedom rows to the dataframe.
if runDF:
# T value p value and Satterthwaite degrees of freedom estimate.
indexVec = np.append(indexVec,'T')
indexVec = np.append(indexVec,'p')
indexVec = np.append(indexVec,'swdf')
# Construct dataframe
results = pd.DataFrame(index=indexVec, columns=['Truth', 'FS', 'fFS', 'SFS', 'fSFS', 'cSFS'])
# ------------------------------------------------------------------------------------
# Truth
# ------------------------------------------------------------------------------------
# Default time and number of iterations
results.at['Time','Truth']=0
results.at['nit','Truth']=0
# Construct parameter vector
paramVec_true = beta[:]
paramVec_true = np.concatenate((paramVec_true,np.array(sigma2).reshape(1,1)),axis=0)
# Add D to parameter vector
facInds = np.cumsum(nraneffs*nlevels)
facInds = np.insert(facInds,0,0)
# Convert D to vector
for k in np.arange(len(nlevels)):
vechD = mat2vech2D(D[facInds[k]:(facInds[k]+nraneffs[k]),facInds[k]:(facInds[k]+nraneffs[k])])/sigma2
paramVec_true = np.concatenate((paramVec_true,vechD),axis=0)
# Add results to parameter vector
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'Truth']=paramVec_true[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'Truth']=paramVec_true[p,0]*paramVec_true[i-3,0]
    # Matrices needed for the log-likelihood calculation
Zte = ZtY - ZtX @ beta
ete = ssr2D(YtX, YtY, XtX, beta)
DinvIplusZtZD = D @ np.linalg.inv(np.eye(q) + ZtZ @ D)
# True log likelihood
llh = llh2D(n, ZtZ, Zte, ete, sigma2, DinvIplusZtZD,D,REML,XtX,XtZ,ZtX)[0,0]
# Add back on constant term
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Add ground truth log likelihood
results.at['llh','Truth']=llh
# Get the ground truth degrees of freedom if running a T statistic simulation
if runDF:
# Contrast vector (1 in last place 0 elsewhere)
L = np.zeros(p)
L[-1] = 1
L = L.reshape(1,p)
v = groundTruth_TDF(X, Z, beta, sigma2, D, L, nlevels, nraneffs, tol)
results.at[indexVec[p+6+2*qu],'Truth']=v[0,0]
#===============================================================================
# fSFS
#===============================================================================
# Get the indices for the individual random factor covariance parameters.
DkInds = np.zeros(len(nlevels)+1)
DkInds[0]=np.int(p+1)
for k in np.arange(len(nlevels)):
DkInds[k+1] = np.int(DkInds[k] + nraneffs[k]*(nraneffs[k]+1)//2)
# Run Full Simplified Fisher Scoring
t1 = time.time()
paramVector_fSFS,_,nit,llh = fSFS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=REML, init_paramVector=None)
t2 = time.time()
# Add back on constant term for llh
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Record Time and number of iterations
results.at['Time','fSFS']=t2-t1
results.at['nit','fSFS']=nit
results.at['llh','fSFS']=llh
# Record parameters
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'fSFS']=paramVector_fSFS[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'fSFS']=paramVector_fSFS[p,0]*paramVector_fSFS[i-3,0]
# If running a T statistic simulation...
if runDF:
# Get T statistic, p value and Satterthwaite degrees of freedom
T,Pval,df = simT(paramVector_fSFS, XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ, nraneffs, nlevels, n)
results.at[indexVec[p+4+2*qu],'fSFS']=T[0,0]
results.at[indexVec[p+5+2*qu],'fSFS']=Pval[0,0]
results.at[indexVec[p+6+2*qu],'fSFS']=df[0,0]
#===============================================================================
# cSFS
#===============================================================================
# Run Cholesky Simplified Fisher Scoring
t1 = time.time()
paramVector_cSFS,_,nit,llh = cSFS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=REML, init_paramVector=None)
t2 = time.time()
# Add back on constant term for llh
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Record time and number of iterations
results.at['Time','cSFS']=t2-t1
results.at['nit','cSFS']=nit
results.at['llh','cSFS']=llh
# Save parameters
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'cSFS']=paramVector_cSFS[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'cSFS']=paramVector_cSFS[p,0]*paramVector_cSFS[i-3,0]
# If running a T statistic simulation...
if runDF:
# Get T statistic, p value and Satterthwaite degrees of freedom
T,Pval,df = simT(paramVector_cSFS, XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ, nraneffs, nlevels, n)
results.at[indexVec[p+4+2*qu],'cSFS']=T[0,0]
results.at[indexVec[p+5+2*qu],'cSFS']=Pval[0,0]
results.at[indexVec[p+6+2*qu],'cSFS']=df[0,0]
#===============================================================================
# FS
#===============================================================================
# Run Fisher Scoring
t1 = time.time()
paramVector_FS,_,nit,llh = FS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=REML, init_paramVector=None)
t2 = time.time()
# Add back on constant term for llh
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Record time and number of iterations
results.at['Time','FS']=t2-t1
results.at['nit','FS']=nit
results.at['llh','FS']=llh
# Save parameters
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'FS']=paramVector_FS[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'FS']=paramVector_FS[p,0]*paramVector_FS[i-3,0]
# If running a T statistic simulation...
if runDF:
# Get T statistic, p value and Satterthwaite degrees of freedom
T,Pval,df = simT(paramVector_FS, XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ, nraneffs, nlevels, n)
results.at[indexVec[p+4+2*qu],'FS']=T[0,0]
results.at[indexVec[p+5+2*qu],'FS']=Pval[0,0]
results.at[indexVec[p+6+2*qu],'FS']=df[0,0]
#===============================================================================
# SFS
#===============================================================================
# Run Simplified Fisher Scoring
t1 = time.time()
paramVector_SFS,_,nit,llh = SFS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=REML, init_paramVector=None)
t2 = time.time()
# Add back on constant term for llh
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Record time and number of iterations
results.at['Time','SFS']=t2-t1
results.at['nit','SFS']=nit
results.at['llh','SFS']=llh
# Save parameters
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'SFS']=paramVector_SFS[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'SFS']=paramVector_SFS[p,0]*paramVector_SFS[i-3,0]
# If running a T statistic simulation...
if runDF:
# Get T statistic, p value and Satterthwaite degrees of freedom
T,Pval,df = simT(paramVector_SFS, XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ, nraneffs, nlevels, n)
results.at[indexVec[p+4+2*qu],'SFS']=T[0,0]
results.at[indexVec[p+5+2*qu],'SFS']=Pval[0,0]
results.at[indexVec[p+6+2*qu],'SFS']=df[0,0]
#===============================================================================
# fFS
#===============================================================================
# Run Full Fisher Scoring
t1 = time.time()
paramVector_fFS,_,nit,llh = fFS2D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol, n, reml=REML, init_paramVector=None)
t2 = time.time()
# Add back on constant term for llh
if REML:
llh = llh - (n-p)/2*np.log(2*np.pi)
else:
llh = llh - n/2*np.log(2*np.pi)
# Record time and number of iterations
results.at['Time','fFS']=t2-t1
results.at['nit','fFS']=nit
results.at['llh','fFS']=llh
# Save parameters
for i in np.arange(3,p+qu+4):
results.at[indexVec[i],'fFS']=paramVector_fFS[i-3,0]
# Record D*sigma2
for i in np.arange(4+p,p+qu+4):
results.at[indexVec[i+qu],'fFS']=paramVector_fFS[p,0]*paramVector_fFS[i-3,0]
# If running a T statistic simulation...
if runDF:
# Get T statistic, p value and Satterthwaite degrees of freedom
T,Pval,df = simT(paramVector_fFS, XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ, nraneffs, nlevels, n)
results.at[indexVec[p+4+2*qu],'fFS']=T[0,0]
results.at[indexVec[p+5+2*qu],'fFS']=Pval[0,0]
results.at[indexVec[p+6+2*qu],'fFS']=df[0,0]
# Save results
results.to_csv(os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv'))
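# Illustrative call (output directory is an assumed example): a single T-statistic
# simulation for Design 2; simulation 1 must be run first as it writes the shared design.
# runSim(simInd=1, desInd=2, OutDir='./sim_results', mode='Tstat', REML=True)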
# ==================================================================================
#
# The below function collates the performance metrics for the parameter estimation
# simulations, prints summaries of the results and saves the results as csv files.
#
# ----------------------------------------------------------------------------------
#
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
# - Design 2: nlevels=[50,25], nraneffs=[3,2]
# - Design 3: nlevels=[100,30,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - nsim: Number of simulations to be collated.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def performanceTables(desInd, OutDir, nsim=1000):
# Make row indices
row = ['sim'+str(i) for i in range(1,nsim+1)]
# Make column indices
col = ['FS','fFS','SFS','fSFS','cSFS','lmer']
#-----------------------------------------------------------------------------
# Work out timing stats
#-----------------------------------------------------------------------------
# Make timing table
timesTable = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
timesTable = timesTable.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
# Get the times
simTimes = results_table.loc['Time','FS':]
# Add them to the table
timesTable.loc['sim'+str(simInd),:]=simTimes
# Save computation times to csv file
timesTable.to_csv(os.path.join(OutDir,'timesTable.csv'))
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of computation times')
print(timesTable.describe().to_string())
#-----------------------------------------------------------------------------
# Work out number of iteration stats
#-----------------------------------------------------------------------------
# Make timing table
nitTable = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
nitTable = nitTable.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
# Get the times
simNIT = results_table.loc['nit','FS':]
# Add them to the table
nitTable.loc['sim'+str(simInd),:]=simNIT
# Save number of iterations to csv file
nitTable.to_csv(os.path.join(OutDir,'nitTable.csv'))
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of number of iterations')
print(nitTable.describe().to_string())
#-----------------------------------------------------------------------------
# Work out log-likelihood stats
#-----------------------------------------------------------------------------
# Make timing table
llhTable = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
    llhTable = llhTable.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
# Get the log-likelihoods
simllh = results_table.loc['llh','FS':]
# Add them to the table
llhTable.loc['sim'+str(simInd),:]=simllh
# Save log likelihoods to csv file
llhTable.to_csv(os.path.join(OutDir,'llhTable.csv'))
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of maximized log-likelihoods')
print(llhTable.describe().to_string())
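# Illustrative call (assumes the simulations above were run into the same directory):
# performanceTables(desInd=1, OutDir='./sim_results', nsim=100)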
# ==================================================================================
#
# The below function collates the MAE and MRD metrics for the parameter estimation
# simulations, prints summaries of the results and saves the results as csv files.
#
# ----------------------------------------------------------------------------------
#
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
# - Design 2: nlevels=[50,25], nraneffs=[3,2]
# - Design 3: nlevels=[100,30,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - nsim: Number of simulations to be collated.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def differenceMetrics(desInd, OutDir, nsim=1000):
# Make row indices
row = ['sim'+str(i) for i in range(1,nsim+1)]
# Make column indices
col = ['FS','fFS','SFS','fSFS','cSFS','lmer']
#-----------------------------------------------------------------------------
# Work out absolute difference metrics for lmer
#-----------------------------------------------------------------------------
# Make difference tables
diffTableBetas = pd.DataFrame(index=row, columns=col)
diffTableVar = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
diffTableBetas = diffTableBetas.apply(pd.to_numeric)
diffTableVar = diffTableVar.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
# Get the betas
simBetas = results_table.loc['beta1':'beta5',:]
if desInd==1:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D1,3',:]
if desInd==2:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D2,3',:]
if desInd==3:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D3,3',:]
# Work out the maximum absolute errors for betas
maxAbsErrBetas = (simBetas.sub(simBetas['lmer'], axis=0)).abs().max()
# Work out the maximum absolute errors for sigma2D
if desInd==1:
maxAbsErrVar = (simVar.sub(simVar['lmer'], axis=0)).abs().max()
if desInd==2:
maxAbsErrVar = (simVar.sub(simVar['lmer'], axis=0)).abs().max()
if desInd==3:
maxAbsErrVar = (simVar.sub(simVar['lmer'], axis=0)).abs().max()
# Add them to the tables
diffTableBetas.loc['sim'+str(simInd),:]=maxAbsErrBetas
diffTableVar.loc['sim'+str(simInd),:]=maxAbsErrVar
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of MAE values for beta estimates (compared to lmer)')
print(diffTableBetas.describe().to_string())
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of MAE values for variance estimates (compared to lmer)')
print(diffTableVar.describe().to_string())
# Save MAE values for lmer to csv
diffTableVar.to_csv(os.path.join(OutDir,'diffTableVar_lmer_abs.csv'))
diffTableBetas.to_csv(os.path.join(OutDir,'diffTableBetas_lmer_abs.csv'))
#-----------------------------------------------------------------------------
# Work out absolute difference metrics for Truth
#-----------------------------------------------------------------------------
# Make difference tables
diffTableBetas = pd.DataFrame(index=row, columns=col)
diffTableVar = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
diffTableBetas = diffTableBetas.apply(pd.to_numeric)
diffTableVar = diffTableVar.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
# Get the betas
simBetas = results_table.loc['beta1':'beta5',:]
if desInd==1:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D1,3',:]
if desInd==2:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D2,3',:]
if desInd==3:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D3,3',:]
# Work out the maximum absolute errors for betas
maxAbsErrBetas = (simBetas.sub(simBetas['Truth'], axis=0)).abs().max()
# Work out the maximum absolute errors for sigma2D
if desInd==1:
maxAbsErrVar = (simVar.sub(simVar['Truth'], axis=0)).abs().max()
if desInd==2:
maxAbsErrVar = (simVar.sub(simVar['Truth'], axis=0)).abs().max()
if desInd==3:
maxAbsErrVar = (simVar.sub(simVar['Truth'], axis=0)).abs().max()
# Add them to the tables
diffTableBetas.loc['sim'+str(simInd),:]=maxAbsErrBetas
diffTableVar.loc['sim'+str(simInd),:]=maxAbsErrVar
# Save MAE values for truth to csv
diffTableVar.to_csv(os.path.join(OutDir,'diffTableVar_truth_abs.csv'))
diffTableBetas.to_csv(os.path.join(OutDir,'diffTableBetas_truth_abs.csv'))
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of MAE values for beta estimates (compared to truth)')
print(diffTableBetas.describe().to_string())
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of MAE values for variance estimates (compared to truth)')
print(diffTableVar.describe().to_string())
#-----------------------------------------------------------------------------
# Work out relative difference metrics for lmer
#-----------------------------------------------------------------------------
# Make difference tables
diffTableBetas = pd.DataFrame(index=row, columns=col)
diffTableVar = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
diffTableBetas = diffTableBetas.apply(pd.to_numeric)
diffTableVar = diffTableVar.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
# Get the betas
simBetas = results_table.loc['beta1':'beta5',:]
if desInd==1:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D1,3',:]
if desInd==2:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D2,3',:]
if desInd==3:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D3,3',:]
# Work out the maximum relative differences for betas
maxRelDiffBetas = (simBetas.sub(simBetas['lmer'], axis=0)).abs().div(simBetas.add(results_table.loc['beta1':'beta5','lmer'],axis=0)/2).max()
# Work out the maximum relative differences for sigma2D
if desInd==1:
maxRelDiffVar = (simVar.sub(simVar['lmer'], axis=0)).abs().div(simVar.add(results_table.loc['sigma2*D1,1':'sigma2*D1,3','lmer'],axis=0)/2).max()
if desInd==2:
maxRelDiffVar = (simVar.sub(simVar['lmer'], axis=0)).abs().div(simVar.add(results_table.loc['sigma2*D1,1':'sigma2*D2,3','lmer'],axis=0)/2).max()
if desInd==3:
maxRelDiffVar = (simVar.sub(simVar['lmer'], axis=0)).abs().div(simVar.add(results_table.loc['sigma2*D1,1':'sigma2*D3,3','lmer'],axis=0)/2).max()
# Add them to the tables
diffTableBetas.loc['sim'+str(simInd),:]=maxRelDiffBetas
diffTableVar.loc['sim'+str(simInd),:]=maxRelDiffVar
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of MRD values for beta estimates (compared to lmer)')
print(diffTableBetas.describe().to_string())
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of MRD values for variance estimates (compared to lmer)')
print(diffTableVar.describe().to_string())
# Save MRD values for lmer to csv
diffTableVar.to_csv(os.path.join(OutDir,'diffTableVar_lmer_rel.csv'))
diffTableBetas.to_csv(os.path.join(OutDir,'diffTableBetas_lmer_rel.csv'))
#-----------------------------------------------------------------------------
# Work out relative difference metrics for Truth
#-----------------------------------------------------------------------------
# Make difference tables
diffTableBetas = pd.DataFrame(index=row, columns=col)
diffTableVar = pd.DataFrame(index=row, columns=col)
# Make sure pandas knows the table is numeric
diffTableBetas = diffTableBetas.apply(pd.to_numeric)
diffTableVar = diffTableVar.apply(pd.to_numeric)
for simInd in range(1,nsim+1):
# Name of results file
results_file = os.path.join(OutDir,'Sim'+str(simInd)+'_Design'+str(desInd)+'_results.csv')
# Read in results file
results_table = pd.read_csv(results_file, index_col=0)
# Get the betas
simBetas = results_table.loc['beta1':'beta5',:]
if desInd==1:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D1,3',:]
if desInd==2:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D2,3',:]
if desInd==3:
# Get the variance components
simVar = results_table.loc['sigma2*D1,1':'sigma2*D3,3',:]
# Work out the maximum relative differences for betas
maxRelDiffBetas = (simBetas.sub(simBetas['Truth'], axis=0)).abs().div(simBetas.add(results_table.loc['beta1':'beta5','Truth'],axis=0)/2).dropna().max()
# Work out the maximum relative differences for sigma2D
if desInd==1:
maxRelDiffVar = (simVar.sub(simVar['Truth'], axis=0)).abs().div(simVar.add(results_table.loc['sigma2*D1,1':'sigma2*D1,3','Truth'],axis=0)/2).dropna().max()
        if desInd==2:
            maxRelDiffVar = (simVar.sub(simVar['Truth'], axis=0)).abs().div(simVar.add(results_table.loc['sigma2*D1,1':'sigma2*D2,3','Truth'],axis=0)/2).dropna().max()
        if desInd==3:
            maxRelDiffVar = (simVar.sub(simVar['Truth'], axis=0)).abs().div(simVar.add(results_table.loc['sigma2*D1,1':'sigma2*D3,3','Truth'],axis=0)/2).dropna().max()
# Add them to the tables
diffTableBetas.loc['sim'+str(simInd),:]=maxRelDiffBetas
diffTableVar.loc['sim'+str(simInd),:]=maxRelDiffVar
# Save MRD values for truth to csv
diffTableVar.to_csv(os.path.join(OutDir,'diffTableVar_truth_rel.csv'))
diffTableBetas.to_csv(os.path.join(OutDir,'diffTableBetas_truth_rel.csv'))
# Print summary
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of MRD values for beta estimates (compared to truth)')
print(diffTableBetas.describe().to_string())
print(' ')
print('--------------------------------------------------------------------------')
print(' ')
print('Summary of MRD values for variance estimates (compared to truth)')
print(diffTableVar.describe().to_string())
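# Illustrative call (assumes the simulations above were run into the same directory):
# differenceMetrics(desInd=1, OutDir='./sim_results', nsim=100)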
# ==================================================================================
#
# The below function generates a ground truth degrees of freedom estimate for a
# given model.
#
# ----------------------------------------------------------------------------------
#
# - X: The fixed effects design matrix.
# - Z: The random effects design matrix.
# - beta: The true fixed effects parameters to be used for simulation.
# - sigma2: The true fixed effects variance to be used for simulation.
# - D: The true random effects covariance matrix to be used for simulation.
# - L: The contrast vector specifying which contrast we wish to estimate the degrees
# of freedom for.
# - nlevels: A vector containing the number of levels for each factor,
# e.g. `nlevels=[3,4]` would mean the first factor has 3
# levels and the second factor has 4 levels.
# - nraneffs: A vector containing the number of random effects for each
# factor, e.g. `nraneffs=[2,1]` would mean the first factor has
# random effects and the second factor has 1 random effect.
# - tol: Convergence tolerance for the parameter estimation method.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def groundTruth_TDF(X, Z, beta, sigma2, D, L, nlevels, nraneffs, tol):
# Required product matrices
XtX = X.transpose() @ X
XtZ = X.transpose() @ Z
ZtZ = Z.transpose() @ Z
# Inverse of (I+Z'ZD) multiplied by D
DinvIplusZtZD = forceSym2D(np.linalg.solve(np.eye(ZtZ.shape[0]) + D @ ZtZ, D))
# Get the true variance of LB
True_varLB = get_varLB2D(L, XtX, XtZ, DinvIplusZtZD, sigma2)
# Get the variance of the estimated variance of LB using the 3D code
var_est_varLB = get_VarhatLB2D(X, Z, beta, sigma2, D, L, nlevels, nraneffs, tol)
# Get ground truth degrees of freedom
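    # (Satterthwaite-style estimator: v = 2*Var(L*betahat)^2 / Varhat(Var(L*betahat)),
    # with the numerator evaluated at the true parameter values.)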
v = 2*(True_varLB**2)/var_est_varLB
# Return result
return(v)
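# Illustrative call (inputs as generated by genTestData2D and the contrast L used in
# runSim): ground-truth Satterthwaite degrees of freedom for the last fixed effect.
# v = groundTruth_TDF(X, Z, beta, sigma2, D, L, nlevels, nraneffs, tol=1e-6)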
# ==================================================================================
#
# The below function estimates the variance of Var(LB) empirically. It takes the
# following inputs.
#
# ----------------------------------------------------------------------------------
#
# - X: The fixed effects design matrix.
# - Z: The random effects design matrix.
# - beta: The true fixed effects parameters to be used for simulation.
# - sigma2: The true fixed effects variance to be used for simulation.
# - D: The true random effects covariance matrix to be used for simulation.
# - L: The contrast vector specifying which contrast we wish to estimate the degrees
# of freedom for.
# - nlevels: A vector containing the number of levels for each factor,
# e.g. `nlevels=[3,4]` would mean the first factor has 3
# levels and the second factor has 4 levels.
# - nraneffs: A vector containing the number of random effects for each
# factor, e.g. `nraneffs=[2,1]` would mean the first factor has
# random effects and the second factor has 1 random effect.
# - tol: Convergence tolerance for the parameter estimation method.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def get_VarhatLB2D(X, Z, beta, sigma2, D, L, nlevels, nraneffs, tol):
# Work out dimensions
n = X.shape[0]
p = X.shape[1]
q = Z.shape[1]
qu = np.sum(nraneffs*(nraneffs+1)//2)
# Reshape to 3D dimensions
X = X.reshape((1,n,p))
Z = Z.reshape((1,n,q))
beta = beta.reshape((1,p,1))
D = D.reshape((1,q,q))
# New epsilon based on 1000 simulations
epsilon = np.random.randn(1000, n, 1)
# Work out cholesky of D
Dhalf = np.linalg.cholesky(D)
# New b based on 1000 simulations
b = Dhalf @ np.random.randn(1000,q,1)
# New Y based on 1000 simulations
Y = X @ beta + Z @ b + epsilon
# Delete b, epsilon, D, beta and sigma^2
del b, epsilon, D, beta, sigma2
    # Calculate product matrices
XtX = X.transpose(0,2,1) @ X
XtY = X.transpose(0,2,1) @ Y
XtZ = X.transpose(0,2,1) @ Z
YtX = Y.transpose(0,2,1) @ X
YtY = Y.transpose(0,2,1) @ Y
YtZ = Y.transpose(0,2,1) @ Z
ZtX = Z.transpose(0,2,1) @ X
ZtY = Z.transpose(0,2,1) @ Y
ZtZ = Z.transpose(0,2,1) @ Z
# Get parameter vector
paramVec = fSFS3D(XtX, XtY, ZtX, ZtY, ZtZ, XtZ, YtZ, YtY, YtX, nlevels, nraneffs, tol,n,reml=True)
# Get the indices in the paramvector corresponding to D matrices
IndsDk = np.int32(np.cumsum(nraneffs*(nraneffs+1)//2) + p + 1)
IndsDk = np.insert(IndsDk,0,p+1)
# Retrieve beta estimates
beta = paramVec[:, 0:p]
# Retrieve sigma2 estimates
sigma2 = paramVec[:,p:(p+1),:]
# Retrieve unique D estimates elements (i.e. [vech(D_1),...vech(D_r)])
vechD = paramVec[:,(p+1):,:].reshape((1000,qu))
# Reconstruct D estimates
Ddict = dict()
# D as a dictionary
for k in np.arange(len(nraneffs)):
Ddict[k] = vech2mat3D(paramVec[:,IndsDk[k]:IndsDk[k+1],:])
# Full version of D estimates
D = getDfromDict3D(Ddict, nraneffs, nlevels)
# Inverse of (I+Z'ZD) multiplied by D
DinvIplusZtZD = forceSym3D(np.linalg.solve(np.eye(q) + D @ ZtZ, D))
# Get variance of Lbeta estimates
varLB = get_varLB3D(L, XtX, XtZ, DinvIplusZtZD, sigma2, nraneffs)
# Estimated variance of varLB
varofvarLB = np.var(varLB,axis=0)
# Reshape and return
return(varofvarLB.reshape((1,1)))
# ==================================================================================
#
# The below function collates the t-statistics, p-values and degrees of freedom
# estimates for the T-statistic simulations, prints summaries of the results and
# saves the results as csv files.
#
# ----------------------------------------------------------------------------------
#
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
# - Design 2: nlevels=[50,10], nraneffs=[3,2]
# - Design 3: nlevels=[100,50,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - nsim: Number of simulations to be collated.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def tOutput(desInd, OutDir, nsim=1000):
# Make row indices
row = ['sim'+str(i) for i in range(1,nsim+1)]
# Make column indices
col = ['Truth','FS','lmer']
#-----------------------------------------------------------------------------
# Work out timing stats
#-----------------------------------------------------------------------------
# Make timing table
tTable = pd.DataFrame(index=row, columns=col)
pTable = pd.DataFrame(index=row, columns=col)
    dfTable = pd.DataFrame(index=row, columns=col)
"""
Tests for scalar Timedelta arithmetic ops
"""
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import NaT, Timedelta, Timestamp, offsets
import pandas._testing as tm
from pandas.core import ops
class TestTimedeltaAdditionSubtraction:
"""
Tests for Timedelta methods:
__add__, __radd__,
__sub__, __rsub__
"""
@pytest.mark.parametrize(
"ten_seconds",
[
Timedelta(10, unit="s"),
timedelta(seconds=10),
np.timedelta64(10, "s"),
np.timedelta64(10000000000, "ns"),
offsets.Second(10),
],
)
def test_td_add_sub_ten_seconds(self, ten_seconds):
# GH#6808
base = Timestamp("20130101 09:01:12.123456")
expected_add = Timestamp("20130101 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + ten_seconds
assert result == expected_add
result = base - ten_seconds
assert result == expected_sub
@pytest.mark.parametrize(
"one_day_ten_secs",
[
Timedelta("1 day, 00:00:10"),
Timedelta("1 days, 00:00:10"),
timedelta(days=1, seconds=10),
np.timedelta64(1, "D") + np.timedelta64(10, "s"),
offsets.Day() + offsets.Second(10),
],
)
def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs):
# GH#6808
base = Timestamp("20130102 09:01:12.123456")
expected_add = Timestamp("20130103 09:01:22.123456")
expected_sub = Timestamp("20130101 09:01:02.123456")
result = base + one_day_ten_secs
assert result == expected_add
result = base - one_day_ten_secs
assert result == expected_sub
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_datetimelike_scalar(self, op):
# GH#19738
td = Timedelta(10, unit="d")
result = op(td, datetime(2016, 1, 1))
if op is operator.add:
# datetime + Timedelta does _not_ call Timedelta.__radd__,
# so we get a datetime back instead of a Timestamp
assert isinstance(result, Timestamp)
assert result == Timestamp(2016, 1, 11)
result = op(td, Timestamp("2018-01-12 18:09"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22 18:09")
result = op(td, np.datetime64("2018-01-12"))
assert isinstance(result, Timestamp)
assert result == Timestamp("2018-01-22")
result = op(td, NaT)
assert result is NaT
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_td(self, op):
td = Timedelta(10, unit="d")
result = op(td, Timedelta(days=10))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=20)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_pytimedelta(self, op):
td = Timedelta(10, unit="d")
result = op(td, timedelta(days=9))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=19)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedelta64(self, op):
td = Timedelta(10, unit="d")
result = op(td, np.timedelta64(-4, "D"))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=6)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_offset(self, op):
td = Timedelta(10, unit="d")
result = op(td, offsets.Hour(6))
assert isinstance(result, Timedelta)
assert result == Timedelta(days=10, hours=6)
def test_td_sub_td(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_pytimedelta(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_pytimedelta()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_pytimedelta() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_timedelta64(self):
td = Timedelta(10, unit="d")
expected = Timedelta(0, unit="ns")
result = td - td.to_timedelta64()
assert isinstance(result, Timedelta)
assert result == expected
result = td.to_timedelta64() - td
assert isinstance(result, Timedelta)
assert result == expected
def test_td_sub_nat(self):
# In this context pd.NaT is treated as timedelta-like
td = Timedelta(10, unit="d")
result = td - NaT
assert result is NaT
def test_td_sub_td64_nat(self):
td = Timedelta(10, unit="d")
td_nat = np.timedelta64("NaT")
result = td - td_nat
assert result is NaT
result = td_nat - td
assert result is NaT
def test_td_sub_offset(self):
td = Timedelta(10, unit="d")
result = td - offsets.Hour(1)
assert isinstance(result, Timedelta)
assert result == Timedelta(239, unit="h")
def test_td_add_sub_numeric_raises(self):
td = Timedelta(10, unit="d")
for other in [2, 2.0, np.int64(2), np.float64(2)]:
with pytest.raises(TypeError):
td + other
with pytest.raises(TypeError):
other + td
with pytest.raises(TypeError):
td - other
with pytest.raises(TypeError):
other - td
def test_td_rsub_nat(self):
td = Timedelta(10, unit="d")
result = NaT - td
assert result is NaT
result = np.datetime64("NaT") - td
assert result is NaT
def test_td_rsub_offset(self):
result = offsets.Hour(1) - Timedelta(10, unit="d")
assert isinstance(result, Timedelta)
assert result == Timedelta(-239, unit="h")
def test_td_sub_timedeltalike_object_dtype_array(self):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")])
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
exp = np.array(
[
now - Timedelta("1D"),
Timedelta("0D"),
np.timedelta64(2, "h") - Timedelta("1D"),
]
)
res = arr - Timedelta("1D")
tm.assert_numpy_array_equal(res, exp)
def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")])
with pytest.raises(TypeError):
Timedelta("1D") - arr
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_timedeltalike_object_dtype_array(self, op):
# GH#21980
arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")])
exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_td_add_mixed_timedeltalike_object_dtype_array(self, op):
# GH#21980
now = Timestamp.now()
arr = np.array([now, Timedelta("1D")])
exp = np.array([now + Timedelta("1D"), Timedelta("2D")])
res = op(arr, Timedelta("1D"))
tm.assert_numpy_array_equal(res, exp)
# TODO: moved from index tests following #24365, may need de-duplication
def test_ops_ndarray(self):
td = Timedelta("1 day")
# timedelta, timedelta
other = pd.to_timedelta(["1 day"]).values
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td + np.array([1])
msg = r"unsupported operand type\(s\) for \+: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) + td
expected = pd.to_timedelta(["0 days"]).values
tm.assert_numpy_array_equal(td - other, expected)
tm.assert_numpy_array_equal(-other + td, expected)
msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'"
with pytest.raises(TypeError, match=msg):
td - np.array([1])
msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.array([1]) - td
expected = pd.to_timedelta(["2 days"]).values
tm.assert_numpy_array_equal(td * np.array([2]), expected)
tm.assert_numpy_array_equal(np.array([2]) * td, expected)
msg = (
"ufunc '?multiply'? cannot use operands with types"
r" dtype\('<m8\[ns\]'\) and dtype\('<m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
tm.assert_numpy_array_equal(td / other, np.array([1], dtype=np.float64))
tm.assert_numpy_array_equal(other / td, np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(["2000-01-01"]).values
expected = pd.to_datetime(["2000-01-02"]).values
tm.assert_numpy_array_equal(td + other, expected)
tm.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(["1999-12-31"]).values
tm.assert_numpy_array_equal(-td + other, expected)
tm.assert_numpy_array_equal(other - td, expected)
class TestTimedeltaMultiplicationDivision:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="d")
with pytest.raises(TypeError):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td).value == -1 * td.value
assert op(-1.0, td).value == -1.0 * td.value
with pytest.raises(TypeError):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError):
# invalid multiply with another timedelta
op(td, td)
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5.0
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="d")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="d")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
with pytest.raises(TypeError):
td // np.datetime64("2016-01-01", dtype="datetime64[us]")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
        scalar = Timedelta(hours=3, minutes=4)
import web
import pandas as pd
import numpy as np
import common
import os
import click
def hydro_op_chars_inputs_(webdb, project,
hydro_op_chars_sid,
balancing_type_project):
rows = webdb.where("inputs_project_hydro_operational_chars",
project=project,
hydro_operational_chars_scenario_id=hydro_op_chars_sid,
balancing_type_project=balancing_type_project).list()
if rows:
return pd.DataFrame(rows)
else:
raise common.NoEntriesError(f"Table inputs_project_hydro_operational_chars has no entries for project={project}, hydro_op_chars_scenario_id={hydro_op_chars_sid}, balancing_type_project={balancing_type_project}")
def hydro_op_chars_inputs(webdb, scenario, project):
hydro_op_chars_scenario_id = get_hydro_ops_chars_sceanario_id(webdb,
scenario, project)
balancing_type_project = get_balancing_type(webdb, scenario)
return hydro_op_chars_inputs_(webdb, project,
hydro_op_chars_scenario_id,
balancing_type_project)
def get_capacity(webdb,
scenario,
project):
capacity_scenario_id = get_project_specified_capacity_scenario_id(webdb,
scenario)
return common.get_field(webdb , "inputs_project_specified_capacity",
"specified_capacity_mw",
project=project,
project_specified_capacity_scenario_id=capacity_scenario_id)
def get_project_specified_capacity_scenario_id(webdb, scenario):
return common.get_field(webdb,
"scenarios",
"project_specified_capacity_scenario_id",
scenario_name=scenario)
def get_temporal_scenario_id(webdb, scenario):
return common.get_field(webdb,
"scenarios",
"temporal_scenario_id",
scenario_name=scenario)
def get_balancing_type(webdb, scenario):
temporal_scenario_id = get_temporal_scenario_id(webdb, scenario)
return common.get_field(webdb, "inputs_temporal_horizons",
"balancing_type_horizon",
temporal_scenario_id=temporal_scenario_id)
def get_temporal_start_end_table(conn, scenario):
temporal_id = get_temporal_scenario_id(conn, scenario)
temporal = conn.where("inputs_temporal_horizon_timepoints_start_end",
temporal_scenario_id=temporal_id).list()
return temporal
def get_power_mw_dataset(webdb, scenario, project):
scenario_id = common.get_field(webdb,
'scenarios',
"scenario_id",
scenario_name = scenario)
rows = webdb.where("results_project_dispatch",
scenario_id=scenario_id,
project=project,
operational_type='gen_hydro').list()
    return pd.DataFrame(rows)
import logging
import os
import pandas as pd
from glob import glob
from pathlib import Path, PosixPath, WindowsPath
from ekorpkit.utils.func import elapsed_timer
log = logging.getLogger(__name__)
def get_filepaths(
filename_patterns, base_dir=None, recursive=True, verbose=True, **kwargs
):
if isinstance(filename_patterns, (PosixPath, WindowsPath)):
filename_patterns = str(filename_patterns)
if isinstance(filename_patterns, str):
filename_patterns = [filename_patterns]
filepaths = []
for file in filename_patterns:
file = os.path.join(base_dir, file) if base_dir else file
if os.path.exists(file):
if Path(file).is_file():
filepaths.append(file)
else:
filepaths += glob(file, recursive=recursive)
filepaths = [fp for fp in filepaths if Path(fp).is_file()]
if verbose:
log.info(f"Processing [{len(filepaths)}] files from {filename_patterns}")
return filepaths
def get_files_from_archive(archive_path, filetype=None):
import tarfile
from zipfile import ZipFile
if ".tar.gz" in archive_path:
log.info(f"::Extracting files from {archive_path} with tar.gz")
archive_handle = tarfile.open(archive_path, "r:gz")
files = [
(file, file.name) for file in archive_handle.getmembers() if file.isfile()
]
open_func = archive_handle.extractfile
elif ".tar.bz2" in archive_path:
log.info(f"::Extracting files from {archive_path} with tar.bz2")
archive_handle = tarfile.open(archive_path, "r:bz2")
files = [
(file, file.name) for file in archive_handle.getmembers() if file.isfile()
]
open_func = archive_handle.extractfile
elif ".zip" in archive_path:
log.info(f"::Extracting files from {archive_path} with zip")
archive_handle = ZipFile(archive_path)
files = [
(file, file.encode("cp437").decode("euc-kr"))
for file in archive_handle.namelist()
]
open_func = archive_handle.open
else:
# print(f'::{archive_path} is not archive, use generic method')
files = [(archive_path, os.path.basename(archive_path))]
archive_handle = None
open_func = None
if filetype:
files = [file for file in files if filetype in file[1]]
return files, archive_handle, open_func
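# Illustrative usage sketch (not part of the original module; the archive path and the
# ".txt" filter below are placeholder values): iterate the returned (member, name) pairs,
# read each member through open_func, and close the archive handle when done.
#
#   files, handle, open_func = get_files_from_archive("corpus.tar.gz", filetype=".txt")
#   for member, name in files:
#       with open_func(member) as fobj:
#           text = fobj.read()
#   if handle is not None:
#       handle.close()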
def is_dataframe(data):
return isinstance(data, pd.DataFrame)
def concat_data(
data,
columns=None,
add_key_as_name=False,
name_column="_name_",
ignore_index=True,
verbose=False,
**kwargs,
):
if isinstance(data, dict):
log.info(f"Concatenating {len(data)} dataframes")
dfs = []
for df_name in data:
df_each = data[df_name]
if isinstance(columns, list):
_columns = [c for c in columns if c in df_each.columns]
df_each = df_each[_columns]
if add_key_as_name:
df_each[name_column] = df_name
dfs.append(df_each)
if len(dfs) > 0:
return pd.concat(dfs, ignore_index=ignore_index)
else:
return None
elif isinstance(data, list):
log.info(f"Concatenating {len(data)} dataframes")
if len(data) > 0:
return pd.concat(data, ignore_index=ignore_index)
else:
return None
else:
        log.warning("Warning: data is not a dict or a list; returning it unchanged")
return data
def load_data(filename, base_dir=None, verbose=False, **kwargs):
concatenate = kwargs.pop("concatenate", False)
ignore_index = kwargs.pop("ignore_index", False)
if base_dir:
filepaths = get_filepaths(filename, base_dir)
else:
filepaths = get_filepaths(filename)
log.info(f"Loading {len(filepaths)} dataframes from {filepaths}")
data = {
os.path.basename(f): load_dataframe(f, verbose=verbose, **kwargs)
for f in filepaths
}
data = {k: v for k, v in data.items() if v is not None}
if len(data) == 1:
return list(data.values())[0]
elif len(filepaths) > 1:
if concatenate:
return pd.concat(data.values(), ignore_index=ignore_index)
else:
return data
else:
log.warning(f"No files found for {filename}")
return None
def load_dataframe(
filename,
base_dir=None,
columns=None,
index_col=None,
verbose=False,
**kwargs,
):
dtype = kwargs.pop("dtype", None)
if isinstance(dtype, list):
dtype = {k: "str" for k in dtype}
parse_dates = kwargs.pop("parse_dates", False)
filetype = kwargs.pop("filetype", None) or "parquet"
fileinfo = os.path.splitext(filename)
filename = fileinfo[0]
filetype = fileinfo[1] if len(fileinfo) > 1 else filetype
filetype = "." + filetype.replace(".", "")
filename = f"{filename}{filetype}"
if base_dir is not None:
filepath = os.path.join(base_dir, filename)
else:
filepath = filename
if not os.path.exists(filepath):
log.warning(f"File {filepath} does not exist")
return None
log.info(f"Loading data from {filepath}")
with elapsed_timer(format_time=True) as elapsed:
if "csv" in filetype or "tsv" in filetype:
delimiter = kwargs.pop("delimiter", "\t") if "tsv" in filetype else None
data = pd.read_csv(
filepath,
index_col=index_col,
dtype=dtype,
parse_dates=parse_dates,
delimiter=delimiter,
)
elif "parquet" in filetype:
engine = kwargs.pop("engine", "pyarrow")
            data = pd.read_parquet(filepath, engine=engine)
""" parquet compat """
from __future__ import annotations
from distutils.version import LooseVersion
import io
import os
from typing import Any, AnyStr, Dict, List, Optional, Tuple
from warnings import catch_warnings
from pandas._typing import FilePathOrBuffer, StorageOptions
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
from pandas import DataFrame, MultiIndex, get_option
from pandas.core import generic
from pandas.io.common import (
IOHandles,
get_handle,
is_fsspec_url,
is_url,
stringify_path,
)
def get_engine(engine: str) -> BaseImpl:
""" return our implementation """
if engine == "auto":
engine = get_option("io.parquet.engine")
if engine == "auto":
# try engines in this order
engine_classes = [PyArrowImpl, FastParquetImpl]
error_msgs = ""
for engine_class in engine_classes:
try:
return engine_class()
except ImportError as err:
error_msgs += "\n - " + str(err)
raise ImportError(
"Unable to find a usable engine; "
"tried using: 'pyarrow', 'fastparquet'.\n"
"A suitable version of "
"pyarrow or fastparquet is required for parquet "
"support.\n"
"Trying to import the above resulted in these errors:"
f"{error_msgs}"
)
if engine == "pyarrow":
return PyArrowImpl()
elif engine == "fastparquet":
return FastParquetImpl()
raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
def _get_path_or_handle(
path: FilePathOrBuffer,
fs: Any,
storage_options: StorageOptions = None,
mode: str = "rb",
is_dir: bool = False,
) -> Tuple[FilePathOrBuffer, Optional[IOHandles], Any]:
"""File handling for PyArrow."""
    path_or_handle = stringify_path(path)
# link: https://github.com/liulingbo918/ATFM/tree/master/data/TaxiNYC
import h5py
import pandas as pd
import numpy as np
import json
import util
outputdir = 'output/NYCTAXI20140112'
util.ensure_dir(outputdir)
dataurl = 'input/NYCTAXI20140112/'
dataname = outputdir+'/NYCTAXI20140112'
f = h5py.File(dataurl + 'NYC2014.h5', 'r')
date_df = pd.DataFrame(np.array(f['date']))
data = np.array(f['data'])
def get_geo():
li = []
ind = 0
for x in range(15):
for y in range(5):
li.append([ind, "Polygon", "[]", x, y])
ind += 1
return li
def remove_incomplete_days(data, timestamps, t=48):
print("before removing", len(data))
date = []
days = []
days_incomplete = []
i = 0
print(len(timestamps))
while i < len(timestamps):
if int(str(timestamps[i])[10:12]) != 1:
i += 1
elif i + t - 1 < len(timestamps) \
and int(str(timestamps[i + t - 1])[10:12]) == t:
for j in range(48):
date.append(timestamps[i + j])
days.append(str(timestamps[i])[2:10])
i += t
else:
days_incomplete.append(str(timestamps[i])[2:10])
i += 1
    print("incomplete days", days_incomplete)
days = set(days)
idx = []
for i, t in enumerate(timestamps):
if str(timestamps[i])[2:10] in days:
idx.append(i)
data_ = data[idx]
print(len(date))
print(len(data_))
return date, data_
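# Note: the HDF5 timestamps appear to be byte strings such as b'2014010112', i.e. YYYYMMDD
# followed by a 1-48 half-hour slot index; del_date below converts one of them into an
# ISO-8601 string at 30-minute resolution (slot 1 -> 00:00, slot 2 -> 00:30, ...).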
def del_date(date):
date_str = str(date)
s0 = date_str[2:6]
s1 = date_str[6:8]
s2 = date_str[8:10]
s3 = date_str[10:12]
num_s3 = int(s3) - 1
num = 0
if num_s3 % 2 == 0:
num = int(num_s3 * 0.5)
if num < 10:
str_s3 = '0' + str(num)
else:
str_s3 = str(num)
s = s0 + '-' + s1 + '-' + s2 + 'T' + str_s3 + ':00:00' + 'Z'
else:
num = (num_s3 - 1) * 0.5
num = int(num)
if num < 10:
str_s3 = '0' + str(num)
else:
str_s3 = str(num)
s = s0 + '-' + s1 + '-' + s2 + 'T' + str_s3 + ':30:00' + 'Z'
return s
new_date, new_data = remove_incomplete_days(np.array(f['data']),
np.array(f['date']))
date_df = pd.DataFrame(new_date)
date_df['time'] = date_df[0].apply(del_date)
def get_dyna():
ind = 0
li = []
for x in range(15):
for y in range(5):
for time in range(len(date_df['time'])):
li.append([ind, "state", date_df['time'][time], x, y,
new_data[time][0][x][y], new_data[time][1][x][y]])
ind += 1
return li
L0 = get_geo()
pd.DataFrame(L0, columns=["geo_id", "type", "coordinates", "row_id",
"column_id"]).to_csv(dataname + '.geo', index=None)
L1 = get_dyna()
pd.DataFrame(L1, columns=["dyna_id", "type", "time", "row_id",
"column_id", "pickup", "dropoff"])\
.to_csv(dataname + '.grid', index=None)
ext = h5py.File(dataurl + 'Meteorology.h5', 'r')
date = np.array(ext['date'])
Temperature = np.array(ext['Temperature'])
Weather = np.array(ext['Weather'])
WindSpeed = np.array(ext['WindSpeed'])
datenew = []
for da in date:
datenew.append(del_date(da))
ext_id = np.array(range(len(datenew)))
extdf = pd.DataFrame()
# %% Dependencies and variables' definitions.
import pandas as pd
import geopandas as gpd
from osmi_helpers import data_gathering as osmi_dg
# Define Data Sources
ARBRAT_VIARI_URL = "https://opendata-ajuntament.barcelona.cat/data/dataset/27b3f8a7-e536-4eea-b025-ce094817b2bd/resource/28034af4-b636-48e7-b3df-fa1c422e6287/download"
ARBRAT_ZONA_URL = "https://opendata-ajuntament.barcelona.cat/data/dataset/9b525e1d-13b8-48f1-abf6-f5cd03baa1dd/resource/8f2402dd-72dc-4b07-8145-e3f75004b0de/download"
CSV_PARSER = 'fields_mapping.csv'
# %% [markdown]
# # Barcelona Trees' import.
#
# ## Goals
#
# The goal is to manually merge and import all the trees' information provided by Barcelona City Council, while testing the scripts for data preparation.
#
# ## Data Sources
#
# Two datasets provided by Barcelona City Council will be used:
#
# * [Arbrat viari](https://opendata-ajuntament.barcelona.cat/data/ca/dataset/arbrat-viari): Name of the species and geolocation of the trees of the city of Barcelona located on public roads. The information contains, among other data, the scientific name, the common name, the height, the direction and the width of the sidewalk... The trees of the parks are not included. The coordinates are expressed in the ETRS89 reference system. This dataset complements the Zone trees dataset of the city of Barcelona. Historical resources containing the data available up to the last week of each term are published. The resources are ordered by year and term, information that can be found in the name of the resource.
# * [Arbrat zona](https://opendata-ajuntament.barcelona.cat/data/ca/dataset/arbrat-zona): Name of the species and geolocation of the trees of the city of Barcelona located on public roads. The information contains, among other data, the scientific name, the common name, the height, the direction and the width of the sidewalk... The trees of the parks are not included. The coordinates are expressed in the ETRS89 reference system. This dataset complements the Street trees dataset of the city of Barcelona. Historical resources containing the data available up to the last week of each term are published. The resources are ordered by year and term, information that can be found in the name of the resource.
#
# ## License
#
# We have an express authorization from the Barcelona City Council for the reuse of the open data published on their open government website
#
# 
#
# ## Import type
#
# This import will be done manually, using JOSM to edit the data. Consider using Task Manager.
#
# ## Data preparations
#
# All data preparations will be made automatically in this notebook.
#
# ### Fields' mapping.
# %% Read CSV file with fields' mapping and description.
fields_mapping = pd.read_csv(CSV_PARSER)
# Display table.
fields_mapping
# %% [markdown]
# ## Import script.
# ### Data Gathering
# %% Data Gathering
# Download a file and convert it into a dataframe.
df_aviari = pd.read_csv(ARBRAT_VIARI_URL)
df_azona = pd.read_csv(ARBRAT_ZONA_URL)
# Combine both data sources into a single one.
df_raw = pd.concat([df_aviari, df_azona])
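# %% Quick sanity check (illustrative addition, not in the original notebook): the
# concatenation should keep every record from both downloads.
print(len(df_aviari), len(df_azona), len(df_raw))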
from keras.layers.core import Dense, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def data_preparation(company_input_data):
    """
    Prepares the data via min-max normalisation.
    :param company_input_data: the company's stock quote data.
    :return: the x and y arrays and the normalisation (scaler) information.
    """
company_val_data = company_input_data.iloc[:, 1:2].values
scalar = MinMaxScaler(feature_range=(0, 1))
apple_training_scaled = scalar.fit_transform(company_val_data)
features= []
labels = []
for i in range(60, company_val_data.size):
features.append(apple_training_scaled[i - 60:i, 0])
labels.append(apple_training_scaled[i, 0])
features, labels = np.array(features), np.array(labels)
features = np.reshape(features, (features.shape[0], features.shape[1], 1))
return features, labels, scalar
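# Note: data_preparation returns `features` with shape (samples, 60, 1) - a 60-step
# look-back window per sample - which matches the input_shape expected by the LSTM
# layers built in model_training below.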
def model_training(features_set, labels):
    """
    Trains the model. Also collects the training metrics, including the validation ones.
    :param features_set: the array of x values.
    :param labels: the array of y values.
    :return: the trained model.
    """
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(features_set.shape[1], 1)))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(units=50))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(optimizer='adam', loss='mean_squared_error')
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(features_set, labels, epochs=100, batch_size=32, validation_split=0.1,
callbacks=[tensorboard_callback])
return model
def prediction(model_obj, scalar, company_testing_complete_data, company_data):
    """
    Predicts the stock quotes.
    :param model_obj: the trained model.
    :param scalar: the scaling (normalisation) information.
    :param company_testing_complete_data: the test data.
    :param company_data: the quote data.
    :return: the predictions.
    """
test_features = get_test_feature(company_data, company_testing_complete_data, scalar)
predictions = model_obj.predict(test_features)
predictions = scalar.inverse_transform(predictions)
return predictions
def get_test_feature(company_data, company_testing_complete_data, scaler):
    """
    Prepares the test data.
    :param company_data: the stock quotes.
    :param company_testing_complete_data: the test data.
    :param scaler: the normalisation information.
    :return: the processed test data.
    """
apple_total = pd.concat((company_data['Open'], company_testing_complete_data['Open']), axis=0)
test_inputs = apple_total[len(apple_total) - len(company_testing_complete_data) - 60:].values
test_inputs = test_inputs.reshape(-1, 1)
test_inputs = scaler.transform(test_inputs)
test_features = []
for i in range(60, 80):
test_features.append(test_inputs[i - 60:i, 0])
test_features = np.array(test_features)
test_features = np.reshape(test_features, (test_features.shape[0], test_features.shape[1], 1))
return test_features
def trend_modelling(train_data, test_data, label):
    """
    Presents the prediction results.
    :param train_data: the data the predictions result from.
    :param test_data: the test data.
    :param label: the company name.
    """
    company_input_data = pd.read_csv(train_data)
#!/usr/bin/python
print('financials_update_quarterly - initiating. Printing Stock and % Progress.')
import os
import pandas as pd
from datetime import date
pd.set_option('display.max_columns', None)
pd.options.display.float_format = '{:20,.2f}'.format
pd.options.mode.use_inf_as_na = True
cwd = os.getcwd()
input_folder = "0_input"
prices_folder = "data"
output_folder = "0_output"
temp_folder = "temp"
prices_temp = "prices"
financials_temp = "financials_quarterly"
#check year
todays_date = date.today()
curr_year = todays_date.year
# prepare tickers list
tickers_narrowed = pd.read_csv(os.path.join(cwd,"0_symbols.csv"))
ticker_narrowed = tickers_narrowed.values.tolist()
tickers = ' '.join(tickers_narrowed["symbol"].astype(str)).strip()
# find last updated ticker (this is necessary if you lose internet connection, etc)
financials_quarterly_last_ticker = pd.read_csv(os.path.join(cwd,input_folder,temp_folder,"financials_quarterly_last_ticker.csv"),index_col=0)
last_ticker_n = financials_quarterly_last_ticker.values[0]
last_ticker_nn = last_ticker_n[0]
print("last ticker in financials_quarterly was number ", last_ticker_nn)
# start importing
index_max = pd.to_numeric(tickers_narrowed.index.values.max())
from yahoo_fin.stock_info import * #initiate yahoo_fin
financials_table = []
company_info = []
for t in tickers.split(' '):
try:
n = pd.to_numeric(tickers_narrowed["symbol"][tickers_narrowed["symbol"] == t].index).values
if n > last_ticker_n:
# check if last quarter is recent (many tickers are dead for example)
df_yf_stats = get_stats(t)
df_check_mrq = df_yf_stats["Value"][df_yf_stats["Attribute"] == "Most Recent Quarter (mrq)"]
datetime_object = pd.to_datetime(df_check_mrq) # , errors='coerce')
df_mrq_year = datetime_object.dt.year
mrq_year = df_mrq_year.values[0]
if (mrq_year + 1) >= curr_year:
# first loop through "values" in "dictionary"
df_yf_financials = get_financials(t, yearly=False, quarterly=True)
values_table = []
for keys, values in df_yf_financials.items():
#df_keys = keys #we dont need "keys"
df = values
df.reset_index(drop=False, inplace=True)
df.columns.values[[0, 1, 2, 3, 4]] = ['Breakdown', 't0', 't-1', 't-2', 't-3']
values_table.append(df)
values_table = pd.concat(values_table)
values_table = values_table[~values_table['Breakdown'].duplicated(keep='first')] #catching double entries in values to properly reset the index
values_table.drop_duplicates()
values_table.reset_index(drop=True, inplace=True)
values_table.set_index('Breakdown', inplace=True)
# transpose financials
df_T = values_table.T
df_T.rename(columns={'netTangibleAssets':'NAV'}, inplace=True)
df_T['WC'] = df_T['totalCurrentAssets'] - df_T['totalCurrentLiabilities']
df_T['symbol'] = t
df_T['Period'] = df_T.index
#get statistics
#df_yf_stats = get_stats(t)
df_yf_stats.reset_index(drop=True, inplace=True)
df_yf_stats.set_index('Attribute', inplace=True)
df_stats = df_yf_stats.T
df_stats['symbol'] = t
df_stats.reset_index(drop=True, inplace=True)
# get "quote data"
df_yf_quote_data = get_quote_data(t)
df_yf_quote_data_1 = pd.Series(df_yf_quote_data)
df_yf_quote_data_2 = pd.DataFrame(df_yf_quote_data_1).T
df_yf_quote_data_2.reset_index(drop=False, inplace=True)
df_yf_quote_data_2 = df_yf_quote_data_2.drop(columns=['index'])
#get company info
df_yf_info = get_company_info(t)
df_yf_info.reset_index(drop=False, inplace=True)
df_yf_info = df_yf_info[~df_yf_info['Breakdown'].duplicated(keep='first')] #catching double entries in values to properly reset the index
df_yf_info.set_index('Breakdown', inplace=True)
df_info = df_yf_info.T
df_info['symbol'] = t
df_info.reset_index(drop=False, inplace=True)
# merge
# financials to quote data
to_merge = df_T
df_merged = pd.merge(df_T, df_yf_quote_data_2, how='left', left_on=['symbol'], right_on=['symbol'], suffixes=('', '_drop'))
df_merged.drop([col for col in df_merged.columns if 'drop' in col], axis=1, inplace=True)
df_merged.drop_duplicates()
df_merged.reset_index(drop=True, inplace=True)
# to stats
to_merge = df_merged
df_merged = pd.merge(to_merge, df_stats, how='left', left_on=['symbol'], right_on=['symbol'], suffixes=('', '_drop'))
df_merged.drop([col for col in df_merged.columns if 'drop' in col], axis=1, inplace=True)
df_merged.drop_duplicates()
df_merged.reset_index(drop=True, inplace=True)
# to info
to_merge = df_merged
                df_merged = pd.merge(to_merge, df_info, how='left', left_on=['symbol'], right_on=['symbol'], suffixes=('', '_drop'))
import datetime
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from numpy.linalg import inv
from scipy.linalg import sqrtm
from sklearn import covariance
from sklearn.base import BaseEstimator
from sklearn.covariance import EmpiricalCovariance
from sklearn.decomposition import PCA
from statsmodels.api import OLS
from statsmodels.tools import add_constant
from .. import tools
# expenses + dividend tax
EXPENSES = {
"CASH": 0.0,
"TMF": 0.0108,
"DPST": 0.0104,
"ASHR": 0.0065,
"TQQQ": 0.0095,
"UGLD": 0.0135,
"ERX": 0.01,
"RING": 0.0039,
"LABU": 0.0109,
"YINN": 0.0152,
"SOXL": 0.0097,
"RETL": 0.0105,
"TYD": 0.0097,
"UDOW": 0.0095,
"GBTC": 0.02,
"FAS": 0.0096,
"MCHI": 0.0064,
"CQQQ": 0.0070,
"CHIX": 0.0065,
"UBT": 0.0095,
"FXI": 0.0074,
"DRN": 0.0109,
"O": 0 + 0.045 * 0.15,
"DSUM": 0.0045 + 0.035 * 0.15,
"SPY": 0.0009,
"TLT": 0.0015,
"ZIV": 0.0135,
"GLD": 0.004,
"BABA": 0.0,
"BIDU": 0.0,
"IEF": 0.0015,
"KWEB": 0.007,
"JPNL": 0.0121,
"EDC": 0.0148,
"EEMV.L": 0.0025,
"IWVL.L": 0.003,
"MVEU.L": 0.0025,
"USMV": 0.0015,
"ACWV": 0.002,
"EFAV": 0.002,
"KRE": 0.0035,
"EEM": 0.0068,
"VNQ": 0.0012 + 0.0309 * 0.15,
"EWJ": 0.0049,
"HYG": 0.0049,
"VLUE": 0.0004,
"SPMV": 0.001,
"IDWP.L": 0.0069,
"ZN": 0.0,
"RFR": 0.0,
}
class CovarianceEstimator(object):
"""Estimator which accepts sklearn objects.
:param w: regularization from paper `Enhanced Portfolio Optimization`, value 0 means no regularization,
value 1 means to ignore covariances
:param frequency: how often should we recalculate covariance matrix, used to speed up MPT prototyping
"""
def __init__(self, cov_est, window, standardize=True, w=0.0, frequency=1):
self.cov_est = cov_est
self.window = window
self.standardize = standardize
self.w = w
self.frequency = frequency
self._last_cov = None
self._last_n = 0
def fit(self, X):
# assert X.mean().mean() < 1.
# reuse covariance matrix
if (
self.frequency > 1
and len(X) - self._last_n < self.frequency
and list(X.columns) == list(self._last_cov.columns)
):
return self._last_cov
# only use last window
if self.window:
X = X.iloc[-self.window :]
# remove zero-variance elements
zero_variance = X.std() == 0
Y = X.iloc[:, ~zero_variance.values]
# most estimators assume isotropic covariance matrix, so standardize before feeding them
std = Y.std()
Y = Y / std
# can estimator handle NaN values?
if getattr(self.cov_est, "allow_nan", False):
self.cov_est.fit(Y)
cov = pd.DataFrame(
self.cov_est.covariance_, index=Y.columns, columns=Y.columns
)
else:
# compute full covariance for non-NaN columns
Yn = Y.dropna(1, how="any")
full_cov = self.cov_est.fit(Yn).covariance_
full_cov = pd.DataFrame(full_cov, index=Yn.columns, columns=Yn.columns)
full_cov = full_cov.reindex(Y.columns).reindex(columns=Y.columns)
# put back NaN columns one by one, compute covariance using
# available history
cols = list(Yn.columns)
for col in set(Y.columns) - set(Yn.columns):
cols.append(col)
c = Y[cols].dropna().cov().loc[col]
full_cov.loc[col, cols] = c
full_cov.loc[cols, col] = c
cov = full_cov.loc[Y.columns, Y.columns]
# standardize back
cov = np.outer(std, std) * cov
# put back zero covariance
cov = cov.reindex(X.columns).reindex(columns=X.columns).fillna(0.0)
# turn on?
# assert np.linalg.eig(cov)[0].min() > 0
# annualize covariance
cov *= tools.freq(X.index)
# regularize
cov = (1 - self.w) * cov + self.w * np.diag(np.diag(cov))
# CASH should have zero covariance
if "CASH" in X.columns:
cov.loc["CASH", :] = 0
cov.loc[:, "CASH"] = 0
self._last_cov = cov
self._last_n = len(X)
return cov
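# Illustrative usage sketch (not part of the original module; `X` is assumed to be a
# DataFrame of gross price relatives, as used elsewhere in this file):
#
#   cov_est = CovarianceEstimator(covariance.OAS(), window=252, w=0.1)
#   sigma = cov_est.fit(X - 1)   # annualized covariance matrix as a DataFrame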
class SharpeEstimator(object):
def __init__(
self,
global_sharpe=0.4,
override_sharpe=None,
override_mean=None,
capm=None,
rfr=0.0,
verbose=False,
cov_estimator=None,
tax_adjustment=None,
):
"""
:param rfr: risk-free rate
"""
self.override_sharpe = override_sharpe or {}
self.override_mean = override_mean or {}
self.capm = capm or {}
self.global_sharpe = global_sharpe
self.rfr = rfr
self.verbose = verbose
self.cov_estimator = cov_estimator
self.tax_adjustment = tax_adjustment
def fit(self, X, sigma):
"""
formula for mean is:
sh * vol + rf - expenses
"""
# estimate sigma again if cov_estimator is present
if self.cov_estimator is not None:
sigma = self.cov_estimator.fit(X - 1)
est_sh = pd.Series(self.global_sharpe, index=sigma.index)
for k, v in self.override_sharpe.items():
if k in est_sh:
est_sh[k] = v
if isinstance(self.rfr, pd.Series):
rfr = self.rfr.loc[X.index[-1]]
else:
rfr = self.rfr
        # assume that all assets have the configured yearly sharpe ratio and deduce return from volatility
vol = pd.Series(np.sqrt(np.diag(sigma)), index=sigma.index)
if self.verbose:
missing_expenses = set(sigma.index) - set(EXPENSES.keys())
if missing_expenses:
logging.warning("Missing ETF expense for {}".format(missing_expenses))
expenses = pd.Series(
[EXPENSES.get(c, 0.0) for c in sigma.index], index=sigma.index
)
mu = est_sh * vol + rfr - expenses
# adjust CASH - note that CASH has -1.5% fee from IB
if "CASH" in X.columns:
mu["CASH"] = X.CASH[-1] ** (tools.freq(X.index)) - 1
for asset, item in self.capm.items():
if isinstance(item, list):
markets = item
alpha = 0.0
elif isinstance(item, dict):
markets = item["market"]
alpha = item["alpha"]
if asset in X.columns:
mu[asset] = self._capm_mu(asset, markets, mu, sigma, X) + alpha
if self.override_mean:
for k, v in self.override_mean.items():
if k in mu.index:
mu.loc[k] = v
if self.tax_adjustment:
mu = self.tax_adjustment.fit(mu, sigma)
if self.verbose:
print(
pd.DataFrame(
{
"volatility": vol,
"mean": mu,
}
)
)
return mu
def _capm_mu(self, asset, markets, mu, sigma, X):
"""Calculate mean estimated by CAPM."""
freq = tools.freq(X.index)
X = X[[asset] + markets].dropna()
res = OLS(
X[asset] - 1 - self.rfr / freq,
add_constant(X[markets] - 1 - self.rfr / freq),
).fit()
beta = res.params.drop(["const"])
prev_mu = mu[asset]
new_mu = self.rfr + (mu[markets] - self.rfr).dot(beta)
alpha = res.params.const * freq
alpha_std = freq * np.sqrt(res.cov_params().loc["const", "const"])
if self.verbose:
print(
f"Beta of {[x for x in beta.round(2)]} changed {asset} mean return from {prev_mu:.1%} to {new_mu:.1%} with alpha {alpha:.2%} ({alpha_std:.2%})"
)
# be benevolent and add alpha if it is positive
# k = 0.2 was fine tuned on DPST in order to get it out of the portfolio
k = 0.2
if alpha - k * alpha_std > 0 and asset in ("KRE", "DPST"):
if self.verbose:
print(f" Adding alpha of {alpha - k * alpha_std:.2%} for {asset}")
new_mu += alpha - k * alpha_std
return new_mu
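# Illustrative sketch (assumes `sigma` comes from a CovarianceEstimator as above):
#
#   mu = SharpeEstimator(global_sharpe=0.4, rfr=0.0).fit(X, sigma)
#   # mu is a per-asset Series: estimated sharpe * volatility + rfr - expenses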
class MuVarianceEstimator(object):
def fit(self, X, sigma):
# assume that all assets have yearly sharpe ratio 1 and deduce return from volatility
mu = np.matrix(sigma).dot(np.ones(sigma.shape[0]))
return mu
class HistoricalEstimator(object):
def __init__(self, window):
self.window = window
def fit(self, X, sigma):
if self.window:
X = X.iloc[-self.window :]
mu = X.mean()
mu = (1 + mu) ** tools.freq(X.index) - 1
return mu
class MixedEstimator(object):
"""Combines historical estimation with sharpe estimation from volatility.
Has two parameters alpha and beta that works like this:
alpha in (0, 1) controls regularization of covariance matrix
alpha = 0 -> assume covariance is zero
alpha = 1 -> don't regularize
beta in (0, inf) controls weight we give on historical mean
beta = 0 -> return is proportional to volatility if alpha = 0 or row sums
of covariance matrix if alpha = 1
beta = inf -> use historical return
"""
def __init__(self, window=None, alpha=0.0, beta=0.0):
self.GLOBAL_SHARPE = SharpeEstimator.GLOBAL_SHARPE
self.historical_estimator = HistoricalEstimator(window=window)
self.alpha = alpha
self.beta = beta
def fit(self, X, sigma):
alpha = self.alpha
beta = self.beta
m = X.shape[1]
# calculate historical return
historical_mu = self.historical_estimator.fit(X, sigma)
# regularize sigma
reg_sigma = alpha * sigma + (1 - alpha) * np.diag(np.diag(sigma))
# avoid computing inversions
if beta == 0:
mu = self.GLOBAL_SHARPE * np.real(sqrtm(reg_sigma)).dot(np.ones(m))
else:
# estimate mean
mu_tmp = beta * historical_mu + self.GLOBAL_SHARPE * inv(
np.real(sqrtm(reg_sigma))
).dot(np.ones(m))
mu = inv(inv(reg_sigma) + beta * np.eye(m)).dot(mu_tmp)
return pd.Series(mu, index=X.columns)
class PCAEstimator(object):
def __init__(self, window, n_components="mle"):
self.window = window
self.n_components = n_components
def fit(self, X, sigma):
# take recent period (PCA could be estimated from sigma too)
R = X.iloc[-self.window :].fillna(0.0)
pca = PCA(n_components=self.n_components).fit(R)
pca_mu = np.sqrt(pca.explained_variance_) * 0.5 * np.sqrt(tools.freq(X.index))
comp = pca.components_.T
        # principal components have arbitrary orientation -> choose orientation to maximize final mean return
comp = comp * np.sign(comp.sum(0))
pca_mu = comp.dot(pca_mu)
pca_mu = pd.Series(pca_mu, index=X.columns)
return pca_mu
class MLEstimator(object):
"""Predict mean using sklearn model."""
def __init__(self, model, freq="M"):
self.model = model
self.freq = freq
def featurize(self, H):
X = pd.DataFrame(
{
"last_sh": H.shift(1).stack(),
"history_sh": pd.expanding_mean(H).shift(1).stack(),
"history_sh_vol": pd.expanding_std(H).shift(1).stack(),
"nr_days": H.notnull().cumsum().stack(),
}
)
return X
def fit(self, X, sigma):
# work with sharpe ratio of log returns (assume raw returns)
R = np.log(X + 1)
H = R.resample(
self.freq, how=lambda s: s.mean() / s.std() * np.sqrt(tools.freq(X.index))
)
# calculate features
XX = self.featurize(H)
yy = H.stack()
# align training data and drop missing values
XX = XX.dropna()
yy = yy.dropna()
XX = XX.loc[yy.index].dropna()
yy = yy.loc[XX.index]
# fit model on historical data
self.model.fit(XX, yy)
# print(self.model.intercept_, pd.Series(self.model.coef_, index=XX.columns))
# make predictions for all assets with features
XX_pred = XX.loc[XX.index[-1][0]]
pred_sh = self.model.predict(XX_pred)
pred_sh = pd.Series(pred_sh, index=XX_pred.index)
# assume 0.5 sharpe for assets with missing features
pred_sh = pred_sh.reindex(X.columns).fillna(0.5)
# convert predictions from sharpe ratio to means
mu = pred_sh * np.diag(sigma)
return mu
class SingleIndexCovariance(BaseEstimator):
"""Estimation of covariance matrix by Ledoit and Wolf (http://www.ledoit.net/ole2.pdf).
It combines sample covariance matrix with covariance matrix from single-index model and
automatically estimates shrinking parameter alpha.
Assumes that first column represents index.
Note that Ledoit-Wolf is already implemented in scikit-learn.
"""
def __init__(self, alpha=None):
self.alpha = alpha
def _sample_covariance(self, X):
return EmpiricalCovariance().fit(X).covariance_
def _single_index_covariance(self, X, S):
# estimate beta from CAPM (use precomputed sample covariance to calculate beta)
# https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line
var_market = S[0, 0]
y = X[:, 0]
beta = S[0, :] / var_market
alpha = np.mean(X, 0) - beta * np.mean(y)
# get residuals and their variance
eps = X - alpha - np.matrix(y).T * np.matrix(beta)
D = np.diag(np.var(eps, 0))
return var_market * np.matrix(beta).T * np.matrix(beta) + D
def _P(self, X, S):
Xc = X - np.mean(X, 0)
T, N = X.shape
P = np.zeros((N, N))
for i in range(N):
for j in range(i, N):
P[i, j] = P[j, i] = sum((Xc[:, i] * Xc[:, j] - S[i, j]) ** 2)
return P / T
def _rho(self, X, S, F, P):
Xc = X - np.mean(X, 0)
T, N = X.shape
R = np.zeros((N, N))
for i in range(N):
for j in range(i, N):
g = (
S[j, 0] * S[0, 0] * Xc[:, i]
+ S[i, 0] * S[0, 0] * Xc[:, j]
- S[i, 0] * S[j, 0] * Xc[:, 0]
) / S[0, 0] ** 2
R[i, j] = R[j, i] = (
1.0
/ T
* sum(g * Xc[:, 0] * Xc[:, i] * Xc[:, j] - F[i, j] * S[i, j])
)
return np.sum(R)
def _gamma(self, S, F):
return np.sum((F - S) ** 2)
def _optimal_alpha(self, X, S, F):
T = X.shape[0]
P = self._P(X, S)
phi = np.sum(P)
gamma = self._gamma(S, F)
rho = self._rho(X, S, F, P)
return 1.0 / T * (phi - rho) / gamma
def fit(self, X):
# use implicitely with arrays
X = np.array(X)
# sample and single-index covariance
S = self._sample_covariance(X)
F = self._single_index_covariance(X, S)
alpha = self.alpha or self._optimal_alpha(X, S, F)
S_hat = alpha * F + (1 - alpha) * S
self.covariance_ = S_hat
self.optimal_alpha_ = alpha
return self
class HistoricalSharpeEstimator(object):
def __init__(
self,
window=None,
alpha=1e10,
override_sharpe=None,
prior_sharpe=0.3,
max_sharpe=100.0,
max_mu=100.0,
):
self.window = window
self.alpha = alpha
self.prior_sharpe = prior_sharpe
self.max_sharpe = max_sharpe
self.max_mu = max_mu
self.override_sharpe = override_sharpe or {}
def fit(self, X, sigma):
if self.window:
X = X.iloc[-self.window :]
# get mean and variance of sharpe ratios
mu_sh = tools.sharpe(X)
var_sh = tools.sharpe_std(X) ** 2
# combine prior sharpe ratio with observations
alpha = self.alpha
est_sh = (mu_sh / var_sh + self.prior_sharpe * alpha) / (1.0 / var_sh + alpha)
est_sh = np.minimum(est_sh, self.max_sharpe)
# override sharpe ratios
for k, v in self.override_sharpe.items():
if k in est_sh:
est_sh[k] = v
mu = est_sh * pd.Series(np.sqrt(np.diag(sigma)), index=sigma.index)
mu = np.minimum(mu, self.max_mu)
# print(est_sh[{'XIV', 'ZIV', 'UGAZ'} & set(est_sh.index)].to_dict())
return mu
def ar(vals, frac):
r = list(vals[:1])
for v in vals[1:]:
r.append(frac * r[-1] + v)
return r
class FractionalCovariance(covariance.OAS):
def __init__(self, frac, *args, **kwargs):
self.frac = frac
super().__init__(*args, **kwargs)
def fit(self, Y):
# calculate fractional returns
logY = np.log(Y)
fracY = ar(logY, self.frac)
return super().fit(fracY)
class ExponentiallyWeightedCovariance(BaseEstimator):
def __init__(self, span):
self.span = span
def fit(self, X):
alpha = 2 / (self.span + 1)
w = (1 - alpha) ** np.arange(len(X))[::-1]
w = np.tile(w, (X.shape[1], 1)).T
Xv = X.values * w
C = Xv.T @ Xv / w[:, 0].sum()
self.covariance_ = C
return self
class TaxAdjustment:
    """Adjust mean return for taxes. The factor should be 1.0 if we are at a loss and 0.85 if we are in super
    profit. Anything in between will produce a much smaller factor, around 0.5."""
def __init__(self, market_value, profit, tax=0.15, days_until_year_end=None):
assert market_value.notnull().all()
self.market_value = market_value
self.profit = profit
self.tax = tax
self.days_until_year_end = days_until_year_end
def fit(self, mu, sigma):
b = self.market_value
profit = self.profit
# only pick selected assets
m = mu.loc[b.index]
sigma = sigma.loc[b.index, b.index]
# scale sigma to the end of the year
days_until_year_end = (
self.days_until_year_end
or (
datetime.date(datetime.date.today().year + 1, 1, 1)
- datetime.date.today()
).days
)
sigma = sigma * days_until_year_end / 365
# calculate tax factor
x = np.random.multivariate_normal(m, sigma, size=100000)
r = x @ b
factor = (r + profit > 0) * (1 - self.tax) + (r + profit < 0)
tr = x.T * factor
m = mu.copy()
m.update(pd.Series(tr.mean(axis=1), index=b.index))
# f = (tr.mean() - np.minimum(profit, profit * (1 - self.tax))) / r.mean()
print(f"Tax loss: {(m / mu).loc[b.index].round(2)}")
# adjust mean returns and update original mean
# mu = mu.copy()
# mu.update(m * f)
return m
class JPMEstimator(object):
def __init__(self, year=2021, currency="usd", rfr=0.0, verbose=False):
self.rfr = rfr
self.verbose = verbose
self.year = year
self.currency = currency
self.col_ret = f"Arithmetic Return {year}"
def _parse_jpm(self):
# load excel
path = (
Path(__file__).parents[1]
/ "data"
/ "jpm_assumptions"
/ f"jpm-matrix-{self.currency}-{self.year}.xlsx"
)
df = pd.read_excel(path, skiprows=7)
df.columns = [
"class",
"asset",
f"Compound Return {self.year}",
self.col_ret,
"Annualized Volatility",
f"Compound Return {self.year - 1}",
] + list(df.columns[6:])
df["class"] = df["class"].fillna(method="ffill")
# correlation matrix
corr = df.iloc[:, 6:]
corr.index = df.asset
corr.columns = df.asset
corr = corr.fillna(corr.T)
# returns matrix
rets = df.iloc[:, 1:6].set_index("asset")
rets = rets.replace({"-": None}).astype(float) / 100
# fix names
rets.index = [c.replace("\xa0", " ") for c in rets.index]
corr.index = [c.replace("\xa0", " ") for c in corr.index]
corr.columns = [c.replace("\xa0", " ") for c in corr.columns]
if self.currency == "usd":
rf = rets.loc["U.S. Cash", self.col_ret]
elif self.currency == "eur":
rf = rets.loc["Euro Cash", self.col_ret]
else:
raise NotImplementedError()
rets["Sharpe"] = (rets[self.col_ret] - rf) / rets["Annualized Volatility"]
return rets, corr
def jpm_map(self):
jpm = {}
for k, syms in JPM_MAP.items():
jpm[k] = k
for sym in syms:
jpm[sym] = k
return jpm
def simulate(self, S):
# simulate assets from JPM
rets, corr = self._parse_jpm()
freq = tools.freq(S.index)
mean = rets[self.col_ret] / freq
vols = rets["Annualized Volatility"] / np.sqrt(freq)
cov = corr * np.outer(vols, vols)
Y = np.random.multivariate_normal(mean, cov, size=len(S))
        Y = pd.DataFrame(1 + Y, columns=mean.index, index=S.index)
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
from msticpy.analysis.anomalous_sequence import sessionize
class TestSessionize(unittest.TestCase):
def setUp(self):
self.df1 = pd.DataFrame({"UserId": [], "time": [], "operation": []})
self.df1_with_ses_col = pd.DataFrame(
{"UserId": [], "time": [], "operation": [], "session_ind": []}
)
self.df1_sessionized = pd.DataFrame(
{
"UserId": [],
"time_min": [],
"time_max": [],
"operation_list": [],
"duration": [],
"number_events": [],
}
)
self.df2 = pd.DataFrame(
{
"UserId": [1, 1, 2, 3, 1, 2, 2],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-06 11:06:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
],
"operation": ["A", "B", "C", "A", "A", "B", "C"],
}
)
self.df2_with_ses_col_1 = pd.DataFrame(
{
"UserId": [1, 1, 1, 2, 2, 2, 3],
"time": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"operation": ["A", "B", "A", "C", "B", "C", "A"],
"session_ind": [0, 0, 1, 2, 3, 4, 5],
}
)
self.df2_sessionized_1 = pd.DataFrame(
{
"UserId": [1, 1, 2, 2, 2, 3],
"time_min": [
pd.to_datetime("2020-01-03 00:00:00"),
pd.to_datetime("2020-01-03 01:00:00"),
pd.to_datetime("2020-01-05 00:00:00"),
pd.to_datetime("2020-01-05 00:21:00"),
pd.to_datetime("2020-01-05 00:25:00"),
pd.to_datetime("2020-01-06 11:06:00"),
],
"time_max": [
pd.to_datetime("2020-01-03 00:01:00"),
pd.to_datetime("2020-01-03 01:00:00"),
                    pd.to_datetime("2020-01-05 00:00:00"),
#!/usr/bin/env python
# coding: utf-8
# # ReEDS Scenarios on PV ICE Tool STATES
# To explore different scenarios for future installation projections of PV (or any technology), ReEDS output data can be useful in providing standard scenarios. ReEDS installation projections are used in this journal as input data to the PV ICE tool.
#
# Current sections include:
#
# <ol>
#     <li> Reading a standard ReEDS output file and saving it in a PV ICE input format </li>
#     <li> Reading scenarios of interest and running the PV ICE tool </li>
#     <li> Plotting </li>
#     <li> GeoPlotting </li>
# </ol>
# Notes:
#
# Scenarios of Interest:
# - the Ref.Mod,
# - 95-by-35.Adv, and
# - 95-by-35+Elec.Adv+DR ones
#
# In[1]:
import PV_ICE
import numpy as np
import pandas as pd
import os,sys
import matplotlib.pyplot as plt
from IPython.display import display
plt.rcParams.update({'font.size': 22})
plt.rcParams['figure.figsize'] = (12, 8)
# In[2]:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP' / 'SF_States')
statedatafolder = str(Path().resolve().parent.parent.parent / 'PV_ICE' / 'TEMP' / 'STATEs')
print ("Your simulation will be stored in %s" % testfolder)
# In[3]:
PV_ICE.__version__
# ### Reading REEDS original file to get list of SCENARIOs, PCAs, and STATEs
# In[4]:
r"""
reedsFile = str(Path().resolve().parent.parent.parent / 'December Core Scenarios ReEDS Outputs Solar Futures v2a.xlsx')
print ("Input file is stored in %s" % reedsFile)
rawdf = pd.read_excel(reedsFile,
sheet_name="UPV Capacity (GW)")
#index_col=[0,2,3]) #this casts scenario, PCA and State as levels
#now set year as an index in place
#rawdf.drop(columns=['State'], inplace=True)
rawdf.drop(columns=['Tech'], inplace=True)
rawdf.set_index(['Scenario','Year','PCA', 'State'], inplace=True)
scenarios = list(rawdf.index.get_level_values('Scenario').unique())
PCAs = list(rawdf.index.get_level_values('PCA').unique())
STATEs = list(rawdf.index.get_level_values('State').unique())
simulationname = scenarios
simulationname = [w.replace('+', '_') for w in simulationname]
simulationname
SFscenarios = [simulationname[0], simulationname[4], simulationname[8]]
"""
# ### Reading GIS inputs
# In[5]:
r"""
GISfile = str(Path().resolve().parent.parent.parent.parent / 'gis_centroid_n.xlsx')
GIS = pd.read_excel(GISfile)
GIS = GIS.set_index('id')
GIS.head()
GIS.loc['p1'].long
"""
# ### Create Scenarios in PV_ICE
# #### Downselect to Solar Future scenarios of interest
#
# Scenarios of Interest:
# <li> Ref.Mod
# <li> 95-by-35.Adv
# <li> 95-by-35+Elec.Adv+DR
# In[6]:
SFscenarios = ['Reference.Mod', '95-by-35.Adv', '95-by-35_Elec.Adv_DR']
SFscenarios
# In[7]:
STATEs = ['WA', 'CA', 'VA', 'FL', 'MI', 'IN', 'KY', 'OH', 'PA', 'WV', 'NV', 'MD',
'DE', 'NJ', 'NY', 'VT', 'NH', 'MA', 'CT', 'RI', 'ME', 'ID', 'MT', 'WY', 'UT', 'AZ', 'NM',
'SD', 'CO', 'ND', 'NE', 'MN', 'IA', 'WI', 'TX', 'OK', 'OR', 'KS', 'MO', 'AR', 'LA', 'IL', 'MS',
'AL', 'TN', 'GA', 'SC', 'NC']
# ### Create the 3 Scenarios and assign Baselines
#
# Keeping track of each scenario as its own PV ICE Object.
# In[8]:
MATERIALS = ['glass', 'silicon', 'silver','copper','aluminium','backsheet','encapsulant']
# In[9]:
#for ii in range (0, 1): #len(scenarios):
i = 0
r1 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r1.createScenario(name=STATEs[jj], file=filetitle)
r1.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
i = 1
r2 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r2.createScenario(name=STATEs[jj], file=filetitle)
r2.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
i = 2
r3 = PV_ICE.Simulation(name=SFscenarios[i], path=testfolder)
for jj in range (0, len(STATEs)):
filetitle = SFscenarios[i]+'_'+STATEs[jj]+'.csv'
filetitle = os.path.join(statedatafolder, filetitle)
r3.createScenario(name=STATEs[jj], file=filetitle)
r3.scenario[STATEs[jj]].addMaterials(MATERIALS, baselinefolder=r'..\..\baselines\SolarFutures_2021', nameformat=r'\baseline_material_{}_Reeds.csv')
# # Calculate Mass Flow
# In[10]:
r1.scenMod_noCircularity()
r2.scenMod_noCircularity()
r3.scenMod_noCircularity()
IRENA= False
PERFECTMFG = False
ELorRL = 'RL'
if IRENA:
r1.scenMod_IRENIFY(ELorRL=ELorRL)
r2.scenMod_IRENIFY(ELorRL=ELorRL)
r3.scenMod_IRENIFY(ELorRL=ELorRL)
if PERFECTMFG:
r1.scenMod_PerfectManufacturing()
r2.scenMod_PerfectManufacturing()
r3.scenMod_PerfectManufacturing()
# In[11]:
r1.calculateMassFlow()
r2.calculateMassFlow()
r3.calculateMassFlow()
# In[12]:
print("STATEs:", r1.scenario.keys())
print("Module Keys:", r1.scenario[STATEs[jj]].data.keys())
print("Material Keys: ", r1.scenario[STATEs[jj]].material['glass'].materialdata.keys())
# # OPEN EI
# In[13]:
kk=0
SFScenarios = [r1, r2, r3]
SFScenarios[kk].name
# In[14]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
for ii in range (0, len(materials)):
sentit = '@value|'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
for ii in range (0, len(materials)):
sentit = '@value|Cumulative'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]].cumsum()/keywscale[jj]
else:
sentit = '@value|'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI.csv', index=False)
print("Done")
# In[15]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
for ii in range (0, len(materials)):
sentit = '@value|'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]]/keywscale[jj]
else:
sentit = '@value|'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI Yearly Only.csv', index=False)
print("Done")
# In[16]:
# WORK ON THIS FOR OPENEI
keyw=['mat_Virgin_Stock','mat_Total_EOL_Landfilled','mat_Total_MFG_Landfilled', 'mat_Total_Landfilled',
'new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['VirginMaterialDemand','EOLMaterial', 'ManufacturingScrap','ManufacturingScrapAndEOLMaterial',
'NewInstalledCapacity','InstalledCapacity']
keywunits = ['MetricTonnes', 'MetricTonnes', 'MetricTonnes', 'MetricTonnes',
'MW','MW']
keywdcumneed = [True,True,True,True,
True,False]
keywdlevel = ['material','material','material','material',
'module','module']
keywscale = [1000000, 1000000, 1000000, 1000000,
1,1e6]
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
if keywdlevel[jj] == 'material':
if keywdcumneed[jj]:
for ii in range (0, len(materials)):
sentit = '@value|Cumulative'+keywprint[jj]+'|'+materials[ii].capitalize() +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyw[jj]].cumsum()/keywscale[jj]
else:
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+'PV' +'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
foo['@states'] = STATEs[zz]
foo['@scenario|Solar Futures'] = SFScenarios[kk].name
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI Cumulatives Only.csv', index=False)
print("Done")
# In[ ]:
# WORK ON THIS FOR OPENEI
# SCENARIO DIFFERENCES
keyw=['new_Installed_Capacity_[MW]','Installed_Capacity_[W]']
keywprint = ['NewInstalledCapacity','InstalledCapacity']
sfprint = ['Reference','Grid Decarbonization', 'High Electrification']
keywunits = ['MW','MW']
keywdcumneed = [True,False]
keywdlevel = ['module','module']
keywscale = [1,1e6]
materials = []
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for zz in range (0, len(STATEs)):
foo = pd.DataFrame()
for jj in range (0, len(keyw)):
# kk -- scenario
for kk in range(0, 3):
sentit = '@value|'+keywprint[jj]+'|'+sfprint[kk]+'#'+keywunits[jj]
#sentit = '@value|'+keywprint[jj]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]]/keywscale[jj]
if keywdcumneed[jj]:
sentit = '@value|Cumulative'+keywprint[jj]+'|'+sfprint[kk]+'#'+keywunits[jj]
foo[sentit] = SFScenarios[kk].scenario[STATEs[zz]].data[keyw[jj]].cumsum()/keywscale[jj]
# foo['@value|scenario|Solar Futures'] = SFScenarios[kk].name
foo['@states'] = STATEs[zz]
foo['@timeseries|Year'] = SFScenarios[kk].scenario[STATEs[zz]].data.year
scenariolist = scenariolist.append(foo)
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
cols = [scenariolist.columns[-1]] + [col for col in scenariolist if col != scenariolist.columns[-1]]
scenariolist = scenariolist[cols]
#scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE OpenEI ScenarioDifferences.csv', index=False)
print("Done")
# In[ ]:
scenariolist.head()
# # SAVE DATA FOR BILLY: STATES
# In[ ]:
# for rounding to 3 significant figures
N = 2
# SFScenarios[kk].scenario[PCAs[zz]].data.year
#
# Index 20 --> 2030
#
# Index 30 --> 2040
#
# Index 40 --> 2050
# In[ ]:
idx2030 = 20
idx2040 = 30
idx2050 = 40
print("index ", idx2030, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2030])
print("index ", idx2040, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2040])
print("index ", idx2050, " is year ", r1.scenario[STATEs[0]].data['year'].iloc[idx2050])
# #### 6 - STATE Cumulative Virgin Needs by 2050
#
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(STATEs)):
keywordsum.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=STATEs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE 6 - STATE Cumulative2050 VirginMaterialNeeds_tons.csv')
# #### 7 - STATE Cumulative EoL Only Waste by 2050
# In[ ]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = []
for ii in range (0, len(materials)):
keywordsum = []
for zz in range (0, len(STATEs)):
keywordsum.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword].sum())
materiallist.append(keywordsum)
df = pd.DataFrame (materiallist,columns=STATEs, index = materials)
df = df.T
df = df.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , df], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PV ICE 7 - STATE Cumulative2050 Waste_EOL_tons.csv')
# ##### 8 - STATE Yearly Virgin Needs 2030 2040 2050
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PVICE 8 - STATE Yearly 2030 2040 2050 VirginMaterialNeeds_tons.csv')
# #### 9 - STATE Yearly EoL Waste 2030 2040 2050
# In[ ]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium', 'encapsulant', 'backsheet']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tonnes
#scenariolist = scenariolist.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#scenariolist = scenariolist.applymap(lambda x: int(x))
scenariolist.to_csv('PVICE 9 - STATE Yearly 2030 2040 2050 Waste_EOL_tons.csv')
# # APPENDIX TABLES
#
#
# #### Appendix - Cumulative Virgin Stock
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:20].sum())
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:30].sum())
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:].sum())
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
# Loop over SF Scenarios
for kk in range(0, 3):
filter_col = [col for col in scenariolist if (col.startswith(SFScenarios[kk].name)) ]
scen = scenariolist[filter_col]
    scen.columns = scen.columns.str.replace(SFScenarios[kk].name+'_', '', regex=False) # remove the scenario-name prefix from the column names
scen = scen.rename_axis('State')
scen = scen.sort_values(by='glass_2050', ascending=False)
scen.sum(axis=0)
reduced = scen.iloc[0:23]
new_row = pd.Series(data=scen.iloc[23::].sum(axis=0), name='OTHER STATES')
new_row_2 = pd.Series(data=scen.sum(axis=0), name='US TOTAL')
reduced = reduced.append(new_row, ignore_index=False)
reduced = reduced.append(new_row_2, ignore_index=False)
reduced = reduced.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
reduced = reduced.applymap(lambda x: int(x))
reduced.to_csv('PV ICE Appendix - '+ SFScenarios[kk].name + ' Cumulative Virgin Stock by State.csv')
# #### Appendix - Yearly Virgin Stock
# In[ ]:
keyword='mat_Virgin_Stock'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2030])
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2040])
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][idx2050])
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
# Loop over SF Scenarios
for kk in range(0, 3):
filter_col = [col for col in scenariolist if (col.startswith(SFScenarios[kk].name)) ]
scen = scenariolist[filter_col]
    scen.columns = scen.columns.str.replace(SFScenarios[kk].name+'_', '', regex=False) # remove the scenario-name prefix from the column names
scen = scen.rename_axis('State')
scen = scen.sort_values(by='glass_2050', ascending=False)
reduced = scen.iloc[0:23]
new_row = pd.Series(data=scen.iloc[23::].sum(axis=0), name='OTHER STATES')
new_row_2 = pd.Series(data=scen.sum(axis=0), name='US TOTAL')
reduced = reduced.append(new_row, ignore_index=False)
reduced = reduced.append(new_row_2, ignore_index=False)
reduced = reduced.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
reduced = reduced.applymap(lambda x: int(x))
reduced.to_csv('PV ICE Appendix - '+ SFScenarios[kk].name + ' Yearly Virgin Stock by State.csv')
# #### Appendix - Cumulative EOL_ WASTE by State
# In[ ]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
for kk in range(0, 3):
# Loop over Materials
materiallist = pd.DataFrame()
for ii in range (0, len(materials)):
keywordsum2030 = []
keywordsum2040 = []
keywordsum2050 = []
for zz in range (0, len(STATEs)):
keywordsum2030.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:20].sum())
keywordsum2040.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:30].sum())
keywordsum2050.append(SFScenarios[kk].scenario[STATEs[zz]].material[materials[ii]].materialdata[keyword][0:].sum())
yearlylist = pd.DataFrame([keywordsum2030, keywordsum2040, keywordsum2050], columns=STATEs, index = [2030, 2040, 2050])
yearlylist = yearlylist.T
yearlylist = yearlylist.add_prefix(materials[ii]+'_')
materiallist = pd.concat([materiallist, yearlylist], axis=1)
materiallist = materiallist.add_prefix(SFScenarios[kk].name+'_')
scenariolist = pd.concat([scenariolist , materiallist], axis=1)
scenariolist = scenariolist/1000000 # Converting to Metric Tons
# Loop over SF Scenarios
for kk in range(0, 3):
filter_col = [col for col in scenariolist if (col.startswith(SFScenarios[kk].name)) ]
scen = scenariolist[filter_col]
    scen.columns = scen.columns.str.replace(SFScenarios[kk].name+'_', '', regex=False) # remove the scenario-name prefix from the column names
scen = scen.rename_axis('State')
#scen = scen.sort_values(by='glass_2050', ascending=False)
reduced = scen
new_row = pd.Series(data=scen.sum(axis=0), name='US TOTAL')
reduced = reduced.append(new_row, ignore_index=False)
#reduced = reduced.applymap(lambda x: round(x, N - int(np.floor(np.log10(abs(x))))))
#reduced = reduced.applymap(lambda x: int(x))
reduced.to_csv('PV ICE Appendix - '+ SFScenarios[kk].name + ' Cumulative EOL_ WASTE by State.csv')
# ##### Sparkplots + APPENDIX - Yearly EoL Waste
# In[ ]:
sparkplotfolder = os.path.join(testfolder, 'SPARKPLOTS')
if not os.path.exists(sparkplotfolder):
os.makedirs(sparkplotfolder)
# In[ ]:
keyword='mat_Total_EOL_Landfilled'
materials = ['glass', 'silicon', 'silver', 'copper', 'aluminium']
SFScenarios = [r1, r2, r3]
# Loop over SF Scenarios
scenariolist = pd.DataFrame()
import ast
import datetime
import time
import math
import pypandoc
import os
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
import pandas as pd
import statsmodels.api as sm
from library.api import API_HOST, fetch_objects, fetch_objects_by_id, get_token
from library.settings import MIN_VIDEO_LENGTH
def get_unix_date(date):
if date:
timestamp = time.mktime(datetime.datetime.strptime(date.split('+')[0], "%Y-%m-%dT%H:%M:%SZ").timetuple())
return int(timestamp)
return None
def html2latex(text):
output = pypandoc.convert(text, 'latex', format='html', extra_args=['-f', 'html+tex_math_dollars'])
return output
def process_step_url(row):
if ('max_step_variation' not in row.index) or (row.max_step_variation == 1):
# no step variations
return '{}/lesson/{}/step/{}'.format(API_HOST, row.lesson_id, row.step_position)
return '{}/lesson/{}/step/{}?alternative={}'.format(API_HOST,
row.lesson_id, row.step_position, row.step_variation)
# API functions
def get_course_structure(course_id, cached=True, token=None):
# use cache
course_structure_filename = 'cache/course-{}-structure.csv'.format(course_id)
if os.path.isfile(course_structure_filename) and cached:
course_structure = pd.read_csv(course_structure_filename)
return course_structure
if not token:
token = get_token()
course = fetch_objects_by_id('courses', course_id, token=token)[0]
sections = fetch_objects('sections', token=token, id=course['sections'])
unit_ids = [unit for section in sections for unit in section['units']]
units = fetch_objects('units', token=token, id=unit_ids)
lesson_ids = [unit['lesson'] for unit in units]
lessons = fetch_objects('lessons', token=token, id=lesson_ids)
step_ids = [step for lesson in lessons for step in lesson['steps']]
steps = fetch_objects('steps', token=token, id=step_ids)
step_id = [step['id'] for step in steps]
step_position = [step['position'] for step in steps]
step_type = [step['block']['name'] for step in steps]
step_lesson = [step['lesson'] for step in steps]
step_correct_ratio = [step['correct_ratio'] for step in steps]
course_structure = pd.DataFrame({'course_id': course_id,
'lesson_id': step_lesson,
'step_id': step_id,
'step_position': step_position,
'step_type': step_type,
'step_correct_ratio': step_correct_ratio})
module_position = [[section['position']]*len(section['units']) for section in sections]
module_position = [value for small_list in module_position for value in small_list]
module_id = [[section['id']]*len(section['units']) for section in sections]
module_id = [value for small_list in module_id for value in small_list]
module_hard_deadline = [[section['hard_deadline']]*len(section['units']) for section in sections]
module_hard_deadline = [value for small_list in module_hard_deadline for value in small_list]
module_begin_date = [[section['begin_date']]*len(section['units']) for section in sections]
module_begin_date = [value for small_list in module_begin_date for value in small_list]
lesson_position = [unit['position'] for unit in units]
module_structure = pd.DataFrame({'lesson_id': lesson_ids,
'lesson_position': lesson_position,
'module_id': module_id,
'module_position': module_position,
'hard_deadline': module_hard_deadline,
'begin_date': module_begin_date})
course_structure = course_structure.merge(module_structure)
course_structure = course_structure.sort_values(['module_position', 'lesson_position', 'step_position'])
course_structure.to_csv(course_structure_filename, index=False)
return course_structure
def get_course_submissions(course_id, course_structure=pd.DataFrame(), cached=True, token=None):
header = ['submission_id', 'step_id', 'user_id', 'attempt_time', 'submission_time', 'status']
# use cache
course_submissions_filename = 'cache/course-{}-submissions.csv'.format(course_id)
if os.path.isfile(course_submissions_filename) and cached:
course_submissions = pd.read_csv(course_submissions_filename)
course_submissions = course_submissions[header]
return course_submissions
if not token:
token = get_token()
if course_structure.empty:
course_structure = get_course_structure(course_id, token)
course_submissions = pd.DataFrame()
for step in course_structure.step_id.unique().tolist():
step_submissions = pd.DataFrame(fetch_objects('submissions', token=token, step=step))
if step_submissions.empty:
continue
step_submissions = step_submissions.rename(columns={'id': 'submission_id',
'time': 'submission_time',
'attempt': 'attempt_id'})
attempt_ids = step_submissions['attempt_id'].unique().tolist()
step_attempts = pd.DataFrame(fetch_objects_by_id('attempts', attempt_ids, token=token))
step_attempts = step_attempts.rename(columns={'id': 'attempt_id',
'time': 'attempt_time',
'status': 'attempt_status'})
step_submissions = pd.merge(step_submissions, step_attempts, on='attempt_id')
step_submissions['step_id'] = step
course_submissions = course_submissions.append(step_submissions)
if course_submissions.empty:
return pd.DataFrame(columns=header)
course_submissions['submission_time'] = course_submissions['submission_time'].apply(get_unix_date)
course_submissions['attempt_time'] = course_submissions['attempt_time'].apply(get_unix_date)
course_submissions = course_submissions.rename(columns={'user': 'user_id'})
course_submissions = course_submissions[header]
course_submissions.to_csv(course_submissions_filename, index=False)
return course_submissions
def get_course_grades(course_id, cached=True, token=None):
header = ['user_id', 'step_id', 'is_passed', 'score', 'total_score', 'date_joined', 'last_viewed']
# use cache
course_grades_filename = 'cache/course-{}-grades.csv'.format(course_id)
if os.path.isfile(course_grades_filename) and cached:
course_grades = pd.read_csv(course_grades_filename)
course_grades = course_grades[header]
return course_grades
if not token:
token = get_token()
course_grades = pd.DataFrame()
grades = fetch_objects('course-grades', course=course_id, token=token)
for grade in grades:
user_grade = pd.DataFrame(grade['results']).transpose()
user_grade['user_id'] = grade['user']
user_grade['total_score'] = grade['score']
user_grade['date_joined'] = grade['date_joined']
user_grade['last_viewed'] = grade['last_viewed']
course_grades = course_grades.append(user_grade)
course_grades['date_joined'] = course_grades['date_joined'].apply(get_unix_date)
course_grades['last_viewed'] = course_grades['last_viewed'].apply(get_unix_date)
course_grades = course_grades.reset_index(drop=True)
course_grades = course_grades[header]
course_grades.to_csv(course_grades_filename, index=False)
return course_grades
def get_enrolled_users(course_id, token=None):
if not token:
token = get_token()
learner_group = fetch_objects('courses', token=token, pk=course_id)[0]['learners_group']
users = fetch_objects('groups', token=token, pk=learner_group)[0]['users']
return users
def process_options_with_name(data, reply, option_names):
data = ast.literal_eval(data)
reply = ast.literal_eval(reply)['choices']
is_multiple = data['is_multiple_choice']
options = data['options']
option_id = []
clue = []
for op in options:
if op in option_names.option_name.tolist():
val = option_names.loc[option_names.option_name == op, 'option_id'].values[0]
clue_val = option_names.loc[option_names.option_name == op, 'is_correct'].values[0]
else:
val = np.nan
clue_val = np.nan
option_id += [val]
clue += [clue_val]
answer = [(c == r) for c, r in zip(clue, reply)]
options = pd.DataFrame({'is_multiple': is_multiple,
'option_id': option_id,
'answer': answer,
'clue': clue})
options = options[['is_multiple', 'option_id', 'answer', 'clue']]
return options
def get_question(step_id):
source = fetch_objects('step-sources', id=step_id)
try:
question = source[0]['block']['text']
except:
question = '\n'
question = html2latex(question)
return question
def get_step_options(step_id):
source = fetch_objects('step-sources', id=step_id)
try:
options = source[0]['block']['source']['options']
options = pd.DataFrame(options)
is_multiple = source[0]['block']['source']['is_multiple_choice']
except KeyError:
options = pd.DataFrame(columns=['step_id', 'option_id', 'option_name', 'is_correct', 'is_multiple'])
return options
options['step_id'] = step_id
options['is_multiple'] = is_multiple
options = options.sort_values('text').reset_index()
options = options.rename(columns={'text': 'option_name'})
options['option_id'] = options.index + 1
options = options[['step_id', 'option_id', 'option_name', 'is_correct', 'is_multiple']]
return options
def get_step_info(step_id):
info = pd.Series(fetch_objects('steps', pk=step_id)[0])
    info = info.rename({'id': 'step_id'})  # info is a Series, so rename its index label
return info
# IRT functions
def create_answer_matrix(data, user_column, item_column, value_column, aggfunc=np.mean, time_column=None):
if time_column:
# select only the first response
data = data.loc[data.groupby([item_column, user_column])[time_column].idxmin()]
data = data.drop_duplicates(subset=[item_column, user_column])
answers = pd.pivot_table(data, values=[value_column], index=[user_column], columns=[item_column],
aggfunc=aggfunc)
if not answers.empty:
answers = answers[value_column]
return answers
# TODO: add Cronbach's alpha to item statistics (a sketch is given after get_item_statistics below)
# see http://stackoverflow.com/questions/20799403/improving-performance-of-cronbach-alpha-code-python-numpy
def get_item_statistics(answers, discrimination_prop=0.3):
total_people = answers.shape[0]
n_people = answers.count(axis=0)
# use mean (not sum) because of NA values
item_difficulty = 1 - answers.mean(axis=0)
total_score = answers.mean(axis=1)
item_total_corr = answers.corrwith(total_score)
n_top_people = int(discrimination_prop * total_people)
low_performers = total_score.sort_values().index[:n_top_people]
top_performers = total_score.sort_values().index[-n_top_people:]
item_discrimination = answers.loc[top_performers].mean(axis=0) - answers.loc[low_performers].mean(axis=0)
stats = pd.DataFrame({'item': item_difficulty.index,
'n_people': n_people,
'difficulty': item_difficulty,
'item_total_corr': item_total_corr,
'discrimination': item_discrimination})
stats.reset_index(drop=True, inplace=True)
stats = stats[['item', 'n_people', 'difficulty', 'discrimination', 'item_total_corr']]
return stats
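# The TODO above asks for Cronbach's alpha; the helper below is a hedged sketch (not part of
# the original module) using the standard item-variance formula
# alpha = k/(k-1) * (1 - sum(item variances) / variance(total score)),
# computed on complete response rows only for simplicity.
def get_cronbach_alpha(answers):
    # answers: user x item matrix of scored responses, as built by create_answer_matrix
    complete = answers.dropna(axis=0, how='any')
    n_items = complete.shape[1]
    if n_items < 2 or complete.shape[0] < 2:
        return np.nan
    item_variances = complete.var(axis=0, ddof=1)
    total_variance = complete.sum(axis=1).var(ddof=1)
    if total_variance == 0:
        return np.nan
    return (n_items / (n_items - 1)) * (1 - item_variances.sum() / total_variance)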
# Video report
def get_video_stats(step_id, cached=True, token=None):
if not token:
token = get_token()
cached_name = 'cache/step-{}-videostats.csv'.format(step_id)
if cached and os.path.isfile(cached_name):
stats = pd.read_csv(cached_name)
return stats
stats = pd.DataFrame(fetch_objects('video-stats', token=token, step=step_id))
if not stats.empty:
stats.to_csv(cached_name, index=False)
stats = pd.read_csv(cached_name)
return stats
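# The helpers get_smoothing_data, detect_peaks and peak_plot are used by get_video_peaks below
# but their definitions are not shown in this excerpt. The sketches here are assumptions about
# plausible implementations (LOWESS smoothing via the already-imported statsmodels, and window
# detection via scipy.signal); column names follow how get_video_peaks consumes the result.
from scipy.signal import find_peaks, peak_widths

def get_smoothing_data(values, frac=0.05):
    # LOWESS-smooth a 1-D signal; frac is the fraction of points used for each local fit
    x = np.arange(len(values))
    return sm.nonparametric.lowess(values, x, frac=frac, return_sorted=False)

def detect_peaks(values):
    # return one row per detected peak with approximate window boundaries
    peaks, _ = find_peaks(values, prominence=np.std(values))
    _, _, left, right = peak_widths(values, peaks, rel_height=0.9)
    windows = pd.DataFrame({'start': left.astype(int),
                            'peak': peaks.astype(int),
                            'end': right.astype(int)})
    windows['rise_rate'] = [(values[p] - values[s]) / max(p - s, 1)
                            for s, p in zip(windows['start'], windows['peak'])]
    return windows

def peak_plot(values, windows, ax=None):
    # plot the signal and shade each detected peak window
    if ax is None:
        return
    ax.plot(values)
    for _, row in windows.iterrows():
        ax.axvspan(row['start'], row['end'], alpha=0.3)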
def get_video_peaks(stats, plot=False, ax=None, ax2=None):
header = ['start', 'peak', 'end', 'rise_rate', 'is_common',
'width', 'height', 'area']
if stats.empty:
return pd.DataFrame(columns=header)
row = stats.loc[stats.index[0]]
try:
watched_first = np.array(ast.literal_eval(row['watched_first']))
watched_total = np.array(ast.literal_eval(row['watched_total']))
play = np.array(ast.literal_eval(row['play']))
except ValueError:
return pd.DataFrame(columns=header)
# use only shortest data for analyses
video_length = min(len(watched_first), len(watched_total), len(play))
if video_length < MIN_VIDEO_LENGTH:
return pd.DataFrame(columns=header)
watched_first = watched_first[:video_length]
watched_total = watched_total[:video_length]
play = play[:video_length]
play[0] = play[1] # ignore auto-play in the beginning
rewatching = watched_total - watched_first
# To fight the noise, use smoothing technique before analysis
rewatching = get_smoothing_data(rewatching, frac=0.05)
play = get_smoothing_data(play, frac=0.1)
rewatch_windows = detect_peaks(rewatching)
play_windows = detect_peaks(play)
rewatch_windows['is_common'] = False
play_windows['is_common'] = False
# find common windows
for ind, row in rewatch_windows.iterrows():
start = row['start']
end = row['end']
if play_windows.loc[~((play_windows.end < start) | (end < play_windows.start))].shape[0] > 0:
rewatch_windows.loc[ind, 'is_common'] = True
common_windows = rewatch_windows[rewatch_windows.is_common].copy()
if plot:
peak_plot(rewatching, rewatch_windows, ax)
if ax:
ax.set_ylabel('Num rewatchers', fontsize=10)
peak_plot(play, play_windows, ax2)
if ax2:
ax2.set_xlabel('Time in video (seconds)', fontsize=10)
ax2.set_ylabel('Num play events', fontsize=10)
# calculate peak features (normalized width, height, and area)
total_length = len(rewatching)
total_height = max(rewatching)
total_area = sum(rewatching)
if not common_windows.empty:
common_windows['width'] = common_windows.apply(lambda x: (x['end']-x['start'])/total_length, axis=1)
common_windows['height'] = common_windows.apply(lambda x: rewatching[x['peak']]/total_height, axis=1)
common_windows['area'] = common_windows.apply(
lambda x: rewatching[x['start']:x['end']].sum()/total_area, axis=1)
else:
        common_windows = pd.DataFrame(columns=header)
import logging
import numpy as np
import pandas as pd
from pathlib import Path
import PyCrowdTangle as pct
import time
import glob
import os
from tqdm import tqdm
from ratelimiter import RateLimiter
from .utils import Utils
logger = logging.getLogger(__name__)
class CrowdTangle:
"""Descripción de la clase.
api_key (str): CrowdTangle API key.
"""
def __init__(self, api_key):
"""Constructor method
"""
if not api_key:
raise Exception('Crowdtangle Api Token is missing')
self.api_key = api_key
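    # Hedged usage sketch (not part of the original module; the token and dataframe below are
    # placeholders):
    #
    #   ct = CrowdTangle(api_key="YOUR_CT_API_TOKEN")
    #   urls = pd.DataFrame({"url": ["https://example.com/article"],
    #                        "date": ["2020-01-01 00:00:00"]})
    #   shares = ct.get_shares(urls, platforms=("facebook",), nmax=100, clean_urls=True)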
def get_shares(self, urls, url_column='url', date_column='date', platforms=('facebook', 'instagram'),
nmax=1000, max_calls = 2, clean_urls=False, save_ctapi_output=False,
temp_saves = False, temp_number = 1000,
id_column=None, remove_days=None):
""" Get the URLs shares from CrowdTangle from a list of URLs with publish datetime
Args:
urls (pandas.DataFrame): a dataframe with at least a column "url" containing the URLs, and a column "date" with their published date
url_column (str, optional): name of the column (placed inside quote marks) where the URLs are stored. Defaults to 'url'.
date_column (str, optional): name of the column (placed inside quote marks) where the date of the URLs are stored. Defaults to 'date'.
platforms (tuple, optional): a tuple of platforms to search. You can specify only facebook to search on Facebook, or only instagram to
search on Instagram. Defaults to ('facebook', 'instagram').
            nmax (int, optional): max number of results per query. Defaults to 1000.
            max_calls (int, optional): max number of API calls per minute. It can be lowered or increased
depending on the assigned API rate limit. Defaults to 2.
clean_urls (bool, optional): clean the URLs from tracking parameters. Defaults to False.
save_ctapi_output (bool, optional): saves the original CT API output in rawdata/ folder. Defaults to False.
temp_saves (bool, optional): saves the partial concatenated dataframe to create a final dataframe at the end
            temp_number (int, optional): number of downloaded URLs after which a temporary save is written; temp_saves has to be set to True
            id_column (str, optional): name of the column where the id of each URL is stored.
            remove_days (int, optional): remove shares performed more than X days after the first share
Raises:
            Exception: if url_column or date_column cannot be found in the urls dataframe.
            SystemExit: if an unrecoverable error occurs while querying the CrowdTangle API.
Returns:
pandas.DataFrame: A pandas dataframe of posts that shared the URLs and a number of variables returned by the https://github.com/CrowdTangle/API/wiki/Links CrowdTangle API links endpoint and the original data set of news.
"""
try:
if url_column not in urls.columns:
message = f"Can't find {url_column} in urls dataframe"
raise Exception(message)
if date_column not in urls.columns:
message = f"Can't find {date_column} in urls dataframe"
raise Exception(message)
logger.info("########## PyCoornet ##########")
logger.info(f"get_shares script execute \n\n")
# remove duplicated rows
urls = urls.drop_duplicates(subset=url_column, keep=False)
# set column names
urls = urls.rename(columns={url_column: 'url', date_column: 'date'})
#convert the type of date column to datetime
urls['date'] = pd.to_datetime(urls['date'])
# clean the URLs
if clean_urls:
urls = Utils.clean_urls(urls, 'url')
logger.info("Original URLs have been cleaned")
# create empty dataframe
ct_shares_df = pd.DataFrame()
if temp_saves:
#create dir to save temporal data
Path("rawdata").mkdir(parents=True, exist_ok=True)
# for temporal file numbering
num =1
# if temp number is bigger than the number of urls
if temp_number > len(urls):
temp_number = len(urls)//2
#number of maximum calls to crowdtangle per minute
rate_limiter = RateLimiter(max_calls=max_calls, period=60)
# Progress bar tqdm
for i in tqdm(range(len(urls))):
                # set date limits; endDate is startDate + remove_days (None when remove_days is not given)
startDate = urls.iloc[i, :].loc['date']
#startDate = startDate.replace(microsecond=0)
if remove_days:
days = f"{remove_days} day"
endDate = startDate + pd.Timedelta(days)
else:
endDate = None
url = urls.iloc[i, :].loc['url']
#add ratelimit restriction
with rate_limiter:
try:
# pycrowdtangle get links
data = pct.ct_get_links(link=url, platforms=platforms,
start_date=startDate,
end_date=endDate,
include_history='true',
sortBy='date',
count=nmax,
api_token=self.api_key
)
# if status is an error
if data['status'] != 200:
logger.exception(f"Unexpected http response code on url {url}")
print(f"Unexpected http response code on url {url}")
#next iteration
continue
#if data response is empty
if not data['result']['posts']:
print(f"Empty response on url: {url}")
logger.debug(f"Empty response on url: {url}")
continue
# convert json response to dataframe
                        df = pd.DataFrame(data['result']['posts'])
import requests
import pandas as pd
import json
import datetime as dt
import time
#=========================================================================================
# Automatic CSV File Generator for Meetup.com API Data
# Created by: <NAME>
# Date: Jan 17, 2018
#=========================================================================================
"""Prompts user for API key, zipcode, and search radius (0 - 100 miles).
Stores api key in api_key.txt for later user.
Exports a csv file containing rows with the following meetup group information:
- name of group
- group url (used as the main group identifier for the meetup API)
- city
- latitude of meetup location
- longitude of meetup location
- meetup category (Social, Tech, Arts, etc.)
- datetime of group creation
- status (active, grace)
- # of current members
- join mode (open, approval)
- # of previous events held
- datetime of most recently past event
- # of 'yes' rsvps for most recently past event
"""
# Meetup.com api instructions can be found here: https://www.meetup.com/meetup_api/docs/find/groups/
def get_api_key():
"""Checks if there is an api_key.txt file in the folder.
If none exists, prompts the user for an api key and store it in a newly created file.
"""
try:
with open("api_key.txt", "r") as f: #try to open up the txt file and read in api key
api_key = f.read()
print("***User API key is on file.***")
print("")
key = str(api_key)
return key
except: #if file does not already exist
with open("api_key.txt", "w+") as f: #create the file
print("**(You can find your API key for meetup.com at https://secure.meetup.com/meetup_api/key/)")
print(" ")
key = input("API key: ") #prompt user for api key
f.write(key) #write the user input to the api_key.txt file for later use
return key
def query_api():
"""Queries the meetup.com API and returns a list of dictionaries containing the requested info on meetup groups.
Responses are in batches of 200 entries. There is a time delay between reqs to not exceed the API rate limit.
Once we receive a response containing less than 200 entries, we know we have collected all information we need."""
finished = False #we are just beginning!
batch = 0 #set our first batch number
raw_data = [] #create our list to eventually return
while finished == False: #keep looping until we run out of data to find
parameters = {"key":key, "sign":"true", "page":"200", "offset":batch, "zip":zip, "radius":radius, "only":"category,created,urlname,city,join_mode,last_event,members,name,past_event_count,status,lat,lon", "fields":"last_event,past_event_count"}
response = requests.get("https://api.meetup.com/find/groups", params = parameters) #make the API request
status = response.status_code #get the status of our request
data = response.json() #convert the JSON data to python dictionaries
print("Batch number: {}".format(batch+1))
if status == 200: #things are ok
print("Server request: OK")
if status == 401: #not ok
print("***Bad server request!***")
exit()
if status != 200 and status != 401: #else
print("Server request: Status Code {}".format(status))
print("Number of groups returned: " + str(len(data)))
print(" ")
raw_data.append(data) #add the raw data to our list
if len(data) < 200: #if we get less than 200 entries, stop the loop and return our final list
finished = True
print(" ")
print("***Finished!***")
return raw_data
else: #if not, pause before making a new request with a new batch number
time.sleep(0.25)
batch+=1
def convert_to_df(raw_data):
"""Takes in a list of dictionaries, does some cleaning, and converts each to a pandas df.
Returns a single dataframe combining all entries."""
all_dfs = [] #master list of dataframes
for i in range(0,len(raw_data)): #for each dictionary in our raw_data list
        utc_offset = 0  # fallback if no group in this batch reports a last_event
        # search the batch for an entry containing the utc_offset value to adjust UTC to local time
        for each in raw_data[i]:
            if "last_event" in each:
                utc_offset = int(each["last_event"]["utc_offset"])
                break
for each in raw_data[i]: #clean up each dictionary
if "category" in each: #correct the category name
each["category"] = each["category"]["name"]
each["created"] = each["created"] + utc_offset #correct the founding date to the local timezone
if "past_event_count" in each: #if there is a past event counter make it an integer
each["past_event_count"] = int(each["past_event_count"])
if "last_event" in each: #if there was a past event, clean up the rsvp_count and the time of the event
last = each["last_event"]
each["last_rsvp"] = int(last["yes_rsvp_count"]) #make a new, separate dict key for the rsvp count
each["last_event"] = int(last["time"] + utc_offset) #correct the last event date with UTC offset
        data = pd.DataFrame(raw_data[i])
''' Get Per Season Level data from the Player Page '''
import requests, pandas
from bs4 import BeautifulSoup, Comment
def getpp(player_id):
baseurl = "http://www.basketball-reference.com/players/{firstletter}/{playerid}.html"
return requests.get(baseurl.format(firstletter=player_id[:1],playerid=player_id))
def test(player_id):
return get_player_data(player_id)
class player_page:
def __init__(self,player_id):
self.player_id = player_id
self.all_tables = self.get_player_data()
self.totals = self.all_tables.get('totals', pandas.DataFrame()).pipe(self.add_player_id).pipe(self.remap).pipe(self.create_composite_key).pipe(self.convert_nullcolumns_from_number)
self.per_game = self.all_tables.get('per_game', pandas.DataFrame()).pipe(self.add_player_id)
self.per_minute = self.all_tables.get('per_minute', pandas.DataFrame()).pipe(self.add_player_id)
self.per_poss = self.all_tables.get('per_poss', pandas.DataFrame()).pipe(self.add_player_id)
self.advanced = self.all_tables.get('advanced', pandas.DataFrame()).pipe(self.add_player_id)
self.shooting = self.all_tables.get('shooting', pandas.DataFrame()).pipe(self.add_player_id)
self.advanced_pbp = self.all_tables.get('advanced_pbp', pandas.DataFrame()).pipe(self.add_player_id)
self.playoffs_totals = self.all_tables.get('playoffs_totals', pandas.DataFrame()).pipe(self.add_player_id)
self.playoffs_per_game = self.all_tables.get('playoffs_per_game', pandas.DataFrame()).pipe(self.add_player_id)
        self.playoffs_per_minute = self.all_tables.get('playoffs_per_minute', pandas.DataFrame()).pipe(self.add_player_id)
# -*- coding: utf-8 -*-
from pathlib import Path
import os
import argparse
import logging
from camel_tools.utils.charsets import UNICODE_PUNCT_CHARSET
import pandas as pd
import re
from funcy import log_durations
from camel_tools.utils.normalize import normalize_unicode
# punctuation set used in tokenize_hyph, which skips hyphens. NOTE: LOC hamza and 3ayn are not! part of UNICODE_PUNCT_CHARSET so no need to worry about them.
puncs = UNICODE_PUNCT_CHARSET-{'-'}#,'ʼ','ʻ'}
puncs = dict(zip(puncs,len(puncs)*['']))
# manual tag selection is documented in documentation/tag_selection(dev).tsv and 01-explore_dev.ipynb
SELECTED_TAGS = [
'245a',
'245c',
'100a',
'260b',
'250a',
'245b',
'264b',
'264a',
'700a',
'490a',
'600a',
'246a',
'440a',
'830a',
'260f',
'700t',
'490v',
'600t',
'500a',
'740a',
'111a',
'110b',
'440v',
'246b',
'440p',
'800a',
'775t',
'775d',
'775b',
'830v'
]
punc_remove_dict = dict(zip(UNICODE_PUNCT_CHARSET,['']*len(UNICODE_PUNCT_CHARSET))) #required for depunc(string) but no needed to load for every string
def depunc(string,keep_hyph=False):
dct = punc_remove_dict.copy()
if keep_hyph:
dct.pop('-')
mapped = map(lambda x: dct.get(x,x),string) # for each char if char is in punc_remove_dict get dict value (i.e, empty string ''), else get char itself
return ''.join(mapped)
# TODO: make filter names consistent (anyway, filters returning False will be excluded and True included)
def drop_nan(dfrow):
    '''returns True only when the row has no missing (NaN) values'''
return not dfrow.isnull().any()
def filter_nonnumeric_str(dfrow):
'''checks if ar and rom are of type str, and further checks if those lines are not numeric when stripped of punctuation'''
ar = dfrow['ar']
rom = dfrow['rom']
if type(ar) == str and type(rom) == str:
if not depunc(ar).isnumeric() and not depunc(rom).replace(' ','').isnumeric():
return True
else:
return False
else:
return False
def drop_link_errors(dfrow):
if dfrow['link-error']==False:
return True
else:
return False
def filter_persian(dfrow):
# perfilter
sent = str(dfrow['rom']).split()
for tok in sent:
if tok.endswith('-i') or tok.endswith("-'i"):
return False
else:
return True
def filter_tags(dfrow,selected_tags=SELECTED_TAGS):
combtag = str(dfrow['comb.tag'])
if combtag in selected_tags:
return True
else:
return False
def filter_nonalligned(dfrow):
rom = str(dfrow['rom'])
ar = str(dfrow['ar'])
rom = recompose(tokenize_skiphyph(rom),mode='rom').split()
ar = recompose(tokenize_skiphyph(ar),mode='ar').split()
if len(rom)==len(ar):
return True
else:
return False
def filter_data(lines,filter_funcs, print_log = True, log_additional_columns = ['recID','comb.tag']):
'''applies filter funcs to lines, and logs how many lines were removed, as well as additional logs such as 'comb.tag' or 'id' '''
filtered_lines = lines.copy()
previous = pd.DataFrame()
for func in filter_funcs:
print(func.__name__)
logging.info(func.__name__)
condition = filtered_lines.apply(func,axis=1)==True
previous = filtered_lines
filtered_lines = filtered_lines[condition]
if print_log:
print(f'# of removed lines: {len(previous)-len(filtered_lines)}')
logging.info(f'# of removed lines: {len(previous)-len(filtered_lines)}')
if log_additional_columns:
for column in log_additional_columns:
print(f'# of removed {column}: {len(set(previous[column]))-len(set(filtered_lines[column]))}')
logging.info(f'# of removed {column}: {len(set(previous[column]))-len(set(filtered_lines[column]))}')
# print('# of removed by tag:')
# print(lines[~condition]['comb.tag'].value_counts()[:50]) # prints most frequent removed tags and their frequency
return filtered_lines
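# Hedged usage sketch (not part of the original pipeline): the wrapper below shows how the
# filters defined above are meant to be chained with filter_data on a dataframe that has
# 'ar', 'rom', 'link-error', 'comb.tag' and 'recID' columns.
def example_filter_pipeline(lines):
    filter_funcs = [drop_nan, drop_link_errors, filter_nonnumeric_str,
                    filter_persian, filter_tags, filter_nonalligned]
    return filter_data(lines, filter_funcs, print_log=True)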
### recompose sentences
# 1
def recompose_waw(line): # reattach waws
return line.replace(' و ', ' و')
# 2
def tokenize_skiphyph(sent,puncs=puncs):
chars = []
sent = str(sent)
for char in list(sent):
if char in puncs:
chars.append(' '+char+' ')
else:
chars.append(char)
sent = ''.join(chars)
sent = re.sub(r'\s+',r' ',sent)
return sent.strip()
# 3
def remove_extra_space(sent):
return re.sub(r'\s+',r' ',sent)
# 4
def recompose_hyphens(sent,mode): #TODO: there is no longer a separate rom handling, recomposition happens on all equally, so other code should be changed accordingly
sent = re.sub(r'\s*(-+)\s*',r' \1 ',sent)
if mode == 'rom':
    # NOTE: Rule 16 b from LOC rules: (b) Inseparable prepositions, conjunctions, and other prefixes are connected with what follows
# DEFINE prc3 prc3:0 prc3:na prc3:>a_ques
# DEFINE prc2 prc2:fa_conn prc2:fa_rc prc2:na prc2:0 prc2:wa_part prc2:wa_sub prc2:fa_sub prc2:wa_conj prc2:fa_conj
# DEFINE prc1 prc1:la_emph prc1:ka_prep prc1:fiy_prep prc1:li_jus prc1:0 prc1:la_rc prc1:li_prep prc1:wA_voc prc1:yA_voc prc1:ta_prep prc1:wa_prep prc1:sa_fut prc1:hA_dem prc1:bi_prep prc1:na prc1:la_prep prc1:bi_part
# DEFINE prc0 prc0:na prc0:mA_part prc0:0 prc0:Al_det prc0:mA_neg prc0:lA_neg prc0:mA_rel
# NOTE: from database almor-msa-r13: DEFINE prc1 prc1:la_emph prc1:ka_prep prc1:fiy_prep prc1:li_jus prc1:0 prc1:la_rc prc1:li_prep prc1:wA_voc prc1:yA_voc prc1:ta_prep prc1:wa_prep prc1:sa_fut prc1:hA_dem prc1:bi_prep prc1:na prc1:la_prep prc1:bi_part
sent = re.sub(r'\b(al|ka|fa|la|bi|lil|wa|lā|mā|li|sa|ta|)\s*-\s+',r'\1-',sent) #check to see if all applicable prefixes are covered
return sent.strip()
def recompose_lacunas(sent):
sent = re.sub(r'(\.{2,})',r' \1 ',sent)
sent = remove_extra_space(sent).strip()
return sent
# 5
def recompose_right_punc(sent):
right_attaching_puncs = re.escape(r'.?,)]!،؟')
sent = re.sub(f'\s*([{right_attaching_puncs}])',r'\1',sent)
return sent.strip() # must make sure no trailing spaces at the end!!!!
# 6
def recompose_left_punc(sent):
left_attaching_puncs = re.escape(r'([')
sent = re.sub(f'\s*([{left_attaching_puncs}])\s*',r' \1',sent)
return sent.strip()
# 7
def recompose_date_slash(sent):
return re.sub(r'(\d)\s*(/)\s*(\d)',r'\1\2\3',sent)
# 8
def recompose_quotes(sent):
chars = list(sent)
begin_quote = False
bef = ''
aft = ''
for char_index in range(len(chars)):
char = chars[char_index]
if char_index>0:
bef = chars[char_index-1]
if char_index<len(chars)-1:
aft = chars[char_index+1]
if char in {'"',"'"}:
if not begin_quote:
begin_quote = True
if aft == ' ':
chars[char_index+1] = ''
else:
begin_quote = False
if bef == ' ':
chars[char_index-1] = ''
return ''.join(chars)
def recompose(sent,mode): #TODO: this was changed so only difference between ar and rom is waw handling
#NOTE: order of rules is not trivial!!
sent = remove_extra_space(sent)
sent = tokenize_skiphyph(sent)
sent = recompose_hyphens(sent,mode='rom')
sent = recompose_right_punc(sent)
sent = recompose_lacunas(sent)
sent = recompose_left_punc(sent)
sent = recompose_date_slash(sent)
sent = recompose_quotes(sent)
if mode=='ar':
sent = recompose_waw(sent)
elif mode=='rom':
pass
else:
raise Exception('make sure you have the right recompose mode 1)ar 2)rom')
return sent
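# Hedged usage sketch (the example string is made up, not from the source data): recompose
# re-tokenizes punctuation and then re-attaches hyphenated prefixes, right/left punctuation,
# date slashes and quotes according to the rules above.
def example_recompose():
    rom = "al - kitab al - jadid , 1987 ."
    print(recompose(rom, mode='rom'))  # 'ar' mode would additionally re-attach the conjunction waw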
def create_splits(lines):
def dosplits(record_list_row):
if record_list_row.name%10 == 0:
return 'dev'
elif record_list_row.name%10 in range(1,9):
return 'train'
elif record_list_row.name%10 == 9:
return 'test'
record_list = lines['recID'].drop_duplicates().reset_index(drop=True)
    record_list = pd.DataFrame(record_list)
# ********************************************************************************** #
# #
# Project: FastClassAI workbecnch #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import pickle
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advance plots, for statistics,
import matplotlib.pyplot as plt # for making plots,
import matplotlib as mpl        # to get some basic functions, helping with plot making
import tensorflow as tf
import tensorflow_hub as hub
import scipy.stats as stats # library for statistics and technical programming,
import tensorflow.keras as keras
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ParameterGrid
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier # accepts only numerical data
from sklearn.tree import export_graphviz
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from tensorflow.keras import backend as K # used for housekeeping of tf models,
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.decomposition import PCA
from tensorflow.keras import Sequential
from tensorflow.keras import activations
from tensorflow.keras import initializers
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras import optimizers
from tensorflow.keras import losses
from tensorflow.keras import metrics
from sklearn.metrics import confusion_matrix
# Function, ................................................................................
def plot_NN_loss_acc(*, model_history_df, title="", n_mean=3, figsize=(8,4), top=0.75):
    ''' small function to plot loss and accuracy over epochs, using data created from the history.history attribute returned by keras fit,
        the columns should be called acc, loss, and val_acc, val_loss,
        # ...
        . model_history_df   : dataframe, created from the history.history attribute returned by keras fit (see above)
        . n_mean             : int, how many of the last results to use when calculating the values shown in the subplot titles
        . title              : str, plot title,
    '''
#.. figure, axes,
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=figsize)
fig.suptitle(title)
    #.. Plot loss values
ax1.plot(model_history_df.loc[:,'loss'], label='train loss')
ax1.plot(model_history_df.loc[:,'val_loss'], label='val loss')
ax1.set_title('Mean validation loss {:.3f}'.format(np.mean(model_history_df.loc[:, 'val_loss'][-n_mean:])))
ax1.set_xlabel('epoch')
ax1.set_ylabel('loss value')
ax1.grid(ls="--", color="grey")
ax1.legend()
#.. Plot accuracy values
ax2.plot(model_history_df.loc[:, 'acc'], label='train acc')
ax2.plot(model_history_df.loc[:, 'val_acc'], label='val acc')
ax2.set_title('Mean validation acc {:.3f}'.format(
np.mean(model_history_df.loc[:, 'val_acc'][-n_mean:])))
ax2.set_xlabel('epoch')
ax2.set_ylabel('accuracy')
ax2.set_ylim(0,1)
ax2.grid(ls="--", color="grey")
ax2.legend()
# ...
plt.tight_layout()
plt.subplots_adjust(top=top)
plt.show()
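# A minimal usage sketch (hypothetical model and data names): build the dataframe
# from the history.history dict returned by keras fit and pass it by keyword, with
# columns named acc, loss, val_acc and val_loss as required above:
#   history = model.fit(X_train, y_train, validation_split=0.2, epochs=50)
#   plot_NN_loss_acc(model_history_df=pd.DataFrame(history.history), title="baseline cnn", n_mean=3)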
# Function, ........................................................................
def deNovoCNN_gridsearch(*,
# ... model/run description
method,
run_name,
dataset_name,
dataset_variant,
module_name,
# input data,
cnn_model_function,
grid, # list or list-like, obj created wiht ParameterGrid()
path, # path to data
input_data_info_df=None, # df, with 3. columns,
unit_test = False,
# options,
                         default_validation_split=0.2, # float, used only if this value is not provided in input_data_info_df or as a validation_split parameter in the datagen params,
datagen_params = dict(rescale =1/255), # dict, for keras imagedatagenerator, used only if not provided with parameter grid,
# info,
verbose=False, # detailed info,
model_fit__verbose=0,
):
"""
        this function will train the model provided by cnn_model_function for every parameter combination in the grid
        important:
        cnn_model_function : this function needs to return two objects
             * keras model : already compiled
             * list with callback function/s - always a list !
               here is how the return statement looks:
               return model, [callback_function]
        Limitations,
        because we create the validation dataset as a subset of the train data with image data generators,
        both must be built with the same image generator (or else it will raise a ValueError after the 1st epoch)
        for that reason, validation data get the same transformations as the train data, and typically return lower acc values than
        tests done with other techniques and pipelines,
code example:
_ ,_ , _ , _ = deNovoCNN_gridsearch(
# model/run description
method = ai_method,
run_name = "test_run",
dataset_name = dataset_name,
dataset_variant = dataset_variant,
module_name = None,
# input data,
cnn_model_function = create_cnn_model,
grid = grid,
path = path, # path to dir with data subsets in separate folders for train, test ect...
input_data_info_df = CNN_DF_INFO, # df, with 3. columns, subset_role/results_name/dirname
# results and info,
verbose=True
)
"""
# Set up, .............................................................................
#.. variables,
    model_ID = -1   # to start unique counts of the models from 0
colname_with_classname_in_batch_labels_table = "classname"
#.. objects to store the results,
model_acc_and_parameters_list = list()
model_predictions_dict = dict()
model_parameters_dict = dict() # for iterations from grid
model_history_dict = dict()
    # create a default input_data_info_df if none was provided,
if input_data_info_df is None:
input_data_info_df = pd.DataFrame([
{
"subset_role": "train",
"subset_results_name": "train",
"subset_dirname": "train"
},
{
"subset_role": "valid",
"subset_results_name": "valid",
"subset_dirname": None
},
{
"subset_role": "test",
"subset_results_name": "test",
"subset_dirname": "test"
}
])
else:
pass
#..
if unit_test==True:
subset_dirname_to_use_for_unit_test = input_data_info_df.subset_dirname.iloc[0]
input_data_info_df.subset_dirname = subset_dirname_to_use_for_unit_test
else:
pass
# for loop for grid serch wiith each parameter combination,, ...............................
for params_i, params in enumerate(grid):
model_ID +=1
# check for required parameters, except for validation split,
try:
params['method_group']
except:
params['method_group']="unknown"
try:
params['method_variant']
except:
params['method_variant']="unknown"
try:
params['random_state_nr']
except:
params['random_state_nr']=0
try:
params['batch_size']
except:
params['batch_size']=10
try:
params['img_size']
except:
params['img_size']=[128, 128]
try:
params['epoch']
except:
params['epoch']=100
try:
params['early_strop']
except:
params['early_strop']=None
        # add or reset the status column in the info df,
        input_data_info_df["status"]="awaiting"
        #.. print run description,
'''
printed here, so you know what data are being loaded with image generators,
'''
if verbose==True:
print(f"\n\n..................................................")
print(f"model_ID: {model_ID}")
print(f"method_group: {params['method_group']}")
print(f"method: {method}")
print(f"method_variant: {params['method_variant']}")
print(f"............................... input data ...")
print(f"run_name: {run_name}")
print(f"dataset_name: {dataset_name}")
print(f"dataset_variant: {dataset_variant}")
print(f"unit_test: {unit_test}")
print(f"............................... basic params ...")
print(f"random_state_nr: {params['random_state_nr']}")
print(f"batch_size: {params['batch_size']}")
print(f"img_size: {params['img_size']}")
print(f"epoch: {params['epoch']}")
print(f"..................................................\n")
else:
pass
        # set parameters for the image data generators,
try:
train_datagen_params = params["train_datagen_params"]
valid_datagen_params = params["valid_datagen_params"]
test_datagen_params = params["test_datagen_params"]
            datagen_params_info = "imagedatagen params provided by the user"
except:
train_datagen_params = datagen_params
valid_datagen_params = datagen_params
test_datagen_params = datagen_params
            datagen_params_info = "using default imagedatagen params"
# load train & valid data
if unit_test==True:
valid_datagen_params = train_datagen_params
test_datagen_params = train_datagen_params
else:
pass
        # dirnames (only one dirname is allowed with generators,)
train_subset_dirname = input_data_info_df.subset_dirname.loc[input_data_info_df.subset_role=="train"].iloc[0]
valid_subset_dirname = input_data_info_df.subset_dirname.loc[input_data_info_df.subset_role=="valid"].iloc[0] # not used at this moment,
# OPTION 1, subset valid data from train data
if valid_subset_dirname is None or isinstance(valid_subset_dirname, float):
# set-up directory names and datagen parameters,
if isinstance(valid_subset_dirname, float):
train_datagen_params["validation_split"]=valid_subset_dirname
            # else it's None, so we have to get the validation split value from somewhere,
else:
try:
train_datagen_params["validation_split"] #KeyError if it is missing,
except:
train_datagen_params["validation_split"]=default_validation_split
train_datagen = ImageDataGenerator(**train_datagen_params)
TRAINING_DIR = os.path.join(path, train_subset_dirname)
VALIDATION_DIR = TRAINING_DIR
# update, status:
input_data_info_df.loc[input_data_info_df.subset_role=="train", "status"] = "Loading"
input_data_info_df.loc[input_data_info_df.subset_role=="valid", "status"] = f"{train_datagen_params['validation_split']} of train"
#.. for train dataset
trainset = train_datagen.flow_from_directory(
TRAINING_DIR,
batch_size =params["batch_size"],
target_size =params["img_size"],
shuffle =True,
subset ="training"
)
#.. for validation data, no shuffle, made from the same dataset as train data,
validset = train_datagen.flow_from_directory(
VALIDATION_DIR,
batch_size =params["batch_size"],
target_size =params["img_size"],
shuffle =False,
subset ="validation"
)
# OPTION 2, valid data are loaded from separate directory,
else:
#.. create image generator, validation_split=params["validation_split"]
train_datagen = ImageDataGenerator(**train_datagen_params)
valid_datagen = ImageDataGenerator(**valid_datagen_params)
TRAINING_DIR = os.path.join(path, train_subset_dirname)
VALIDATION_DIR = os.path.join(path, valid_subset_dirname)
# update, status:
input_data_info_df.loc[input_data_info_df.subset_role=="train", "status"] = "Loading"
input_data_info_df.loc[input_data_info_df.subset_role=="valid", "status"] = "Loading"
#.. for train dataset
trainset = train_datagen.flow_from_directory(
TRAINING_DIR,
batch_size =params["batch_size"],
target_size =params["img_size"],
shuffle =True
)
            #.. for validation data, no shuffle, loaded from a separate directory,
validset = valid_datagen.flow_from_directory(
VALIDATION_DIR,
batch_size =params["batch_size"],
target_size =params["img_size"],
shuffle =False
)
# Collect class encoding/decoding - its not the standard one, .....................
class_encoding = trainset.class_indices
class_decoding = dict(zip(list(class_encoding.values()),list(class_encoding.keys())))
# add some info:
if verbose==True:
print("Datagenerator parameters: ",datagen_params_info)
print(input_data_info_df)
print(f"\n")
else:
pass
# train the model, collect the results, and plot history, .........................
#.. create the model,
model, callback_function = cnn_model_function(
input_size=(params["img_size"][0], params["img_size"][1], 3),
output_size=trainset.num_classes,
params=params,
verbose=verbose
)
#.. train the model
if callback_function is not None:
history = model.fit_generator(
generator =trainset, # you provide iterators, instead of data,
validation_data =validset,
epochs =params["epoch"],
                callbacks       =callback_function, # LIST, functions that can be applied at different stages to fight overfitting,
verbose =model_fit__verbose
)
else:
history = model.fit_generator(
generator =trainset, # you provide iterators, instead of data,
validation_data =validset,
epochs =params["epoch"],
verbose =model_fit__verbose
)
#.. store the results,
model_acc = dict()
model_loss = dict()
n = 3 # use last 3 results in history,
acc_results = pd.DataFrame(history.history).iloc[-n::,:].mean(axis=0)
model_acc["model_acc_train"] = acc_results.loc["acc"]
model_acc["model_acc_valid"] = acc_results.loc["val_acc"]
model_loss["loss_acc_train"] = acc_results.loc["loss"]
model_loss["loss_acc_valid"] = acc_results.loc["val_loss"]
# LOAD TEST DATA and store all sort of data in the last loop !, .........................
# prepare objects to store results,
baseline_acc = dict()
one_model_predictions = dict() # with predictions collected for each subset separately,
        # it will be used temporarily, until the last chunk of code reloads model predictions to their final location
        # first, check if test data were provided,
if (input_data_info_df.subset_role=="test").sum()==0:
pass
else:
# get df subset with test data
test__input_data_info_df = pd.DataFrame(input_data_info_df.loc[input_data_info_df.subset_role=="test",:])
test__input_data_info_df.reset_index(inplace=True, drop=True)
            # now check if there is anything to load, and pass if None,
if test__input_data_info_df.shape[0]==1 and test__input_data_info_df.subset_dirname.iloc[0] is None:
pass
else:
# loop over each row, to load and evaluate each test data
if verbose==True:
                print(f"generating predictions for test data: {test__input_data_info_df.shape[0]} subsets")
else:
pass
for test_subset_nr in range(test__input_data_info_df.shape[0]):
# load the dataset,
one_test_xy_name = test__input_data_info_df.subset_results_name.iloc[test_subset_nr]
one_test_subset_name_to_load = test__input_data_info_df.subset_dirname.iloc[test_subset_nr]
#.. generator for test data
test_datagen = ImageDataGenerator(**test_datagen_params)
#.. find how many images there are
temp_testset = test_datagen.flow_from_directory(
os.path.join(path, one_test_subset_name_to_load),
batch_size =params["batch_size"],
target_size =params["img_size"],
shuffle =False
)
#.. get all images in one batch,
test_img_number = len(temp_testset.filenames)
testset = test_datagen.flow_from_directory(
os.path.join(path, one_test_subset_name_to_load),
batch_size =test_img_number,
target_size =params["img_size"],
shuffle =False
)
# calculate test set accuracy,
#.. get predictions (in dummy array)
test_preds = model.predict_generator(testset)
y_true = testset.classes # array with true labels
y_pred = test_preds.argmax(axis=1) # array with predicted labels
model_acc[f"model_acc_{one_test_xy_name}"] = accuracy_score(y_true, y_pred)
model_loss[f"model_loss_{one_test_xy_name}"]= np.nan
                    #.. calculate the test set baseline (accuracy of always predicting the most frequent class)
                    baseline_acc[f"baseline_acc_{one_test_xy_name}"] = pd.Series(y_true).value_counts(normalize=True).sort_values(ascending=False).iloc[0]
#.. store model predictions,
predictions = y_pred
decoded_predictions = pd.Series(y_pred).map(class_decoding).values
model_predictions_proba = test_preds
decoded_y_labels = | pd.Series(y_true) | pandas.Series |
import pytest
import copy
import numpy as np
import pandas as pd
from sklearn.compose import TransformedTargetRegressor
from sklearn.preprocessing import QuantileTransformer
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR, LinearSVR
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.exceptions import NotFittedError
from sklearn.dummy import DummyRegressor
from data_dashboard.model_finder import ModelsNotSearchedError
from data_dashboard.model_finder import WrappedModelRegression
@pytest.mark.parametrize(
("test_input",),
(
([0, 1, 2, 3, 4, 5],),
([6, 7, 8, 9, 10, 11],),
([12, 13, 14, 15, 16, 17],),
)
)
def test_model_finder_dummy_regression(model_finder_regression, test_input):
"""Testing if DummyModel (for regression) is created correctly."""
expected_model = DummyRegressor(strategy="median")
median = 35.619966243279364
expected_model_scores = {"mean_squared_error": 487.0142795860736, "r2_score": -0.003656622727187031}
model_finder_regression.scoring_functions = [mean_squared_error, r2_score]
actual_model, actual_model_scores = model_finder_regression._create_dummy_model()
assert str(actual_model) == str(expected_model)
assert np.array_equal(actual_model.predict(test_input), np.array([median] * len(test_input)))
assert actual_model_scores == expected_model_scores
def test_model_finder_regression_dummy_model_results(model_finder_regression):
"""Testing if dummy_model_results() function returns correct DataFrame (regression)."""
_ = {
"model": "DummyRegressor",
"fit_time": np.nan,
"params": "{'constant': None, 'quantile': None, 'strategy': 'median'}",
"mean_squared_error": 487.0142795860736,
"mean_absolute_error": 14.28810797425516,
"explained_variance_score": 0.0,
"r2_score": -0.003656622727187031
}
expected_df = | pd.DataFrame(_, index=[9999]) | pandas.DataFrame |
import urllib
from io import StringIO
from io import BytesIO
import csv
import numpy as np
from datetime import datetime
import matplotlib.pylab as plt
import pandas as pd
import scipy.signal as signal
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
datos=pd.read_csv('https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2008.txt',sep=";",header=None, decimal=",")
datos[0]=pd.to_datetime(datos[0],format='%d/%m/%Y %H:%M:%S')
#datos.set_index([0],inplace=True)
datos[1]=pd.to_datetime(datos[1],format='%d/%m/%Y %H:%M:%S')
#datos.set_index([1],inplace=True)
datos[1]=str(datos[1])
datos[1]=datos[1].str[1:20]
datos2=pd.read_csv('https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2009.txt',sep=";",header=None, decimal=",")
datos2[0]=pd.to_datetime(datos2[0],format='%d/%m/%Y %H:%M:%S')
#datos.set_index([0],inplace=True)
datos2[1]=pd.to_datetime(datos2[1],format='%d/%m/%Y %H:%M:%S')
#datos.set_index([1],inplace=True)
datos2[1]=str(datos2[1])
datos2[1]=datos2[1].str[1:20]
datos3= | pd.read_csv('https://raw.githubusercontent.com/ComputoCienciasUniandes/FISI2029-201910/master/Seccion_1/Fourier/Datos/transacciones2010.txt',sep=";",header=None, decimal=",") | pandas.read_csv |
from __future__ import division
import argparse
import mayavi.mlab as mlab
from CameraNetwork.visualization import calcSeaMask
import matplotlib.mlab as ml
import datetime
import glob
import json
import moviepy.editor as mpy
import numpy as np
import os
import pandas as pd
import pymap3d
FLIGHT_PATH = r"data\2017_04_22_09_57_54_040000"
MAP_ZSCALE = 3
PARTICLE_SIZE = 0.28
COLUMNS = [0.25, 0.28, 0.3, 0.35, 0.4, 0.45, 0.5, 0.58, 0.65, 0.7, 0.8, 1, 1.3, 1.6, 2, 2.5, 3, 3.5, 4, 5, 6.5, 7.5, 8.5, 10, 12.5, 15, 17.5, 20, 25, 30, 32]
CLIP_DURATION = 18
def load_path(flight_path, lat0=32.775776, lon0=35.024963, alt0=229):
"""Load the flight path."""
file_paths = sorted(glob.glob(os.path.join(flight_path, '*.json')))
data = []
indices = []
lat = []
lon = []
alt = []
relative_alt = []
for file_path in file_paths:
with open(file_path, 'rb') as f:
d = json.load(f)
if len(d['data'])==0 or d['coords'] is None or d['coords']['lat'] == 0:
#
# ignore corrupt data.
#
continue
t = datetime.datetime(*[int(i) for i in os.path.split(file_path)[-1].split('.')[0].split('_')])
indices.append(t)
data.append(d['data'])
lat.append(d['coords']['lat']*1e-7)
lon.append(d['coords']['lon']*1e-7)
alt.append(d['coords']['alt']*1e-3)
relative_alt.append(d['coords']['relative_alt']*1e-3)
data = np.array(data)[..., :-1]
df = | pd.DataFrame(data=data, index=indices, columns=COLUMNS) | pandas.DataFrame |
"""Tools to visualize the JHU CSSE COVID-19 Data and the forecasts made
with it using the model module.
"""
import numpy as np
import pandas as pd
from babel.dates import format_date
from babel.numbers import format_decimal
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import seaborn as sns
from modules import processing
# Seaborn styling options
sns.set_style('darkgrid')
sns.set_context('paper')
sns.set_palette('muted')
palette = sns.xkcd_palette(['denim blue','pale red'])
blue, red = sns.xkcd_palette(['denim blue','pale red'])
# For localized formatting
locale = 'de_DE'
# Dates
today = pd.to_datetime('today').normalize()
yesterday = today - pd.Timedelta(1,'D')
footnote = 'Updated on {}. JHU CSSE COVID-19 Data: https://github.com/CSSEGISandData/COVID-19.'.format(format_date(today, locale=locale))
def cases(cases, cases_forecast=pd.DataFrame()):
fig, ax = plt.subplots()
if cases_forecast.empty == False:
sns.lineplot(data=cases_forecast, dashes=False, legend=False)
for i in np.arange(len(cases_forecast.columns)):
ax.lines[i].set_linestyle('--')
sns.lineplot(data=cases, dashes=False)
ax.set_title('COVID-19 cases')
ax.set_xlabel(None)
ax.set_ylabel('Cases')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y,p: format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
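# A minimal usage sketch (hypothetical frame): each helper expects a wide time-series
# DataFrame indexed by date with one column per country; the optional forecast frame
# has the same layout and is drawn with dashed lines:
#   confirmed = pd.DataFrame({'Germany': [1, 5, 12]}, index=pd.date_range('2020-03-01', periods=3))
#   fig, ax = cases(confirmed)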
def cases_per_million(cases, population, cases_forecast=pd.DataFrame()):
cases = cases / population * 1000000
cases_forecast = cases_forecast / population * 1000000
fig, ax = plt.subplots()
if cases_forecast.empty == False:
sns.lineplot(data=cases_forecast, dashes=False, legend=False)
for i in np.arange(len(cases_forecast.columns)):
ax.lines[i].set_linestyle('--')
sns.lineplot(data=cases, dashes=False)
ax.set_title('COVID-19 cases per million inhabitants')
ax.set_xlabel(None)
ax.set_ylabel('Cases per million inhabitants')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def deaths(deaths, deaths_forecast=pd.DataFrame()):
fig, ax = plt.subplots()
if deaths_forecast.empty == False:
sns.lineplot(data=deaths_forecast, dashes=False, legend=False)
for i in np.arange(len(deaths_forecast.columns)):
ax.lines[i].set_linestyle('--')
sns.lineplot(data=deaths, dashes=False)
ax.set_title('COVID-19 deaths')
ax.set_xlabel(None)
ax.set_ylabel('Deaths')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def deaths_per_million(deaths, population, deaths_forecast=pd.DataFrame()):
deaths = deaths / population * 1000000
deaths_forecast = deaths_forecast / population * 1000000
fig, ax = plt.subplots()
if deaths_forecast.empty == False:
sns.lineplot(data=deaths_forecast, dashes=False, legend=False)
for i in np.arange(len(deaths_forecast.columns)):
ax.lines[i].set_linestyle('--')
sns.lineplot(data=deaths, dashes=False)
ax.set_title('COVID-19 deaths per million inhabitants')
ax.set_xlabel(None)
ax.set_ylabel('Deaths per million inhabitants')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def daily_cases(cases):
cases = processing.time_series_delta(cases).rolling(7).mean()
fig, ax = plt.subplots()
sns.lineplot(data=cases, dashes=False)
ax.set_title('Daily COVID-19 cases (7 day rolling average)')
ax.set_xlabel(None)
ax.set_ylabel('Daily cases')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def daily_deaths(deaths):
deaths = processing.time_series_delta(deaths).rolling(7).mean()
fig, ax = plt.subplots()
sns.lineplot(data=deaths, dashes=False)
ax.set_title('Daily COVID-19 deaths (7 day rolling average)')
ax.set_xlabel(None)
ax.set_ylabel('Daily deaths')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def daily_cases_per_million(cases, population):
cases = processing.time_series_delta(cases).rolling(7).mean() / population * 1000000
fig, ax = plt.subplots()
sns.lineplot(data=cases, dashes=False)
ax.set_title('Daily COVID-19 cases per million inhabitants (7 day rolling average)')
ax.set_xlabel(None)
ax.set_ylabel('Daily cases per million inhabitants')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def daily_deaths_per_million(deaths, population):
deaths = processing.time_series_delta(deaths).rolling(7).mean() / population * 1000000
fig, ax = plt.subplots()
sns.lineplot(data=deaths, dashes=False)
ax.set_title('Daily COVID-19 deaths per million inhabitants (7 day rolling average)')
ax.set_xlabel(None)
ax.set_ylabel('Daily deaths per million inhabitants')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def cases_by_days(cases):
cases = processing.align_from_first_ocurrence(cases)
fig, ax = plt.subplots()
sns.lineplot(data=cases, dashes=False)
    ax.set_title('COVID-19 cases by days since the first occurrence')
ax.set_xlabel('Days since the first case')
ax.set_ylabel('Cases')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.01, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def deaths_by_days(deaths):
deaths = processing.align_from_first_ocurrence(deaths)
fig, ax = plt.subplots()
sns.lineplot(data=deaths, dashes=False)
    ax.set_title('COVID-19 deaths by days since the first occurrence')
ax.set_xlabel('Days since the first death')
ax.set_ylabel('Deaths')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.01, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def cases_by_days_per_million(cases, population):
cases = processing.align_from_first_ocurrence(cases)
cases = cases / population * 1000000
fig, ax = plt.subplots()
sns.lineplot(data=cases, dashes=False)
    ax.set_title('COVID-19 cases by days since the first occurrence per million inhabitants')
ax.set_xlabel('Days since the first case')
ax.set_ylabel('Cases per million inhabitants')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.01, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def deaths_by_days_per_million(deaths, population):
deaths = processing.align_from_first_ocurrence(deaths)
deaths = deaths / population * 1000000
fig, ax = plt.subplots()
sns.lineplot(data=deaths, dashes=False)
    ax.set_title('COVID-19 deaths by days since the first occurrence per million inhabitants')
ax.set_xlabel('Days since the first death')
ax.set_ylabel('Deaths per million inhabitants')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.01, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def mortality(mortality, mortality_forecast=pd.DataFrame()):
fig, ax = plt.subplots()
if mortality_forecast.empty == False:
sns.lineplot(data=mortality_forecast, dashes=False, legend=False)
for i in np.arange(len(mortality_forecast.columns)):
ax.lines[i].set_linestyle('--')
sns.lineplot(data=mortality.rolling(7).mean(), dashes=False)
ax.set_title('COVID-19 mortality (7 day rolling average)')
ax.set_xlabel(None)
ax.set_ylabel('Percentage of deaths per cases')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y,p: format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def country_situation_with_forecast(cases, deaths, cases_forecast, deaths_forecast):
country = cases.name
days = cases_forecast.shape[0]
fig, axs = plt.subplots(2, 2 , figsize=(13, 6), sharex='col')
fig.subplots_adjust(hspace=0.05)
# Cases
sns.lineplot(
data=cases, color=blue, dashes=False,
ax=axs[0,0], label='Confirmed cases')
sns.lineplot(
data=cases_forecast['yhat'], color=blue,
ax=axs[0,0], label='Projected cases')
axs[0,0].lines[1].set_linestyle('--')
axs[0,0].fill_between(
cases_forecast.index, cases_forecast['yhat_lower'],
cases_forecast['yhat_upper'], color=blue, alpha=0.1,
label='95% confidence interval')
axs[0,0].legend(loc='upper left')
axs[0,0].yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y,locale=locale)))
axs[0,0].set_ylabel('Cases')
# Daily cases
sns.lineplot(
data=processing.time_series_delta(cases).rolling(7).mean(),
color=blue, dashes=False, ax=axs[1,0], label='Confirmed daily cases')
sns.lineplot(
data=processing.time_series_delta( | pd.concat([cases[-8:], cases_forecast['yhat']]) | pandas.concat |
#!/usr/bin/env python
# Copyright 2016 DIANA-HEP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import random
import sys
import time
import unittest
import numpy
import histogrammar as hg
from histogrammar.defs import Factory
from histogrammar.primitives.average import Average
from histogrammar.primitives.bag import Bag
from histogrammar.primitives.bin import Bin
from histogrammar.primitives.categorize import Categorize
from histogrammar.primitives.centrallybin import CentrallyBin
from histogrammar.primitives.collection import Branch, Index, Label, UntypedLabel
from histogrammar.primitives.count import Count
from histogrammar.primitives.deviate import Deviate
from histogrammar.primitives.fraction import Fraction
from histogrammar.primitives.irregularlybin import IrregularlyBin
from histogrammar.primitives.minmax import Minimize, Maximize
from histogrammar.primitives.select import Select
from histogrammar.primitives.sparselybin import SparselyBin
from histogrammar.primitives.stack import Stack
from histogrammar.primitives.sum import Sum
from histogrammar import util
from histogrammar.util import xrange
tolerance = 1e-12
util.relativeTolerance = tolerance
util.absoluteTolerance = tolerance
class Numpy(object):
def __enter__(self):
try:
import numpy
except ImportError:
return None
self.errstate = numpy.geterr()
numpy.seterr(invalid="ignore")
return numpy
def __exit__(self, exc_type, exc_value, traceback):
try:
import numpy
numpy.seterr(**self.errstate)
except ImportError:
pass
class Pandas(object):
def __enter__(self):
try:
import pandas # noqa
except ImportError:
return None
def __exit__(self, exc_type, exc_value, traceback):
try:
import pandas # noqa
except ImportError:
pass
def makeSamples(SIZE, HOLES):
with Numpy() as numpy:
if numpy is None:
return {"empty": None, "positive": None, "boolean": None, "noholes": None, "withholes": None, "withholes2": None}
empty = numpy.array([], dtype=float)
if numpy is not None:
rand = random.Random(12345)
positive = numpy.array([abs(rand.gauss(0, 1)) + 1e-12 for i in xrange(SIZE)])
assert all(x > 0.0 for x in positive)
boolean = positive > 1.5
noholes = numpy.array([rand.gauss(0, 1) for i in xrange(SIZE)])
withholes = numpy.array([rand.gauss(0, 1) for i in xrange(SIZE)])
for i in xrange(HOLES):
withholes[rand.randint(0, SIZE)] = float("nan")
for i in xrange(HOLES):
withholes[rand.randint(0, SIZE)] = float("inf")
for i in xrange(HOLES):
withholes[rand.randint(0, SIZE)] = float("-inf")
withholes2 = numpy.array([rand.gauss(0, 1) for i in xrange(SIZE)])
for i in xrange(HOLES):
withholes2[rand.randint(0, SIZE)] = float("nan")
for i in xrange(HOLES):
withholes2[rand.randint(0, SIZE)] = float("inf")
for i in xrange(HOLES):
withholes2[rand.randint(0, SIZE)] = float("-inf")
return {"empty": empty, "positive": positive, "boolean": boolean, "noholes": noholes, "withholes": withholes, "withholes2": withholes2}
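# A minimal usage sketch (hypothetical sizes): build the sample dict once and pull
# out the arrays needed by a test, e.g.
#   samples = makeSamples(SIZE=10000, HOLES=100)
#   noholes, withholes = samples["noholes"], samples["withholes"]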
def to_ns(x):
"""convert timestamp to nanosec since 1970-1-1"""
import pandas as pd
return | pd.to_datetime(x) | pandas.to_datetime |
import xarray as xr
import numpy as np
import pandas as pd
import glob
import os
import warnings
warnings.filterwarnings('ignore')
def get_ds_latlon(infile):
ds = xr.open_dataset(infile)
vars_needed = ['StdPressureLev:ascending_TqJoint', 'SurfPres_Forecast_TqJ_A', 'SurfPres_Forecast_TqJ_D',
'Temperature_TqJ_A', 'Temperature_TqJ_D', 'SurfAirTemp_TqJ_A', 'SurfAirTemp_TqJ_D',
'SurfSkinTemp_TqJ_A', 'SurfSkinTemp_TqJ_D', 'H2O_MMR_TqJ_A', 'H2O_MMR_TqJ_D',
'H2O_MMR_Surf_TqJ_A', 'H2O_MMR_Surf_TqJ_D', 'O3_VMR_TqJ_A', 'O3_VMR_TqJ_D']
ds_sub = ds[vars_needed]
var_lat = ds_sub['YDim:ascending_TqJoint'].values
var_lon = ds_sub['XDim:ascending_TqJoint'].values
lat_lon_var = []
for i in var_lat:
for j in var_lon:
lat_lon_var.append([i, j])
return ds_sub, lat_lon_var
def haversine_np(latlon1, latlon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
latlon1, latlon2 = map(np.radians, [latlon1, latlon2])
i = 0
stn_new = []
for item in latlon1:
j = 0
dist = []
for value in latlon2:
dlat = latlon1[i][0]-latlon2[j][0]
dlon = latlon1[i][1]-latlon2[j][1]
a = np.sin(dlat/2.0)**2 + np.cos(latlon2[j][0]) * np.cos(latlon1[i][0]) * np.sin(dlon/2.0)**2
c = 2 * np.arcsin(np.sqrt(a))
km = 6367 * c
dist.append(km)
j += 1
idx=dist.index(min(dist))
stn_new.append(latlon2[idx])
i += 1
stn_new = list(map(np.degrees, stn_new))
return stn_new
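# A minimal usage sketch (hypothetical coordinates): for every point in the first
# list, haversine_np returns the closest point from the second list, converted back
# to degrees:
#   grid = [[32.0, 35.0], [31.5, 34.8]]
#   stations = [[32.1, 35.1], [30.0, 33.0]]
#   nearest = haversine_np(grid, stations)   # both grid points map to ~[32.1, 35.1]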
def get_stn_latlonname(station_file):
lst_stn = | pd.read_csv(station_file) | pandas.read_csv |
from anndata import AnnData
import numpy as np
import pandas as pd
import warnings
from ... import logging as logg
from .._distributed import materialize_as_ndarray
from .._utils import _get_mean_var
from scipy.sparse import issparse
def filter_genes_dispersion(data,
flavor='seurat',
min_disp=None, max_disp=None,
min_mean=None, max_mean=None,
n_bins=20,
n_top_genes=None,
log=True,
subset=True,
copy=False):
"""Extract highly variable genes [Satija15]_ [Zheng17]_.
This is a deprecated function, use
:func:`~scanpy.api.pp.highly_variable_genes` instead.
If trying out parameters, pass the data matrix instead of AnnData.
Depending on `flavor`, this reproduces the R-implementations of Seurat
[Satija15]_ and Cell Ranger [Zheng17]_.
The normalized dispersion is obtained by scaling with the mean and standard
deviation of the dispersions for genes falling into a given bin for mean
expression of genes. This means that for each bin of mean expression, highly
variable genes are selected.
Use `flavor='cell_ranger'` with care and in the same way as in
:func:`~scanpy.api.pp.recipe_zheng17`.
Parameters
----------
data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
flavor : {'seurat', 'cell_ranger'}, optional (default: 'seurat')
Choose the flavor for computing normalized dispersion. If choosing
'seurat', this expects non-logarithmized data - the logarithm of mean
and dispersion is taken internally when `log` is at its default value
`True`. For 'cell_ranger', this is usually called for logarithmized data
- in this case you should set `log` to `False`. In their default
workflows, Seurat passes the cutoffs whereas Cell Ranger passes
`n_top_genes`.
min_mean=0.0125, max_mean=3, min_disp=0.5, max_disp=`None` : `float`, optional
        If `n_top_genes` does not equal `None`, these cutoffs for the means and the
normalized dispersions are ignored.
n_bins : `int` (default: 20)
Number of bins for binning the mean gene expression. Normalization is
done with respect to each bin. If just a single gene falls into a bin,
the normalized dispersion is artificially set to 1. You'll be informed
about this if you set `settings.verbosity = 4`.
n_top_genes : `int` or `None` (default: `None`)
Number of highly-variable genes to keep.
log : `bool`, optional (default: `True`)
Use the logarithm of the mean to variance ratio.
subset : `bool`, optional (default: `True`)
        Keep highly-variable genes only (if True) else write a bool array for
        highly-variable genes while keeping all genes
copy : `bool`, optional (default: `False`)
If an :class:`~anndata.AnnData` is passed, determines whether a copy
is returned.
Returns
-------
If an AnnData `adata` is passed, returns or updates `adata` depending on \
`copy`. It filters the `adata` and adds the annotations
means : adata.var
Means per gene. Logarithmized when `log` is `True`.
dispersions : adata.var
Dispersions per gene. Logarithmized when `log` is `True`.
dispersions_norm : adata.var
Normalized dispersions per gene. Logarithmized when `log` is `True`.
If a data matrix `X` is passed, the annotation is returned as `np.recarray` \
with the same information stored in fields: `gene_subset`, `means`, `dispersions`, `dispersion_norm`.
"""
if n_top_genes is not None and not all([
min_disp is None, max_disp is None, min_mean is None, max_mean is None]):
logg.info('If you pass `n_top_genes`, all cutoffs are ignored.')
if min_disp is None: min_disp = 0.5
if min_mean is None: min_mean = 0.0125
if max_mean is None: max_mean = 3
if isinstance(data, AnnData):
adata = data.copy() if copy else data
result = filter_genes_dispersion(adata.X, log=log,
min_disp=min_disp, max_disp=max_disp,
min_mean=min_mean, max_mean=max_mean,
n_top_genes=n_top_genes,
flavor=flavor)
adata.var['means'] = result['means']
adata.var['dispersions'] = result['dispersions']
adata.var['dispersions_norm'] = result['dispersions_norm']
if subset:
adata._inplace_subset_var(result['gene_subset'])
else:
adata.var['highly_variable'] = result['gene_subset']
return adata if copy else None
logg.msg('extracting highly variable genes',
r=True, v=4)
X = data # no copy necessary, X remains unchanged in the following
mean, var = materialize_as_ndarray(_get_mean_var(X))
# now actually compute the dispersion
mean[mean == 0] = 1e-12 # set entries equal to zero to small value
dispersion = var / mean
if log: # logarithmized mean as in Seurat
dispersion[dispersion == 0] = np.nan
dispersion = np.log(dispersion)
mean = np.log1p(mean)
# all of the following quantities are "per-gene" here
import pandas as pd
df = pd.DataFrame()
df['mean'] = mean
df['dispersion'] = dispersion
if flavor == 'seurat':
df['mean_bin'] = | pd.cut(df['mean'], bins=n_bins) | pandas.cut |
#!/usr/bin/env python
# coding: utf-8
# In[95]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import math
# ## Step 1: collecting data
# In[96]:
#Reading data
titanic_data = pd.read_csv('titanic_train_clean.csv')
titanic_data.head(10)
# In[97]:
#Get the total number of passengers
titanic_data.shape
# ## Step 2 Analyzing Data
# In[98]:
#survived vs not survived
sns.countplot(x = 'Survived', data = titanic_data)
# In[99]:
#how many males and females
sns.countplot(x = 'Survived', hue='Sex', data=titanic_data)
# In[100]:
#ticket class of the passengers
sns.countplot(x='Survived', hue='Pclass', data=titanic_data)
# In[101]:
#Age distribution
titanic_data['Age'].plot.hist()
# In[102]:
#Fare Distribution
titanic_data['Fare'].plot.hist()
# In[103]:
#get info on columns
titanic_data.info()
# In[104]:
sns.countplot(x='SibSp', data=titanic_data)
# In[105]:
sns.countplot(x='Parch', data=titanic_data)
# ## Step 3 Data Wrangling
# In[106]:
#cleaning the data, removing all null values
#check for null values
titanic_data.isnull()
# In[107]:
#show all null values in the dataset
titanic_data.isnull().sum()
# In[108]:
#display in heatmap
sns.heatmap(titanic_data.isnull(), cmap="viridis")
# In[109]:
#if age had null
#plot boxplot for visualization
sns.boxplot(x='Pclass', y='Age', data=titanic_data)
#imputation
def impute_age(cols):
Age =cols[0]
Pclass = cols[6]
if( | pd.isnull(Age) | pandas.isnull |
import sys, warnings, operator
import json
import time
import types
import numbers
import inspect
import itertools
import string
import unicodedata
import datetime as dt
from collections import defaultdict, OrderedDict
from contextlib import contextmanager
from distutils.version import LooseVersion as _LooseVersion
from functools import partial
from threading import Thread, Event
from types import FunctionType
import numpy as np
import param
# Python3 compatibility
if sys.version_info.major >= 3:
import builtins as builtins # noqa (compatibility)
if sys.version_info.minor > 3:
from collections.abc import Iterable # noqa (compatibility)
else:
from collections import Iterable # noqa (compatibility)
basestring = str
unicode = str
long = int
cmp = lambda a, b: (a>b)-(a<b)
generator_types = (zip, range, types.GeneratorType)
RecursionError = RecursionError if sys.version_info.minor > 4 else RuntimeError # noqa
_getargspec = inspect.getfullargspec
get_keywords = operator.attrgetter('varkw')
LooseVersion = _LooseVersion
else:
import __builtin__ as builtins # noqa (compatibility)
from collections import Iterable # noqa (compatibility)
basestring = basestring
unicode = unicode
from itertools import izip
generator_types = (izip, xrange, types.GeneratorType) # noqa
RecursionError = RuntimeError
_getargspec = inspect.getargspec
get_keywords = operator.attrgetter('keywords')
class LooseVersion(_LooseVersion):
"""
Subclassed to avoid unicode issues in python2
"""
def __init__ (self, vstring=None):
if isinstance(vstring, unicode):
vstring = str(vstring)
self.parse(vstring)
def __cmp__(self, other):
if isinstance(other, unicode):
other = str(other)
if isinstance(other, basestring):
other = LooseVersion(other)
return cmp(self.version, other.version)
numpy_version = LooseVersion(np.__version__)
param_version = LooseVersion(param.__version__)
datetime_types = (np.datetime64, dt.datetime, dt.date, dt.time)
timedelta_types = (np.timedelta64, dt.timedelta,)
arraylike_types = (np.ndarray,)
masked_types = ()
try:
import pandas as pd
except ImportError:
pd = None
if pd:
pandas_version = LooseVersion(pd.__version__)
try:
if pandas_version >= '0.24.0':
from pandas.core.dtypes.dtypes import DatetimeTZDtype as DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
elif pandas_version > '0.20.0':
from pandas.core.dtypes.dtypes import DatetimeTZDtypeType
from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass
else:
from pandas.types.dtypes import DatetimeTZDtypeType
from pandas.types.dtypes.generic import ABCSeries, ABCIndexClass
pandas_datetime_types = (pd.Timestamp, DatetimeTZDtypeType, pd.Period)
pandas_timedelta_types = (pd.Timedelta,)
datetime_types = datetime_types + pandas_datetime_types
timedelta_types = timedelta_types + pandas_timedelta_types
arraylike_types = arraylike_types + (ABCSeries, ABCIndexClass)
if pandas_version > '0.23.0':
from pandas.core.dtypes.generic import ABCExtensionArray
arraylike_types = arraylike_types + (ABCExtensionArray,)
if pandas_version > '1.0':
from pandas.core.arrays.masked import BaseMaskedArray
masked_types = (BaseMaskedArray,)
except Exception as e:
param.main.param.warning('pandas could not register all extension types '
'imports failed with the following error: %s' % e)
try:
import cftime
cftime_types = (cftime.datetime,)
datetime_types += cftime_types
except:
cftime_types = ()
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
class VersionError(Exception):
"Raised when there is a library version mismatch."
def __init__(self, msg, version=None, min_version=None, **kwargs):
self.version = version
self.min_version = min_version
super(VersionError, self).__init__(msg, **kwargs)
class Config(param.ParameterizedFunction):
"""
Set of boolean configuration values to change HoloViews' global
behavior. Typically used to control warnings relating to
deprecations or set global parameter such as style 'themes'.
"""
future_deprecations = param.Boolean(default=False, doc="""
Whether to warn about future deprecations""")
image_rtol = param.Number(default=10e-4, doc="""
The tolerance used to enforce regular sampling for regular,
gridded data where regular sampling is expected. Expressed as the
maximal allowable sampling difference between sample
locations.""")
no_padding = param.Boolean(default=False, doc="""
Disable default padding (introduced in 1.13.0).""")
warn_options_call = param.Boolean(default=True, doc="""
Whether to warn when the deprecated __call__ options syntax is
used (the opts method should now be used instead). It is
recommended that users switch this on to update any uses of
__call__ as it will be deprecated in future.""")
default_cmap = param.String(default='kbc_r', doc="""
Global default colormap. Prior to HoloViews 1.14.0, the default
value was 'fire' which can be set for backwards compatibility.""")
default_gridded_cmap = param.String(default='kbc_r', doc="""
Global default colormap for gridded elements (i.e. Image, Raster
and QuadMesh). Can be set to 'fire' to match raster defaults
prior to HoloViews 1.14.0 while allowing the default_cmap to be
the value of 'kbc_r' used in HoloViews >= 1.14.0""")
default_heatmap_cmap = param.String(default='kbc_r', doc="""
Global default colormap for HeatMap elements. Prior to HoloViews
1.14.0, the default value was the 'RdYlBu_r' colormap.""")
def __call__(self, **params):
self.param.set_param(**params)
return self
config = Config()
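# A minimal usage sketch: global behavior is toggled by calling the shared instance,
# e.g. config(future_deprecations=True, no_padding=True)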
class HashableJSON(json.JSONEncoder):
"""
Extends JSONEncoder to generate a hashable string for as many types
of object as possible including nested objects and objects that are
not normally hashable. The purpose of this class is to generate
unique strings that once hashed are suitable for use in memoization
and other cases where deep equality must be tested without storing
the entire object.
By default JSONEncoder supports booleans, numbers, strings, lists,
tuples and dictionaries. In order to support other types such as
sets, datetime objects and mutable objects such as pandas Dataframes
or numpy arrays, HashableJSON has to convert these types to
datastructures that can normally be represented as JSON.
Support for other object types may need to be introduced in
future. By default, unrecognized object types are represented by
their id.
One limitation of this approach is that dictionaries with composite
keys (e.g. tuples) are not supported due to the JSON spec.
"""
string_hashable = (dt.datetime,)
repr_hashable = ()
def default(self, obj):
if isinstance(obj, set):
return hash(frozenset(obj))
elif isinstance(obj, np.ndarray):
return obj.tolist()
if pd and isinstance(obj, (pd.Series, pd.DataFrame)):
return obj.to_csv(header=True).encode('utf-8')
elif isinstance(obj, self.string_hashable):
return str(obj)
elif isinstance(obj, self.repr_hashable):
return repr(obj)
try:
return hash(obj)
except:
return id(obj)
def merge_option_dicts(old_opts, new_opts):
"""
Update the old_opts option dictionary with the options defined in
new_opts. Instead of a shallow update as would be performed by calling
old_opts.update(new_opts), this updates the dictionaries of all option
types separately.
Given two dictionaries
old_opts = {'a': {'x': 'old', 'y': 'old'}}
and
new_opts = {'a': {'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
this returns a dictionary
{'a': {'x': 'old', 'y': 'new', 'z': 'new'}, 'b': {'k': 'new'}}
"""
merged = dict(old_opts)
for option_type, options in new_opts.items():
if option_type not in merged:
merged[option_type] = {}
merged[option_type].update(options)
return merged
def merge_options_to_dict(options):
"""
Given a collection of Option objects or partial option dictionaries,
merge everything to a single dictionary.
"""
merged_options = {}
for obj in options:
if isinstance(obj,dict):
new_opts = obj
else:
new_opts = {obj.key: obj.kwargs}
merged_options = merge_option_dicts(merged_options, new_opts)
return merged_options
def deprecated_opts_signature(args, kwargs):
"""
Utility to help with the deprecation of the old .opts method signature
Returns whether opts.apply_groups should be used (as a bool) and the
corresponding options.
"""
from .options import Options
groups = set(Options._option_groups)
opts = {kw for kw in kwargs if kw != 'clone'}
apply_groups = False
options = None
new_kwargs = {}
if len(args) > 0 and isinstance(args[0], dict):
apply_groups = True
if (not set(args[0]).issubset(groups) and
all(isinstance(v, dict) and not set(v).issubset(groups)
for v in args[0].values())):
apply_groups = False
elif set(args[0].keys()) <= groups:
new_kwargs = args[0]
else:
options = args[0]
elif opts and opts.issubset(set(groups)):
apply_groups = True
elif kwargs.get('options', None) is not None:
apply_groups = True
elif not args and not kwargs:
apply_groups = True
return apply_groups, options, new_kwargs
class periodic(Thread):
"""
Run a callback count times with a given period without blocking.
If count is None, will run till timeout (which may be forever if None).
"""
def __init__(self, period, count, callback, timeout=None, block=False):
if isinstance(count, int):
if count < 0: raise ValueError('Count value must be positive')
elif not type(count) is type(None):
raise ValueError('Count value must be a positive integer or None')
if block is False and count is None and timeout is None:
raise ValueError('When using a non-blocking thread, please specify '
'either a count or a timeout')
super(periodic, self).__init__()
self.period = period
self.callback = callback
self.count = count
self.counter = 0
self.block = block
self.timeout = timeout
self._completed = Event()
self._start_time = None
@property
def completed(self):
return self._completed.is_set()
def start(self):
self._start_time = time.time()
if self.block is False:
super(periodic,self).start()
else:
self.run()
def stop(self):
self.timeout = None
self._completed.set()
def __repr__(self):
return 'periodic(%s, %s, %s)' % (self.period,
self.count,
callable_name(self.callback))
def __str__(self):
return repr(self)
def run(self):
while not self.completed:
if self.block:
time.sleep(self.period)
else:
self._completed.wait(self.period)
self.counter += 1
try:
self.callback(self.counter)
except Exception:
self.stop()
if self.timeout is not None:
dt = (time.time() - self._start_time)
if dt > self.timeout:
self.stop()
if self.counter == self.count:
self.stop()
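# A minimal usage sketch (hypothetical callback): run a callback once per second,
# five times, without blocking the calling thread:
#   p = periodic(period=1, count=5, callback=lambda i: print("tick", i))
#   p.start()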
def deephash(obj):
"""
Given an object, return a hash using HashableJSON. This hash is not
architecture, Python version or platform independent.
"""
try:
return hash(json.dumps(obj, cls=HashableJSON, sort_keys=True))
except:
return None
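# A minimal usage sketch: deephash gives a stable hash for nested, normally
# unhashable structures, e.g. deephash({'a': [1, 2], 'b': {3}}), and returns
# None when the object cannot be serialized.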
def tree_attribute(identifier):
"""
Predicate that returns True for custom attributes added to AttrTrees
that are not methods, properties or internal attributes.
These custom attributes start with a capitalized character when
applicable (not applicable to underscore or certain unicode characters)
"""
if identifier[0].upper().isupper() is False and identifier[0] != '_':
return True
else:
return identifier[0].isupper()
def argspec(callable_obj):
"""
Returns an ArgSpec object for functions, staticmethods, instance
methods, classmethods and partials.
Note that the args list for instance and class methods are those as
seen by the user. In other words, the first argument which is
conventionally called 'self' or 'cls' is omitted in these cases.
"""
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
# Parameterized function.__call__ considered function in py3 but not py2
spec = _getargspec(callable_obj.__call__)
args = spec.args[1:]
elif inspect.isfunction(callable_obj): # functions and staticmethods
spec = _getargspec(callable_obj)
args = spec.args
elif isinstance(callable_obj, partial): # partials
arglen = len(callable_obj.args)
spec = _getargspec(callable_obj.func)
args = [arg for arg in spec.args[arglen:] if arg not in callable_obj.keywords]
elif inspect.ismethod(callable_obj): # instance and class methods
spec = _getargspec(callable_obj)
args = spec.args[1:]
else: # callable objects
return argspec(callable_obj.__call__)
return inspect.ArgSpec(args=args,
varargs=spec.varargs,
keywords=get_keywords(spec),
defaults=spec.defaults)
def validate_dynamic_argspec(callback, kdims, streams):
"""
Utility used by DynamicMap to ensure the supplied callback has an
appropriate signature.
If validation succeeds, returns a list of strings to be zipped with
the positional arguments, i.e. kdim values. The zipped values can then
be merged with the stream values to pass everything to the Callable
as keywords.
If the callbacks use *args, None is returned to indicate that kdim
values must be passed to the Callable by position. In this
situation, Callable passes *args and **kwargs directly to the
callback.
If the callback doesn't use **kwargs, the accepted keywords are
validated against the stream parameter names.
"""
argspec = callback.argspec
name = callback.name
kdims = [kdim.name for kdim in kdims]
stream_params = stream_parameters(streams)
defaults = argspec.defaults if argspec.defaults else []
all_posargs = argspec.args[:-len(defaults)] if defaults else argspec.args
# Filter out any posargs for streams
posargs = [arg for arg in all_posargs if arg not in stream_params]
kwargs = argspec.args[-len(defaults):]
if argspec.keywords is None:
unassigned_streams = set(stream_params) - set(argspec.args)
if unassigned_streams:
unassigned = ','.join(unassigned_streams)
raise KeyError('Callable {name!r} missing keywords to '
'accept stream parameters: {unassigned}'.format(name=name,
unassigned=unassigned))
if len(posargs) > len(kdims) + len(stream_params):
raise KeyError('Callable {name!r} accepts more positional arguments than '
'there are kdims and stream parameters'.format(name=name))
if kdims == []: # Can be no posargs, stream kwargs already validated
return []
if set(kdims) == set(posargs): # Posargs match exactly, can all be passed as kwargs
return kdims
elif len(posargs) == len(kdims): # Posargs match kdims length, supplying names
if argspec.args[:len(kdims)] != posargs:
raise KeyError('Unmatched positional kdim arguments only allowed at '
'the start of the signature of {name!r}'.format(name=name))
return posargs
elif argspec.varargs: # Posargs missing, passed to Callable directly
return None
elif set(posargs) - set(kdims):
raise KeyError('Callable {name!r} accepts more positional arguments {posargs} '
'than there are key dimensions {kdims}'.format(name=name,
posargs=posargs,
kdims=kdims))
elif set(kdims).issubset(set(kwargs)): # Key dims can be supplied by keyword
return kdims
elif set(kdims).issubset(set(posargs+kwargs)):
return kdims
elif argspec.keywords:
return kdims
else:
raise KeyError('Callback {name!r} signature over {names} does not accommodate '
'required kdims {kdims}'.format(name=name,
names=list(set(posargs+kwargs)),
kdims=kdims))
def callable_name(callable_obj):
"""
Attempt to return a meaningful name identifying a callable or generator
"""
try:
if (isinstance(callable_obj, type)
and issubclass(callable_obj, param.ParameterizedFunction)):
return callable_obj.__name__
elif (isinstance(callable_obj, param.Parameterized)
and 'operation' in callable_obj.param):
return callable_obj.operation.__name__
elif isinstance(callable_obj, partial):
return str(callable_obj)
elif inspect.isfunction(callable_obj): # functions and staticmethods
return callable_obj.__name__
elif inspect.ismethod(callable_obj): # instance and class methods
meth = callable_obj
if sys.version_info < (3,0):
owner = meth.im_class if meth.im_self is None else meth.im_self
if meth.__name__ == '__call__':
return type(owner).__name__
return '.'.join([owner.__name__, meth.__name__])
else:
return meth.__func__.__qualname__.replace('.__call__', '')
elif isinstance(callable_obj, types.GeneratorType):
return callable_obj.__name__
else:
return type(callable_obj).__name__
except Exception:
return str(callable_obj)
def process_ellipses(obj, key, vdim_selection=False):
"""
Helper function to pad a __getitem__ key with the right number of
empty slices (i.e. :) when the key contains an Ellipsis (...).
If the vdim_selection flag is true, check if the end of the key
contains strings or Dimension objects in obj. If so, extra padding
will not be applied for the value dimensions (i.e. the resulting key
will be exactly one longer than the number of kdims). Note: this
flag should not be used for composite types.
"""
if getattr(getattr(key, 'dtype', None), 'kind', None) == 'b':
return key
wrapped_key = wrap_tuple(key)
ellipse_count = sum(1 for k in wrapped_key if k is Ellipsis)
if ellipse_count == 0:
return key
elif ellipse_count != 1:
raise Exception("Only one ellipsis allowed at a time.")
dim_count = len(obj.dimensions())
index = wrapped_key.index(Ellipsis)
head = wrapped_key[:index]
tail = wrapped_key[index+1:]
padlen = dim_count - (len(head) + len(tail))
if vdim_selection:
# If the end of the key (i.e. the tail) is in vdims, pad to len(kdims)+1
if wrapped_key[-1] in obj.vdims:
padlen = (len(obj.kdims) + 1) - len(head + tail)
return head + ((slice(None),) * padlen) + tail
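# The core replacement step above, sketched with plain tuples (illustrative):
#   key = (0, Ellipsis, 'value'); ndims = 4
#   head, tail = key[:1], key[2:]
#   head + (slice(None),) * (ndims - len(head) - len(tail)) + tail
#   -> (0, slice(None), slice(None), 'value')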
def bytes_to_unicode(value):
"""
Safely casts bytestring to unicode
"""
if isinstance(value, bytes):
return unicode(value.decode('utf-8'))
return value
def get_method_owner(method):
"""
Gets the instance that owns the supplied method
"""
if isinstance(method, partial):
method = method.func
return method.__self__ if sys.version_info.major >= 3 else method.im_self
def capitalize_unicode_name(s):
"""
Turns a string such as 'capital delta' into the shortened,
capitalized version, in this case simply 'Delta'. Used as a
transform in sanitize_identifier.
"""
index = s.find('capital')
if index == -1: return s
tail = s[index:].replace('capital', '').strip()
tail = tail[0].upper() + tail[1:]
return s[:index] + tail
class sanitize_identifier_fn(param.ParameterizedFunction):
"""
Sanitizes group/label values for use in AttrTree attribute
access. Depending on the version parameter, either sanitization
appropriate for Python 2 (no unicode in identifiers allowed) or
Python 3 (some unicode allowed) is used.
Note that if you are using Python 3, you can switch to version 2
for compatibility but you cannot enable relaxed sanitization if
you are using Python 2.
Special characters are sanitized using their (lowercase) unicode
name using the unicodedata module. For instance:
>>> unicodedata.name(u'$').lower()
'dollar sign'
As these names are often very long, this parameterized function
allows filtering, substitutions and transforms to help shorten these
names appropriately.
"""
version = param.ObjectSelector(sys.version_info.major, objects=[2,3], doc="""
The sanitization version. If set to 2, more aggressive
sanitization appropriate for Python 2 is applied. Otherwise,
if set to 3, more relaxed, Python 3 sanitization is used.""")
capitalize = param.Boolean(default=True, doc="""
Whether the first letter should be converted to
uppercase. Note, this will only be applied to ASCII characters
in order to make sure paths aren't confused with method
names.""")
eliminations = param.List(['extended', 'accent', 'small', 'letter', 'sign', 'digit',
'latin', 'greek', 'arabic-indic', 'with', 'dollar'], doc="""
Lowercase strings to be eliminated from the unicode names in
order to shorten the sanitized name (lowercase). Redundant
strings should be removed but too much elimination could cause
two unique strings to map to the same sanitized output.""")
substitutions = param.Dict(default={'circumflex':'power',
'asterisk':'times',
'solidus':'over'}, doc="""
Lowercase substitutions of substrings in unicode names. For
instance the ^ character has the name 'circumflex accent' even
though it is more typically used for exponentiation. Note that
substitutions occur after filtering and that there should be no
ordering dependence between substitutions.""")
transforms = param.List(default=[capitalize_unicode_name], doc="""
List of string transformation functions to apply after
filtering and substitution in order to further compress the
unicode name. For instance, the default capitalize_unicode_name
function will turn the string "capital delta" into "Delta".""")
disallowed = param.List(default=['trait_names', '_ipython_display_',
'_getAttributeNames'], doc="""
An explicit list of names that should not be allowed as
attribute names on Tree objects.
By default, prevents IPython from creating an entry called
Trait_names due to an inconvenient getattr check (during
tab-completion).""")
disable_leading_underscore = param.Boolean(default=False, doc="""
Whether leading underscores should be allowed to be sanitized
with the leading prefix.""")
aliases = param.Dict(default={}, doc="""
A dictionary of aliases mapping long strings to their short,
sanitized equivalents""")
prefix = 'A_'
_lookup_table = param.Dict(default={}, doc="""
Cache of previously computed sanitizations""")
@param.parameterized.bothmethod
def add_aliases(self_or_cls, **kwargs):
"""
Conveniently add new aliases as keyword arguments. For instance
you can add a new alias with add_aliases(short='Longer string')
"""
self_or_cls.aliases.update({v:k for k,v in kwargs.items()})
@param.parameterized.bothmethod
def remove_aliases(self_or_cls, aliases):
"""
Remove a list of aliases.
"""
for k, v in list(self_or_cls.aliases.items()):
if v in aliases:
self_or_cls.aliases.pop(k)
@param.parameterized.bothmethod
def allowable(self_or_cls, name, disable_leading_underscore=None):
disabled_reprs = ['javascript', 'jpeg', 'json', 'latex',
'pdf', 'png', 'svg', 'markdown']
disabled_ = (self_or_cls.disable_leading_underscore
if disable_leading_underscore is None
else disable_leading_underscore)
if disabled_ and name.startswith('_'):
return False
isrepr = any(('_repr_%s_' % el) == name for el in disabled_reprs)
return (name not in self_or_cls.disallowed) and not isrepr
@param.parameterized.bothmethod
def prefixed(self, identifier, version):
"""
Whether or not the identifier will be prefixed.
Strings that require the prefix are generally not recommended.
"""
invalid_starting = ['Mn', 'Mc', 'Nd', 'Pc']
if identifier.startswith('_'): return True
return((identifier[0] in string.digits) if version==2
else (unicodedata.category(identifier[0]) in invalid_starting))
@param.parameterized.bothmethod
def remove_diacritics(self_or_cls, identifier):
"""
Remove diacritics and accents from the input leaving other
unicode characters alone."""
chars = ''
for c in identifier:
replacement = unicodedata.normalize('NFKD', c).encode('ASCII', 'ignore')
if replacement:
chars += bytes_to_unicode(replacement)
else:
chars += c
return chars
@param.parameterized.bothmethod
def shortened_character_name(self_or_cls, c, eliminations=[], substitutions={}, transforms=[]):
"""
Given a unicode character c, return the shortened unicode name
(as a list of tokens) by applying the eliminations,
substitutions and transforms.
"""
name = unicodedata.name(c).lower()
# Filtering
for elim in eliminations:
name = name.replace(elim, '')
# Substitution
for i,o in substitutions.items():
name = name.replace(i, o)
for transform in transforms:
name = transform(name)
return ' '.join(name.strip().split()).replace(' ','_').replace('-','_')
def __call__(self, name, escape=True, version=None):
if name in [None, '']:
return name
elif name in self.aliases:
return self.aliases[name]
elif name in self._lookup_table:
return self._lookup_table[name]
name = bytes_to_unicode(name)
version = self.version if version is None else version
if not self.allowable(name):
raise AttributeError("String %r is in the disallowed list of attribute names: %r" % (name, self.disallowed))
if version == 2:
name = self.remove_diacritics(name)
if self.capitalize and name and name[0] in string.ascii_lowercase:
name = name[0].upper()+name[1:]
sanitized = (self.sanitize_py2(name) if version==2 else self.sanitize_py3(name))
if self.prefixed(name, version):
sanitized = self.prefix + sanitized
self._lookup_table[name] = sanitized
return sanitized
def _process_underscores(self, tokens):
"Strip underscores to make sure the number is correct after join"
groups = [[str(''.join(el))] if b else list(el)
for (b,el) in itertools.groupby(tokens, lambda k: k=='_')]
flattened = [el for group in groups for el in group]
processed = []
for token in flattened:
if token == '_': continue
if token.startswith('_'):
token = str(token[1:])
if token.endswith('_'):
token = str(token[:-1])
processed.append(token)
return processed
def sanitize_py2(self, name):
# This fix works but masks an issue in self.sanitize (py2)
prefix = '_' if name.startswith('_') else ''
valid_chars = string.ascii_letters+string.digits+'_'
return prefix + str('_'.join(self.sanitize(name, lambda c: c in valid_chars)))
def sanitize_py3(self, name):
if not name.isidentifier():
return '_'.join(self.sanitize(name, lambda c: ('_'+c).isidentifier()))
else:
return name
def sanitize(self, name, valid_fn):
"Accumulate blocks of hex and separate blocks by underscores"
invalid = {'\a':'a','\b':'b', '\v':'v','\f':'f','\r':'r'}
for cc in filter(lambda el: el in name, invalid.keys()):
raise Exception("Please use a raw string or escape control code '\%s'"
% invalid[cc])
sanitized, chars = [], ''
for split in name.split():
for c in split:
if valid_fn(c): chars += str(c) if c=='_' else c
else:
short = self.shortened_character_name(c, self.eliminations,
self.substitutions,
self.transforms)
sanitized.extend([chars] if chars else [])
if short != '':
sanitized.append(short)
chars = ''
if chars:
sanitized.extend([chars])
chars=''
return self._process_underscores(sanitized + ([chars] if chars else []))
sanitize_identifier = sanitize_identifier_fn.instance()
group_sanitizer = sanitize_identifier_fn.instance()
label_sanitizer = sanitize_identifier_fn.instance()
dimension_sanitizer = sanitize_identifier_fn.instance(capitalize=False)
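# Illustrative walk-through of the shortening applied by the sanitizers above
# for the '^' character (values shown are what the default parameters produce):
#   unicodedata.name('^').lower()          -> 'circumflex accent'
#   after eliminations (e.g. 'accent')     -> 'circumflex'
#   after substitutions                    -> 'power'
# so dimension_sanitizer('x^2') yields 'x_power_2'.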
def isscalar(val):
"""
Value is scalar or None
"""
return val is None or np.isscalar(val) or isinstance(val, datetime_types)
def isnumeric(val):
if isinstance(val, (basestring, bool, np.bool_)):
return False
try:
float(val)
return True
except (TypeError, ValueError):
return False
def asarray(arraylike, strict=True):
"""
Converts arraylike objects to NumPy ndarray types. Errors if
object is not arraylike and strict option is enabled.
"""
if isinstance(arraylike, np.ndarray):
return arraylike
elif isinstance(arraylike, list):
return np.asarray(arraylike, dtype=object)
elif not isinstance(arraylike, np.ndarray) and isinstance(arraylike, arraylike_types):
return arraylike.values
elif hasattr(arraylike, '__array__'):
return np.asarray(arraylike)
elif strict:
raise ValueError('Could not convert %s type to array' % type(arraylike))
return arraylike
nat_as_integer = np.datetime64('NAT').view('i8')
def isnat(val):
"""
Checks if the value is a NaT. Should only be called on datetimelike objects.
"""
if (isinstance(val, (np.datetime64, np.timedelta64)) or
(isinstance(val, np.ndarray) and val.dtype.kind == 'M')):
if numpy_version >= '1.13':
return np.isnat(val)
else:
return val.view('i8') == nat_as_integer
elif pd and val is pd.NaT:
return True
elif pd and isinstance(val, pandas_datetime_types+pandas_timedelta_types):
return pd.isna(val)
else:
return False
def isfinite(val):
"""
Helper function to determine if scalar or array value is finite extending
np.isfinite with support for None, string, datetime types.
"""
is_dask = is_dask_array(val)
if not np.isscalar(val) and not is_dask:
if isinstance(val, np.ma.core.MaskedArray):
return ~val.mask & isfinite(val.data)
elif isinstance(val, masked_types):
return ~val.isna() & isfinite(val._data)
val = asarray(val, strict=False)
if val is None:
return False
elif is_dask:
import dask.array as da
return da.isfinite(val)
elif isinstance(val, np.ndarray):
if val.dtype.kind == 'M':
return ~isnat(val)
elif val.dtype.kind == 'O':
return np.array([isfinite(v) for v in val], dtype=bool)
elif val.dtype.kind in 'US':
return ~pd.isna(val) if pd else np.ones_like(val, dtype=bool)
finite = np.isfinite(val)
if pd and pandas_version >= '1.0.0':
finite &= ~pd.isna(val)
return finite
elif isinstance(val, datetime_types+timedelta_types):
return not isnat(val)
elif isinstance(val, (basestring, bytes)):
return True
finite = np.isfinite(val)
if pd and pandas_version >= '1.0.0':
if finite is pd.NA:
return False
return finite & (~ | pd.isna(val) | pandas.isna |
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties)
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# pandas.read_csv uses the first line in the file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.loc[:,'Scientific Name']
self.com_name = dr.loc[:,'Common Name']
self.taxa = dr.loc[:,'Taxa']
self.order = dr.loc[:,'Order']
self.usfws_id = dr.loc[:,'USFWS Species ID (ENTITY_ID)']
self.body_wgt = dr.loc[:,'BW (g)']
self.diet_item = dr.loc[:,'Food item']
self.h2o_cont = dr.loc[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth_min")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth_max")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = | pd.Series([], dtype="float", name="cbt_bird_1inten_mort") | pandas.Series |
'''
Created with love by Sigmoid
@Author - <NAME> - <EMAIL>
'''
# Importing all libraries
import numpy as np
import pandas as pd
import random
import sys
from math import floor
from .erorrs import NotBinaryData, NoSuchColumn
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
class ADASYN:
def __init__(self, binary_columns : list = None, beta : "float < 1" = 1.0, k : "int > 0" = 5, seed : int = 42) -> None:
'''
The constructor of the ADASYN algorithm.
:param binary_columns: list, default = None
The list of columns that should have binary values after balancing.
:param beta: float <= 1, default = 1.0
The ratio of minority : majority data desired after ADASYN.
:param k: int > 0, default = 5
The number of neighbours used by the knn algorithm.
:param seed: int, default = 42
The seed for random number generator.
'''
if binary_columns is None:
self.__binarize = False
else:
self.__binarize = True
self.__binary_columns = binary_columns
self.__beta = beta
self.__k = k
self.__seed = seed
np.random.seed(self.__seed)
random.seed(self.__seed)
def __to_binary(self) -> None:
'''
If :param binary_columns: was provided, the intermediate values generated in those columns are rounded back to the column's original min/max values.
'''
for column_name in self.__binary_columns:
serie = self.synthetic_df[column_name].values
threshold = (self.df[column_name].max() + self.df[column_name].min()) / 2
for i in range(len(serie)):
if serie[i] >= threshold:
serie[i] = self.df[column_name].max()
else:
serie[i] = self.df[column_name].min()
self.synthetic_df[column_name] = serie
def __infinity_check(self, matrix : 'np.array') -> 'np.array':
'''
This function replaces the infinity and -infinity values with the minimal and maximal float python values.
:param matrix: 'np.array'
The numpy array that was generated by the algorithm.
:return: 'np.array'
The numpy array with the infinity replaced values.
'''
matrix[matrix == -np.inf] = sys.float_info.min
matrix[matrix == np.inf] = sys.float_info.max
return matrix
def balance(self, df : pd.DataFrame, target : str):
'''
The balance function.
:param df: pd.DataFrame
The pandas Data Frame to apply the balancer.
:param target: str
The name of the target column.
:return: pd.DataFrame
A pandas Data Frame
'''
# Creating an internal copy of the data frame.
self.df = df.copy()
self.target = target
# Checking if the target column passed to the algorithm is present in the data frame.
if target not in self.df.columns:
raise NoSuchColumn(f"{target} isn't a column of passed data frame")
# Checking if the target column is a binary one.
if len(self.df[target].unique()) != 2:
raise NotBinaryData(f"{target} column isn't a binary column")
# Getting the column names that are not the target one.
self.X_columns = [column for column in self.df.columns if column != target]
# Getting the class frequencies.
classes_frequency = dict(self.df[target].value_counts())
# Searching for the class with the biggest frequency.
max_freq = 0
for cls in classes_frequency:
if classes_frequency[cls] > max_freq:
majority_class = cls
max_freq = classes_frequency[cls]
# Getting the name of the minority class.
minority_class = [cls for cls in classes_frequency if cls != majority_class][0]
# Getting the total number of minority samples to generate.
G = int((classes_frequency[majority_class] - classes_frequency[minority_class]) * self.__beta)
# Getting the set of the minority samples.
minority_samples = self.df[self.df[target] == minority_class][self.X_columns].values
# Generating the r matrix - the k indexes of the nearest neighbours.
r = np.array([])
self.neighbourhood = []
for minority_sample in minority_samples:
predicted_indexes = self.__predict_knn(minority_sample)
r = np.append(r, len(self.df[(self.df.index.isin(predicted_indexes) & (self.df[self.target] == majority_class))]) / self.__k)
self.neighbourhood.append(predicted_indexes)
# Normalizing the r array
r = r / np.sum(r)
# Calculating the amount of synthetic examples to generate per neighbourhood.
G = r * G
# Generating the synthetic data.
self.synthetic_data = []
for i in range(len(G)):
for _ in range(floor(G[i])):
choices = self.df.iloc[self.neighbourhood[i], :][self.df[self.target] == minority_class][self.X_columns].values
if len(choices) < 2:
continue
choices = choices[
np.random.randint(len(choices), size=2)]
s = choices[0] + (choices[1] - choices[0]) * random.uniform(0, 1)
self.synthetic_data.append(s)
# Replacing infinity values with minimal and maximal float python values.
self.synthetic_data = self.__infinity_check(np.array(self.synthetic_data).astype(float))
# Creating the synthetic data frame
self.synthetic_df = pd.DataFrame(np.array(self.synthetic_data), columns=self.X_columns)
# Rounding binary columns if needed.
if self.__binarize:
self.__to_binary()
# Adding the target column
self.synthetic_df.loc[:, self.target] = minority_class
new_df = | pd.concat([self.df, self.synthetic_df], axis=0) | pandas.concat |
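# A self-contained sketch (illustrative, not part of the class above) of the
# core ADASYN interpolation step used in balance(): each synthetic sample is
# drawn on the segment between a minority sample and one of its neighbours.
import numpy as np
_rng = np.random.RandomState(42)
_x_i = np.array([1.0, 2.0])   # a minority sample
_x_j = np.array([2.0, 4.0])   # one of its minority neighbours
_synthetic = _x_i + (_x_j - _x_i) * _rng.uniform(0, 1)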
from requests_html import HTMLSession
from requests.exceptions import ConnectionError
from retry import retry
from typing import List
from time import sleep
import pandas as pd
from numpy import nan
import pickle
from logging import getLogger, StreamHandler, Formatter, DEBUG
logger = getLogger(__name__)
logger.setLevel(DEBUG)
ch = StreamHandler()
ch.setLevel(DEBUG)
formatter = Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
def get_articles(base_domain: str, kind: str) -> List[str]:
base_url = base_domain + kind
session = HTMLSession()
r = session.get(base_url + ".html")
all_articles = pd.Series(list(r.html.links), name=kind)
articles = all_articles[all_articles.str.startswith("20")]
logger.debug("Checked: %s", base_url)
sleep(10)
r = session.get(base_url + "_list.html")
all_art_list = pd.Series(list(r.html.links))
art_list = all_art_list[all_art_list.str.startswith("20")]
logger.debug("Checked: %s", base_url + "_list.html")
sleep(10)
ret_articles = pd.concat([articles, art_list], ignore_index=True, names=[kind])
return ret_articles
@retry()
def get_description(base_domain: str, code: str, kind: str) -> pd.core.series.Series:
session = HTMLSession()
url = base_domain + code
r = session.get(url)
sleep(10)
try:
raw_title = r.html.find('title', first=True).text
raw_article = r.html.find('article', first=True).text
data_row = ["".join(a_text.replace("これは嘘ニュースです", "").replace("\n\n\n新しいアプリで記事を読む", "").split("\n"))
for a_text in [raw_title, raw_article]]
except AttributeError:
data_row = ["404", "Not Found"]
data = [code, kind] + data_row
data_sr = pd.Series(data, index=["code", "kind", "title", "article"])
return data_sr
def main():
base_domain = "http://kyoko-np.net/"
try:
urls = pickle.load(open("urls.pickle", "rb"))
except (OSError, IOError):
kinds = ["national", "politics", "business", "sport",
"international", "science", "culture", "entertainment"]
urls = pd.concat(
[get_articles(base_domain, kind) for kind in kinds],
axis=1, names=kinds
)
urls.columns = kinds
pickle.dump(urls, open("urls.pickle", "wb"))
df = | pd.DataFrame(columns=["code", "kind", "title", "article"]) | pandas.DataFrame |
""" Characteristic the heuristic algorithm. """
import argparse
import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from gumi.pruning.mask_utils import group_sort, run_mbm
from gumi.model_runner import utils
# Plotting
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
plt.ioff()
plt.rcParams.update({"font.size": 22})
CMAP = plt.cm.inferno
# Seaborn style
import seaborn as sns
sns.set_style("whitegrid")
def parse_args():
parser = argparse.ArgumentParser(prog="Characteristic heuristic algorithm.")
parser.add_argument("-s", "--size", type=int, help="Size of the matrix")
parser.add_argument(
"--num-samples", default=1000, type=int, help="Size of the matrix"
)
parser.add_argument(
"-g", "--num-groups", nargs="+", type=int, help="Number of groups"
)
parser.add_argument(
"-a", "--archs", nargs="+", type=str, help="Models to be evaluated"
)
parser.add_argument(
"-i", "--num-iters", nargs="*", type=int, default=1, help="Number of iterations"
)
parser.add_argument(
"-m", "--min-g", type=int, default=0, help="Only move to group min_g"
)
parser.add_argument(
"--print-freq",
type=int,
default=100,
help="Print frequency when sampling data.",
)
parser.add_argument(
"-d",
"--dir",
type=str,
default="data",
metavar="PATH",
help="Directory to place all the data files.",
)
# options
parser.add_argument(
"--draw-model-stats",
action="store_true",
default=False,
help="Whether to draw the model statistics.",
)
parser.add_argument(
"--draw-rand-stats",
action="store_true",
default=False,
help="Whether to draw the random statistics.",
)
parser.add_argument(
"--resume",
action="store_true",
default=False,
help="Whether to reload previously computed data.",
)
return parser.parse_args()
def generate_test_matrix(size, G, scale=10):
""" First create the original matrix with diagonal blocks larger,
then randomly permute it. """
C = np.zeros((size, size))
SoG = size // G
for g in range(G):
R0 = np.random.rand(SoG, SoG)
R1 = scale * np.random.rand(SoG, SoG)
C[g * SoG : (g + 1) * SoG, g * SoG : (g + 1) * SoG] += R0 + R1
# shuffle
perm_cols = np.random.permutation(size)
perm_rows = np.random.permutation(size)
return C, C[perm_rows, :][:, perm_cols]
def plot_mat_to_file(mat, file_name):
""" """
fig, ax = plt.subplots()
cax = ax.matshow(mat)
fig.colorbar(cax)
fig.savefig(file_name)
def plot_mats(mats, file_name):
""" """
fig, axes = plt.subplots(ncols=len(mats))
for i in range(len(mats)):
cax = axes[i].matshow(mats[i], cmap=CMAP)
fig.savefig(file_name)
def get_cost(C, G):
""" Split into groups, collect the values on diagonal. """
SoG = C.shape[0] // G, C.shape[1] // G
cost = 0.0
for g in range(G):
cost += C[g * SoG[0] : (g + 1) * SoG[0], g * SoG[1] : (g + 1) * SoG[1]].sum()
return cost
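# Quick sanity check for get_cost() (illustrative): for a perfectly
# block-diagonal matrix the diagonal blocks capture all of the weight, e.g.
#   M = np.kron(np.eye(2), np.ones((3, 3)))   # 6x6, two dense 3x3 blocks
#   get_cost(M, 2) == M.sum()                 # i.e. a ratio of 100%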
def draw_example(C0, C, fp):
fig, axes = plt.subplots(ncols=2, figsize=(6, 3))
axes[0].set_title("Original")
axes[0].matshow(C0, cmap=CMAP)
axes[0].get_xaxis().set_ticks([])
axes[0].get_yaxis().set_ticks([])
axes[1].set_title("Permuted")
axes[1].matshow(C, cmap=CMAP)
axes[1].get_xaxis().set_ticks([])
axes[1].get_yaxis().set_ticks([])
plt.tight_layout()
fig.savefig(fp)
def draw_step_by_step(C, G, fp, total_cost, num_iters=100):
""" The step-by-step algorithm illustration graph"""
min_gs = [G - 1, G // 2, 0]
fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(6, 6))
for idx, min_g in enumerate(min_gs):
ind_in, ind_out = group_sort(C, G, num_iters=num_iters, min_g=min_g)
C_ = C[ind_out, :][:, ind_in]
cost = get_cost(C_, G)
r, c = idx // 2, idx % 2
axes[r, c].set_title(
"$min_g = {} ({:.2f}\%)$".format(min_g + 1, cost / total_cost * 100)
)
axes[r, c].matshow(C_, cmap=CMAP)
axes[r, c].get_xaxis().set_ticks([])
axes[r, c].get_yaxis().set_ticks([])
# TODO: don't show these
# result from run MBM
gnd_in, gnd_out, cost = run_mbm(C, G, perm="GRPS", num_iters=num_iters)
ind_in = [i for l in gnd_in for i in l]
ind_out = [i for l in gnd_out for i in l]
C_ = C[ind_out, :][:, ind_in]
axes[1, 1].set_title("$MBM({:.2f}\%)$".format(get_cost(C_, G) / total_cost * 100))
axes[1, 1].matshow(C_, cmap=CMAP)
axes[1, 1].get_xaxis().set_ticks([])
axes[1, 1].get_yaxis().set_ticks([])
plt.tight_layout()
fig.savefig(fp)
def draw_stats(size, G, data_dir, num_iters=None, num_samples=500, **kwargs):
""" Collect the performance result """
if not num_iters:
num_iters = [1]
# sns.set_style('whitegrid')
fp = os.path.join(
data_dir,
"random_stats_NI_{}_NS_{}.pdf".format(
"-".join([str(i) for i in num_iters]), num_samples
),
)
fig, ax = plt.subplots(figsize=(5, 4))
for ni in num_iters:
print("Figuring out num_iters={}".format(ni))
ratios = collect_random_stats(
size, G, data_dir, num_iters=ni, num_samples=num_samples, **kwargs
)
# plot the histogram
sns.distplot(ratios, kde=False, label="$N_S={}$".format(ni), ax=ax)
ax.set_xlabel("Ratio")
ax.set_ylabel("Frequency")
ax.legend()
plt.tight_layout()
fig.savefig(fp)
def collect_random_stats(
size, G, data_dir, resume=False, num_samples=100, num_iters=100, print_freq=100
):
""" Collect the statistics from randomly sampled test matrices.
If resume is specified, we will load from the data file.
"""
# where the data file is stored
fp = os.path.join(
data_dir, "random_stats_NI_{}_NS_{}.npy".format(num_iters, num_samples)
)
# Decide how to deal with the stats data
if resume:
assert os.path.isfile(fp)
ratios = np.load(fp)
else:
ratios = np.zeros(num_samples) # where to store result.
for i in range(num_samples):
if i % print_freq == 0:
print("[{}/{}] Sampling ...".format(i, num_samples))
C0, C = generate_test_matrix(size, G)
gnd_in, gnd_out, cost = run_mbm(C, G, perm="GRPS", num_iters=num_iters)
ind_in = [i for l in gnd_in for i in l]
ind_out = [i for l in gnd_out for i in l]
C_ = C[ind_out, :][:, ind_in]
ratios[i] = get_cost(C_, G) / get_cost(C0, G)
# save to file
np.save(fp, ratios)
return ratios
def draw_model_stats(arch, grps, data_dir, num_iters=None):
""" Draw the statistics of several models """
if not num_iters:
num_iters = [1]
fp = os.path.join(
data_dir,
"model_stats_{}_NI_{}_G_{}.pdf".format(
arch,
"-".join([str(ni) for ni in num_iters]),
"-".join([str(g) for g in grps]),
),
)
print("Plot to file: {}".format(fp))
fig, ax = plt.subplots(figsize=(5, 4))
print("Running on model {} ...".format(arch))
model = utils.load_model(arch, "imagenet", pretrained=True)
results = {"num_iters": [], "num_groups": [], "ratio": []}
for ni in num_iters:
for G in grps:
print("G = {} NI = {}".format(G, ni))
mods = {}
# Collect statistics for a single model
for name, mod in model.named_modules():
if not isinstance(mod, nn.Conv2d):
continue
W = mod.weight
F, C = W.shape[:2]
if F % G != 0 or C % G != 0:
continue
C = W.norm(dim=(2, 3)).cpu().detach().numpy()
gnd_in, gnd_out, cost = run_mbm(C, G, perm="GRPS", num_iters=ni)
mods[name] = (cost, C.sum(), cost / C.sum() * 100)
# print('{:30s}\t {:.2e}\t {:.2e}\t {:.2f}%'.format(
# name, mods[name][0], mods[name][1], mods[name][2]))
# Summarise results
sum_cost = sum([val[0] for val in mods.values()])
total_cost = sum([val[1] for val in mods.values()])
results["num_iters"].append("$N_S={}$".format(ni))
results["num_groups"].append("$G={}$".format(G))
results["ratio"].append(sum_cost / total_cost * 100)
df = | pd.DataFrame(results) | pandas.DataFrame |
import subete
import pandas as pd
import matplotlib.pyplot as plt
repo = subete.load()
data = {}
data["language"] = [lang for lang in repo.language_collections().keys()]
data["total_programs"] = [lang.total_programs() for lang in repo.language_collections().values()]
data["total_size"] = [lang.total_size() for lang in repo.language_collections().values()]
data["total_line_count"] = [lang.total_line_count() for lang in repo.language_collections().values()]
df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
from options_parser import arguments
options, args = arguments()
def brca_data(train_test=False, full_data=False):
# input cells over which the predictive model is built
input_cells = open(options.cell_list, "r").read().splitlines()
input_cells = [ic.replace("-", "").upper() for ic in input_cells]
print("Parsed %s user-provided cell line names"%len(input_cells))
# baseline data
dfbase = pd.read_csv(options.baseline_data, index_col=0)
base_cells = dfbase.columns.tolist()
msg = "Loaded %s containing %s features and %s cell lines"
print(msg%(options.baseline_data, dfbase.shape[0], dfbase.shape[1]))
# list of genes (or proteins) which would serve as the features for predictive modeling
genes = open(options.gene_list, "r").read().splitlines()
print("Parsed %s user-provided gene names"%len(genes))
# GR-metrics data
dfgr = pd.read_csv(options.response_data)
dfgr = dfgr[dfgr.generic_name==options.drug]
dfgr.index=dfgr.cell_line
gr_cells = dfgr.index.tolist()
print("Loaded GR values for %s cell lines"%len(gr_cells))
# cells that are present in both, baseline as well GR-data
cells = list(set(input_cells).intersection(set(base_cells).intersection(set(gr_cells))))
dfbase = dfbase[cells]
dfgr = dfgr[dfgr.index.isin(cells)]
print("%s cell lines are in common to all sources"%len(cells))
# combine the baseline and dose response data into one dataFrame
dfc = pd.concat([dfbase[dfbase.index.isin(genes)].T,
dfgr[[options.metric, "sigma_%s"%options.metric]]], axis=1).dropna()
dff = pd.concat([dfbase.T,
dfgr[[options.metric, "sigma_%s"%options.metric]]], axis=1).dropna()
# train-test sets, with roughly 80-20 split, for rescursive feature elimination
dftest = | pd.read_csv(options.test_set, index_col=0) | pandas.read_csv |
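# Minimal sketch of the column-wise concatenation used above (illustrative toy
# data): pandas aligns the baseline features and GR metrics on the shared
# cell-line index before dropna() removes incomplete rows.
import pandas as pd
_baseline = pd.DataFrame({'GENE1': [1.0, 2.0]}, index=['CELL_A', 'CELL_B'])
_gr = pd.DataFrame({'GR50': [0.5, 0.7]}, index=['CELL_A', 'CELL_B'])
_combined = pd.concat([_baseline, _gr], axis=1).dropna()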
# +
"""
Functions/classes/variables for interacting between a pandas DataFrame
and postgres/mysql/sqlite (and potentially other databases).
"""
import json
import pandas as pd
import logging
import re
from copy import deepcopy
from math import floor
from sqlalchemy import JSON, MetaData, select
from sqlalchemy.sql import null
from sqlalchemy.schema import (PrimaryKeyConstraint, CreateColumn, CreateSchema)
from alembic.runtime.migration import MigrationContext
from alembic.operations import Operations
from pangres.logger import log
from pangres.upsert import (mysql_upsert,
postgres_upsert,
sqlite_upsert)
# compile some regexes
# column names that will cause issues with psycopg2 default parameter style
# (so we will need to switch to format style when we see such columns)
RE_BAD_COL_NAME = re.compile('[\(\)\%]')
# e.g. match "(50)" in "VARCHAR(50)"
RE_CHARCOUNT_COL_TYPE = re.compile('(?<=.)+\(\d+\)')
# -
# # Class PandasSpecialEngine
class PandasSpecialEngine:
def __init__(self,
engine,
df,
table_name,
schema=None,
dtype=None):
"""
Interacts with SQL tables via pandas and SQLalchemy table models.
Attributes
----------
engine : sqlalchemy.engine.base.Engine
Engine provided during class instantiation
df : pd.DataFrame
DataFrame provided during class instantiation
table_name : str
Table name provided during class instantiation
schema : str or None
SQL schema provided during class instantiation
table : sqlalchemy.sql.schema.Table
Sqlalchemy table model for df
Parameters
----------
engine : sqlalchemy.engine.base.Engine
Engine from sqlalchemy (see https://docs.sqlalchemy.org/en/13/core/engines.html
and examples below)
df : pd.DataFrame
A pandas DataFrame
table_name : str
Name of the SQL table
schema : str or None, default None
Name of the schema that contains/will contain the table
For postgres defaults to "public" if not provided.
dtype : None or dict {str:SQL_TYPE}, default None
Similar to pd.to_sql dtype argument.
This is especially useful for MySQL where the length of
primary keys with text has to be provided (see Examples)
Examples
--------
>>> from sqlalchemy import create_engine
>>> from pangres.helpers import PandasSpecialEngine
>>>
>>> engine = create_engine("postgresql://user:[email protected]:5432/database")
>>> df = pd.DataFrame({'name':['Albert', 'Toto'],
... 'profileid':[10, 11]}).set_index('profileid')
>>> pse = PandasSpecialEngine(engine=engine, df=df, table_name='example')
>>> pse # doctest: +SKIP
PandasSpecialEngine (id 123546, hexid 0x1E29A)
* connection: Engine(postgresql://user:***@host.com:5432/database)
* schema: public
* table: example
* SQLalchemy table model:
Table('example', MetaData(bind=Engine(postgresql://user:***@host.com:5432/database)),
Column('profileid', BigInteger(), table=<example>, primary_key=True, nullable=False),
Column('name', Text(), table=<example>), schema='public')
* df.head():
| profileid | name |
|------------:|:-------|
| 10 | Albert |
| 11 | Toto |
"""
self._db_type = self._detect_db_type(engine)
if self._db_type == "postgres":
schema = 'public' if schema is None else schema
# raise if we find columns with "(", ")" or "%"
if any((RE_BAD_COL_NAME.search(col) for col in df.columns)):
err = ("psycopg2 (Python postgres driver) does not seem to support"
"column names with '%', '(' or ')' "
"(see https://github.com/psycopg/psycopg2/issues/167)")
raise ValueError(err)
# VERIFY ARGUMENTS
# all index levels have names
index_names = list(df.index.names)
if any([ix_name is None for ix_name in index_names]):
raise IndexError("All index levels must be named!")
# index is unique
if not df.index.is_unique:
err = ("The index must be unique since it is used "
"as primary key.\n"
"Check duplicates using this code (assuming df "
" is the DataFrame you want to upsert):\n"
">>> df.index[df.index.duplicated(keep=False)]")
raise IndexError(err)
# there are no duplicated names
fields = list(df.index.names) + df.columns.tolist()
if len(set(fields)) != len(fields):
raise ValueError(("There cannot be duplicated names amongst "
"index levels and/or columns!"))
# detect json columns
def is_json(col):
s = df[col].dropna()
return (not s.empty and
s.map(lambda x: isinstance(x, (list, dict))).all())
json_cols = [col for col in df.columns if is_json(col)]
# merge with dtype from user
new_dtype = {c:JSON for c in json_cols}
if dtype is not None:
new_dtype.update(dtype)
new_dtype = None if new_dtype == {} else new_dtype
# create sqlalchemy table model via pandas
pandas_sql_engine = pd.io.sql.SQLDatabase(engine=engine, schema=schema)
table = pd.io.sql.SQLTable(name=table_name,
pandas_sql_engine=pandas_sql_engine,
frame=df,
dtype=new_dtype).table
# change bindings of table (we want a sqlalchemy engine
# not a pandas_sql_engine)
metadata = MetaData(bind=engine)
table.metadata = metadata
# add PK
constraint = PrimaryKeyConstraint(*[table.columns[name]
for name in df.index.names])
table.append_constraint(constraint)
# add remaining attributes
self.engine = engine
self.df = df
self.schema = schema
self.table = table
@staticmethod
def _detect_db_type(engine) -> str:
"""
Identifies whether the dialect of given sqlalchemy
engine corresponds to postgres, mysql or another sql type.
Returns
-------
sql_type : {'postgres', 'mysql', 'sqlite', 'other'}
"""
dialect = engine.dialect.dialect_description
if re.search('psycopg|postgres', dialect):
return "postgres"
elif 'mysql' in dialect:
return "mysql"
elif 'sqlite' in dialect:
return 'sqlite'
else:
return "other"
def table_exists(self) -> bool:
"""
Returns True if the table defined in given instance
of PandasSpecialEngine exists else returns False.
Returns
-------
exists : bool
True if table exists else False
"""
return self.engine.has_table(self.table.name, schema=self.schema)
def create_schema_if_not_exists(self):
"""
Creates the schema defined in given instance of
PandasSpecialEngine if it does not exist.
"""
if (self.schema is not None and
not self.engine.dialect.has_schema(self.engine, self.schema)):
self.engine.execute(CreateSchema(self.schema))
def create_table_if_not_exists(self):
"""
Creates the table generated in given instance of
PandasSpecialEngine if it does not exist.
"""
self.table.create(checkfirst=True)
def get_db_columns_names(self) -> list:
"""
Gets the column names of the SQL table defined
in given instance of PandasSpecialEngine.
Returns
-------
db_columns_names : list
list of column names (str)
"""
columns_info = self.engine.dialect.get_columns(connection=self.engine,
table_name=self.table.name,
schema=self.schema)
db_columns_names = [col_info["name"] for col_info in columns_info]
return db_columns_names
def add_new_columns(self):
"""
Adds columns present in df but not in the SQL table
for given instance of PandasSpecialEngine.
Notes
-----
Sadly, it seems that we cannot create JSON columns.
"""
# create deepcopies of the column because we are going to unbound
# them for the table model (otherwise alembic would think we add
# a column that already exists in the database)
cols_to_add = [deepcopy(col) for col in self.table.columns
if col.name not in self.get_db_columns_names()]
# check columns are not index levels
if any((c.name in self.df.index.names for c in cols_to_add)):
raise ValueError(('Cannot add any column that is part of the df index!\n'
"You'll have to update your table primary key or change your "
"df index"))
with self.engine.connect() as con:
ctx = MigrationContext.configure(con)
op = Operations(ctx)
for col in cols_to_add:
col.table = None # Important! unbound column from table
op.add_column(self.table.name, col, schema=self.schema)
log(f"Added column {col} (type: {col.type}) in table {self.table.name} "
f'(schema="{self.schema})"')
def get_db_table_schema(self):
"""
Gets the sqlalchemy table model for the SQL table
defined in given PandasSpecialEngine (using schema and
table_name attributes to find the table in the database).
Returns
-------
db_table : sqlalchemy.sql.schema.Table
"""
table_name = self.table.name
schema = self.schema
engine = self.engine
metadata = MetaData(bind=engine, schema=schema)
metadata.reflect(bind=engine, schema=schema, only=[table_name])
namespace = table_name if schema is None else f'{schema}.{table_name}'
db_table = metadata.tables[namespace]
return db_table
def get_empty_columns(self) -> list:
"""
Gets a list of the columns that contain no data
in the SQL table defined in given instance of
PandasSpecialEngine.
Uses method get_db_table_schema (see its docstring).
Returns
-------
empty_columns : list of sqlalchemy.sql.schema.Column
List of columns that contain no data
"""
db_table = self.get_db_table_schema()
empty_columns = []
for col in db_table.columns:
stmt = select(from_obj=db_table,
columns=[col],
whereclause=col.isnot(None)).limit(1)
results = self.engine.execute(stmt).fetchall()
if results == []:
empty_columns.append(col)
return empty_columns
def adapt_dtype_of_empty_db_columns(self):
"""
Changes the data types of empty columns in the SQL table defined
in given instance of a PandasSpecialEngine.
This should only happen in case of data type mismatches.
This means with columns for which the sqlalchemy table
model for df and the model for the SQL table have different data types.
"""
empty_db_columns = self.get_empty_columns()
db_table = self.get_db_table_schema()
# if column does not have value in db and there are values
# in the frame then change the column type if needed
for col in empty_db_columns:
# check if the column also exists in df
if col.name not in self.df.columns:
continue
# check same type
orig_type = db_table.columns[col.name].type.compile(self.engine.dialect)
dest_type = self.table.columns[col.name].type.compile(self.engine.dialect)
# remove character count e.g. "VARCHAR(50)" -> "VARCHAR"
orig_type = RE_CHARCOUNT_COL_TYPE.sub('', orig_type)
dest_type = RE_CHARCOUNT_COL_TYPE.sub('', dest_type)
# if same type or we want to insert TEXT instead of JSON continue
# (JSON is not supported on some DBs so it's normal to have TEXT instead)
if ((orig_type == dest_type) or
((orig_type == 'JSON') and (dest_type == 'TEXT'))):
continue
# grab the col/index from the df
# so we can check if there are any values
if col.name in self.df.index.names:
df_col = self.df.index.get_level_values(col.name)
else:
df_col = self.df[col.name]
if df_col.notna().any():
# raise error if we have to modify the dtype but we have a SQlite engine
# (SQLite does not support data type alteration)
if self._db_type == 'sqlite':
raise ValueError('SQlite does not support column data type alteration!')
with self.engine.connect() as con:
ctx = MigrationContext.configure(con)
op = Operations(ctx)
new_col = self.table.columns[col.name]
# check if postgres (in which case we have to use "using" syntax
# to alter columns data types)
if self._db_type == 'postgres':
escaped_col = str(new_col.compile(dialect=self.engine.dialect))
compiled_type = new_col.type.compile(dialect=self.engine.dialect)
alter_kwargs = {'postgresql_using':f'{escaped_col}::{compiled_type}'}
else:
alter_kwargs = {}
op.alter_column(table_name=self.table.name,
column_name=new_col.name,
type_=new_col.type,
schema=self.schema,
**alter_kwargs)
log(f"Changed type of column {new_col.name} "
f"from {col.type} to {new_col.type} "
f'in table {self.table.name} (schema="{self.schema}")')
@staticmethod
def _create_chunks(values, chunksize=10000):
"""
Chunks a list into a list of lists of size
:chunksize:.
Parameters
----------
chunksize : int > 0, default 10000
Number of values to be inserted at once,
an integer strictly above zero.
"""
if not isinstance(chunksize, int) or chunksize <= 0:
raise ValueError('chunksize must be an integer strictly above 0')
chunks = [values[i:i + chunksize] for i in range(0, len(values), chunksize)]
return chunks
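# e.g. (illustrative): PandasSpecialEngine._create_chunks(list(range(5)), chunksize=2)
# -> [[0, 1], [2, 3], [4]]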
def _get_values_to_insert(self):
"""
Gets the values to be inserted from the pandas DataFrame
defined in given instance of PandasSpecialEngine
to the coresponding SQL table.
Returns
-------
values : list
Values from the df attribute that may have been converted
for SQL compability e.g. pd.Timestamp will be converted
to datetime.datetime objects.
"""
# this seems to be the most reliable way to unpack
# the DataFrame. For instance using df.to_dict(orient='records')
# can introduce types such as numpy integer which we'd have to deal with
values = self.df.reset_index().values.tolist()
for i in range(len(values)):
row = values[i]
for j in range(len(row)):
val = row[j]
# replace pd.Timestamp with datetime.datetime
if isinstance(val, pd.Timestamp):
values[i][j] = val.to_pydatetime()
# check if na unless it is list like
elif not | pd.api.types.is_list_like(val) | pandas.api.types.is_list_like |
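# Sketch of the value conversion shown above (illustrative): pandas Timestamps
# are handed to DB drivers as plain datetime.datetime objects.
import pandas as pd
_ts = pd.Timestamp('2021-01-01 12:00')
_py_dt = _ts.to_pydatetime()   # datetime.datetime(2021, 1, 1, 12, 0)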
import argparse
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
import glob
import os
from ML.DDModel import DDModel
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
parser = argparse.ArgumentParser()
parser.add_argument('-pr','--project',required=True,help='Location of project')
parser.add_argument('-sz_test','--size_test_set',required=True,help='Number of molecules in the test set')
parser.add_argument('-it_1','--start_iteration',required=True,help='Number of first iteration to analyze')
parser.add_argument('-it_2','--end_iteration',required=True,help='Number of last iteration to analyze')
parser.add_argument('-fo','--output_folder',required=True,help='Folder where to output the figures')
io_args = parser.parse_args()
path = io_args.project
num_molec = int(io_args.size_test_set)
it_1 = int(io_args.start_iteration)
it_2 = int(io_args.end_iteration)
path_out = io_args.output_folder
def get_zinc_and_labels(zinc_path, labels_path):
ids = []
with open(zinc_path,'r') as ref:
for line in ref:
ids.append(line.split(',')[0])
zincIDs = pd.DataFrame(ids, columns=['ZINC_ID'])
labels_df = pd.read_csv(labels_path, header=0)
combined_df = pd.merge(labels_df, zincIDs, how='inner', on=['ZINC_ID'])
return combined_df.set_index('ZINC_ID')
def get_all_x_data(morgan_path, ID_labels): # ID_labels is a dataframe containing the zincIDs and their corresponding labels.
train_set = np.zeros([num_molec,1024], dtype=bool) # using bool to save space
train_id = []
print('x data from:', morgan_path)
with open(morgan_path,'r') as ref:
line_no=0
for line in ref:
mol_info=line.rstrip().split(',')
train_id.append(mol_info[0])
# "Decompressing" the information from the file about where the 1s are on the 1024 bit vector.
bit_indicies = mol_info[1:] # array of indexes of the binary 1s in the 1024 bit vector representing the morgan fingerprint
for elem in bit_indicies:
train_set[line_no,int(elem)] = 1
line_no+=1
train_set = train_set[:line_no,:]
print('Done...')
train_pd = pd.DataFrame(data=train_set, dtype=np.uint8)
train_pd['ZINC_ID'] = train_id
score_col = ID_labels.columns.difference(['ZINC_ID'])[0]
train_data = | pd.merge(ID_labels, train_pd, how='inner',on=['ZINC_ID']) | pandas.merge |
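# Self-contained sketch (illustrative) of the fingerprint "decompression"
# above: a list of set-bit indices becomes a dense 1024-bit boolean vector.
import numpy as np
_bit_indices = [3, 17, 1023]
_fingerprint = np.zeros(1024, dtype=bool)
_fingerprint[_bit_indices] = True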
import argparse
import os
import boto3
import pandas as pd
from io import StringIO
# Parse Command Line Arguments
parser = argparse.ArgumentParser(description='Crunch statistics for a range of items in OnlineRetail.csv.')
parser.add_argument('startitem', metavar='s', type=int,
help='What item of the OnlineRetail.csv should I start at')
parser.add_argument('enditem', metavar='e', type=int,
help='What item of the OnlineRetail.csv should I end at')
args = parser.parse_args()
START_ITEM = args.startitem;
END_ITEM = args.enditem;
print(f'Crunching stats for items #{START_ITEM} to #{END_ITEM} of OnlineRetail.csv')
# Read CSV into a Dataframe
print(f'Reading data from Key=rawdata/OnlineRetail.csv')
s3_resource = boto3.resource('s3')
S3_BUCKET = os.environ['S3_BUCKET']
bucket = s3_resource.Bucket(S3_BUCKET)
obj = bucket.Object(key=f"rawdata/OnlineRetail.csv")
df = pd.read_csv(obj.get()['Body'], sep=",", encoding = "utf-8")
items = df.Description.unique()
# Crunching Stats
stats_list = []
for i, i_description in enumerate(items[START_ITEM : END_ITEM]):
print(f'Crunching Stats for item: {i_description}')
item_df = df[df.Description == i_description]
stat = (
i + START_ITEM,
i_description,
item_df.Quantity.sum(),
item_df.UnitPrice.mean()
)
print(stat)
stats_list.append(stat)
del(item_df)
stats_df = | pd.DataFrame(stats_list, columns =['Item_Number', 'Item', 'Total_Units_Sold', 'Average_Price_Of_Unit']) | pandas.DataFrame |
import gym
import pandas as pd
import numpy as np
from numpy import inf
from gym import spaces
from sklearn import preprocessing
from statsmodels.tsa.statespace.sarimax import SARIMAX
from empyrical import sortino_ratio, calmar_ratio, omega_ratio
from render.BitcoinTradingGraph import BitcoinTradingGraph
from util.stationarization import log_and_difference
from util.benchmarks import buy_and_hodl, rsi_divergence, sma_crossover
from util.indicators import add_indicators
# Delete this if debugging
np.warnings.filterwarnings('ignore')
class BitcoinTradingEnv(gym.Env):
'''A Bitcoin trading environment for OpenAI gym'''
metadata = {'render.modes': ['human', 'system', 'none']}
viewer = None
def __init__(self, df, initial_balance=10000, commission=0.0025, reward_func='sortino', **kwargs):
super(BitcoinTradingEnv, self).__init__()
self.initial_balance = initial_balance
self.commission = commission
self.reward_func = reward_func
self.df = df.fillna(method='bfill').reset_index()
self.stationary_df = log_and_difference(
self.df, ['Open', 'High', 'Low', 'Close', 'Volume BTC', 'Volume USD'])
benchmarks = kwargs.get('benchmarks', [])
self.benchmarks = [
{
'label': 'Buy and HODL',
'values': buy_and_hodl(self.df['Close'], initial_balance, commission)
},
{
'label': 'RSI Divergence',
'values': rsi_divergence(self.df['Close'], initial_balance, commission)
},
{
'label': 'SMA Crossover',
'values': sma_crossover(self.df['Close'], initial_balance, commission)
},
*benchmarks,
]
self.forecast_len = kwargs.get('forecast_len', 10)
self.confidence_interval = kwargs.get('confidence_interval', 0.95)
self.obs_shape = (1, 5 + len(self.df.columns) -
2 + (self.forecast_len * 3))
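        # observation row = 5 account-related values + every stationary-frame column except 'index'/'Date'
        # + 3 values per forecast step (presumably the SARIMAX mean and its lower/upper confidence bounds)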
# Actions of the format Buy 1/4, Sell 3/4, Hold (amount ignored), etc.
self.action_space = spaces.Discrete(12)
# Observes the price action, indicators, account action, price forecasts
self.observation_space = spaces.Box(
low=0, high=1, shape=self.obs_shape, dtype=np.float16)
def _next_observation(self):
scaler = preprocessing.MinMaxScaler()
features = self.stationary_df[self.stationary_df.columns.difference([
'index', 'Date'])]
scaled = features[:self.current_step + self.forecast_len + 1].values
scaled[abs(scaled) == inf] = 0
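        # the log-differenced features can contain +/-inf values; zero them out before fitting the min-max scaler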
scaled = scaler.fit_transform(scaled.astype('float32'))
scaled = | pd.DataFrame(scaled, columns=features.columns) | pandas.DataFrame |
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
                # pandas.read_csv uses the first line of the file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
            sys.exit('file: %s, %s' % (filename, e))
print(dr)
        self.sci_name = dr.loc[:,'Scientific Name']
        self.com_name = dr.loc[:,'Common Name']
        self.taxa = dr.loc[:,'Taxa']
        self.order = dr.loc[:,'Order']
        self.usfws_id = dr.loc[:,'USFWS Species ID (ENTITY_ID)']
        self.body_wgt = dr.loc[:,'BW (g)']
        self.diet_item = dr.loc[:,'Food item']
        self.h2o_cont = dr.loc[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
        self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
        self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
        self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
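        # e.g. 'dbt_mamm_1inmill_mort' below is the dose-based toxicity associated with 1-in-a-million mortality in mammals (mg-pest/kg-bw)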
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
        self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
        self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = | pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort") | pandas.Series |
#!/usr/bin/env python3
from argparse import ArgumentParser
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
def plot(combined_df):
systems = [
"Weak Spec.", "Weak Spec. + Search", "Expert + Search", "AMS + Search"
]
combined_df["order"] = combined_df["name"].map(lambda x: systems.index(x))
combined_df = combined_df.sort_values("order")
fig, ax = plt.subplots(1)
ax = sns.barplot(
data=combined_df,
x="search",
y="wins",
hue="name",
ci=None,
ax=ax,
)
ax.set_xlabel("Search")
ax.set_ylabel("Number of Wins")
plt.legend(
loc='upper center',
bbox_to_anchor=(0.5, 1.3),
title="Approach",
ncol=2
)
plt.tight_layout()
return ax
def combine_dfs(input_paths, search_names):
acc = []
for path, search in zip(input_paths, search_names):
df = | pd.read_csv(path) | pandas.read_csv |
from __future__ import annotations
from typing import Any, cast, Generator, Iterable, Optional, TYPE_CHECKING, Union
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from tanuki.data_store.data_type import DataType
from tanuki.data_store.index.index import Index
from tanuki.data_store.index.pandas_index import PandasIndex
from tanuki.database.data_token import DataToken
from .data_backend import DataBackend, ILocIndexer, LocIndexer
if TYPE_CHECKING:
from tanuki.data_store.index.index_alias import IndexAlias
from tanuki.data_store.query import Query
class PandasBackend(DataBackend):
_data: DataFrame
_index: PandasIndex
_loc: _LocIndexer
_iloc: _ILocIndexer
def __init__(
self,
        data: Optional[Union[Series, DataFrame, dict[str, list]]] = None,
index: Optional[PandasIndex] = None,
) -> None:
if data is None:
self._data = DataFrame(dtype="object")
elif type(data) is Series:
self._data = cast(Series, data).to_frame().transpose()
elif type(data) is DataFrame:
self._data = | DataFrame(data) | pandas.core.frame.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 16 12:00:00 2018
@author: <NAME>
"""
import pandas as pd
import os
import psycopg2
import networkx as nx
import csv
import itertools
import operator
import ast
from sqlalchemy import create_engine
import numpy as np
import igraph as ig
import copy
from collections import Counter
import sys
import math
from vtra.utils import load_config
from vtra.transport_network_creation import province_shapefile_to_network, add_igraph_generalised_costs_province_roads, province_shapefile_to_dataframe
def net_present_value(adaptation_options_dataframe,strategy_parameter,parameter_value_list,min_eal,max_eal,edge_options_dictionary,edge_width = 1.0, edge_length = 1.0):
for param_val in parameter_value_list:
st = adaptation_options_dataframe.loc[adaptation_options_dataframe[strategy_parameter] == param_val,'strategy'].values[0]
st_min_benefit = edge_width*edge_length*adaptation_options_dataframe.loc[adaptation_options_dataframe[strategy_parameter] == param_val,'min_benefit'].sum() + min_eal
st_max_benefit = edge_width*edge_length*adaptation_options_dataframe.loc[adaptation_options_dataframe[strategy_parameter] == param_val,'max_benefit'].sum() + max_eal
min_npv = st_min_benefit - edge_width*edge_length*adaptation_options_dataframe.loc[adaptation_options_dataframe[strategy_parameter] == param_val,'max_cost'].sum()
max_npv = st_max_benefit - edge_width*edge_length*adaptation_options_dataframe.loc[adaptation_options_dataframe[strategy_parameter] == param_val,'min_cost'].sum()
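        # conservative (min) NPV pairs the lowest benefit with the highest cost; optimistic (max) NPV pairs the highest benefit with the lowest cost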
if adaptation_options_dataframe.loc[adaptation_options_dataframe[strategy_parameter] == param_val,'max_cost'].sum() > 0:
min_bc_ratio = 1.0*st_min_benefit/(edge_width*edge_length*adaptation_options_dataframe.loc[adaptation_options_dataframe[strategy_parameter] == param_val,'max_cost'].sum())
else:
min_bc_ratio = 0
if adaptation_options_dataframe.loc[adaptation_options_dataframe[strategy_parameter] == param_val,'min_cost'].sum() > 0:
max_bc_ratio = 1.0*st_max_benefit/(edge_width*edge_length*adaptation_options_dataframe.loc[adaptation_options_dataframe[strategy_parameter] == param_val,'min_cost'].sum())
else:
max_bc_ratio = 0
edge_options_dictionary.append({'strategy':st,'min_npv':min_npv,'max_npv':max_npv,'min_bc_ratio':min_bc_ratio,'max_bc_ratio':max_bc_ratio})
return edge_options_dictionary
def main():
data_path,calc_path,output_path = load_config()['paths']['data'],load_config()['paths']['calc'],load_config()['paths']['output']
'''
cols = ['band_name','band_num','climate_scenario','commune_id','commune_name','district_id','district_name',
'edge_id','hazard_type','max_val','min_val','model','probability','province_id','province_name','sector',
'year','length','road_cond','asset_type','width','min_econ_loss','max_econ_loss']
'''
start_year = 2016
end_year = 2050
truck_unit_wt = [5.0,20.0]
discount_rate = 12.0
total_discount_ratio = []
for year in range(start_year,end_year):
# total_discount_ratio += 1.0/math.pow(1.0 + 1.0*discount_rate/100.0,year - start_year)
total_discount_ratio.append(1.0/math.pow(1.0 + 1.0*discount_rate/100.0,year - start_year))
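        # i.e. the present-value factor 1/(1 + r)^t for year t after start_year, with r = discount_rate/100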
# total_discount_ratio = sum(total_discount_ratio_list)
df_path = os.path.join(data_path,'Adaptation_options','adaptation_options.xlsx')
adaptation_df = pd.read_excel(df_path,sheet_name = 'adapt_options')
adaptation_df['total_discount_ratio'] = sum(total_discount_ratio)
min_maintain_discount_ratio_list = []
max_maintain_discount_ratio_list = []
for iter_, row in adaptation_df.iterrows():
min_maintain_schedule = row['maintenance_times_min']
max_maintain_schedule = row['maintenance_times_max']
min_maintain_discount_ratio = 0
max_maintain_discount_ratio = 0
max_maintain_discount_years = np.arange(start_year,end_year,min_maintain_schedule)
min_maintain_discount_years = np.arange(start_year,end_year,max_maintain_schedule)
for year in max_maintain_discount_years[1:]:
max_maintain_discount_ratio += 1.0/math.pow(1.0 + 1.0*discount_rate/100.0,year - start_year)
for year in min_maintain_discount_years[1:]:
min_maintain_discount_ratio += 1.0/math.pow(1.0 + 1.0*discount_rate/100.0,year - start_year)
min_maintain_discount_ratio_list.append(min_maintain_discount_ratio)
max_maintain_discount_ratio_list.append(max_maintain_discount_ratio)
adaptation_df['min_maintain_discount_ratio'] = min_maintain_discount_ratio_list
adaptation_df['max_maintain_discount_ratio'] = max_maintain_discount_ratio_list
adaptation_df['min_benefit'] = adaptation_df['rehab_cost_min']*adaptation_df['total_discount_ratio']
adaptation_df['max_benefit'] = adaptation_df['rehab_cost_max']*adaptation_df['total_discount_ratio']
adaptation_df['min_cost'] = adaptation_df['adapt_cost_min']*adaptation_df['total_discount_ratio'] + adaptation_df['maintain_cost_min']*adaptation_df['min_maintain_discount_ratio']
adaptation_df['max_cost'] = adaptation_df['adapt_cost_max']*adaptation_df['total_discount_ratio'] + adaptation_df['maintain_cost_max']*adaptation_df['max_maintain_discount_ratio']
print (adaptation_df)
'''
Add new strategy
cols = ['strategy','asset_type','asset_cond','location','height_m','adapt_cost_min','adapt_cost_max',
'maintain_cost_min','maintain_cost_max','rehab_cost_min','rehab_cost_max','height_incr_min',
'height_incr_max','maintenance_times_min','maintenance_times_max','cost_unit']
'''
cols = ['strategy','asset_type','asset_cond','location','height_m','cost_unit','min_cost','max_cost','min_benefit','max_benefit']
adaptation_options = adaptation_df[cols]
ad_opt = []
st_desc = ['road change','roads','unpaved-paved','all',0,'$/m2']
st_min_cost = adaptation_df.loc[adaptation_df['asset_cond'] == 'paved','min_cost'].sum()
st_max_cost = adaptation_df.loc[adaptation_df['asset_cond'] == 'paved','max_cost'].sum()
st_min_benefit = adaptation_df.loc[adaptation_df['asset_cond'] == 'unpaved','rehab_cost_min'].sum()*total_discount_ratio[0] + adaptation_df.loc[adaptation_df['asset_cond'] == 'paved','rehab_cost_min'].sum()*sum(total_discount_ratio[1:])
st_max_benefit = adaptation_df.loc[adaptation_df['asset_cond'] == 'unpaved','rehab_cost_max'].sum()*total_discount_ratio[0] + adaptation_df.loc[adaptation_df['asset_cond'] == 'paved','rehab_cost_max'].sum()*sum(total_discount_ratio[1:])
ad_opt.append(st_desc + [st_min_cost,st_max_cost,st_min_benefit,st_max_benefit])
new_ht = 6
st_desc = ['dyke building','dyke','rural','sea',new_ht,'million USD/km']
st_min_cost = adaptation_df.loc[adaptation_df['height_m'] == 4,'min_cost'].sum() + (new_ht - 4)*adaptation_df.loc[adaptation_df['height_m'] == 4,'height_incr_min'].sum()*sum(total_discount_ratio)
st_max_cost = adaptation_df.loc[adaptation_df['height_m'] == 4,'max_cost'].sum() + (new_ht - 4)*adaptation_df.loc[adaptation_df['height_m'] == 4,'height_incr_max'].sum()*sum(total_discount_ratio)
st_min_benefit = adaptation_df.loc[adaptation_df['height_m'] == 4,'min_benefit'].sum()
st_max_benefit = adaptation_df.loc[adaptation_df['height_m'] == 4,'max_benefit'].sum()
ad_opt.append(st_desc + [st_min_cost,st_max_cost,st_min_benefit,st_max_benefit])
ad_opt_df = pd.DataFrame(ad_opt,columns = cols)
adaptation_options = adaptation_options.append(ad_opt_df, ignore_index=True)
print (adaptation_options)
cols = ['band_num','climate_scenario',
'edge_id','hazard_type','max_val','min_val','model','probability',
'year','exposure_length']
# index_cols = ['edge_id','hazard_type','model','climate_scenario','year','road_cond','asset_type','width','road_length']
selection_criteria = ['commune_id','hazard_type','model','climate_scenario','year']
filter_cols = ['edge_id','exposed_length'] + selection_criteria
# provinces to consider
province_list = ['Lao Cai','Binh Dinh','Thanh Hoa']
province_terrian = ['mountain','flat','flat']
growth_scenarios = [(5,'low'),(6.5,'forecast'),(10,'high')]
base_year = 2016
types = ['min','max']
fail_scenarios_data = os.path.join(output_path,'hazard_scenarios','province_roads_hazard_intersections.xlsx')
rd_prop_file = os.path.join(data_path,'Roads','road_properties','road_properties.xlsx')
duration_max = [10,15,20,25,30]
length_thr = 100.0
for prn in range(len(province_list)):
# for prn in range(1,3):
province = province_list[prn]
# set all paths for all input files we are going to use
province_name = province.replace(' ','').lower()
all_edge_fail_scenarios = pd.read_excel(fail_scenarios_data,sheet_name = province_name)
all_edge_fail_scenarios.loc[all_edge_fail_scenarios['probability'] == 'none', 'probability'] = 1.0
all_edge_fail_scenarios['probability'] = | pd.to_numeric(all_edge_fail_scenarios['probability']) | pandas.to_numeric |
import os
import collections
import argparse
import numpy as np
import pandas as pd
import statistics as stat
from datetime import datetime, timedelta, date
import plotly.graph_objects as go
import dash # (version 1.12.0) pip install dash
import dash_table
from dash_table.Format import Format, Scheme
from dash_table import FormatTemplate
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
import dash_bootstrap_components as dbc
from src.tos_api_calls import tos_search, tos_get_quotes, tos_get_option_chain, tos_get_price_hist
from src.tos_helper import create_pricelist
from src.gbm import prob_over, prob_under
from src.stats import get_hist_volatility, prob_cone, get_prob
# app = dash.Dash(__name__)
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
# Docker support
parser = argparse.ArgumentParser()
parser.add_argument("--docker", help="Change the default server host to 0.0.0.0", action='store_true')
args = parser.parse_args()
# API credentials
API_KEY = os.environ.get('TOS_API_KEY')
# Data Table Properties
PAGE_SIZE = 30
# ------------------------------------------------------------------------------
# Dash table value formatting
money = FormatTemplate.money(0)
money_full=FormatTemplate.money(2)
percentage = FormatTemplate.percentage(2)
# Define column names in Ticker Pandas Dataframe
ticker_df_columns=[
dict(id='ticker', name='Ticker'),
dict(id='hist_volatility_1Y', name='1Y Hist. Vol', type='numeric', format=percentage),
dict(id='hist_volatility_3m', name='3M Hist. Vol', type='numeric', format=percentage),
dict(id='hist_volatility_1m', name='1M Hist. Vol', type='numeric', format=percentage),
dict(id='hist_volatility_2w', name='2w Hist. Vol', type='numeric', format=percentage),
dict(id='skew_category', name='Skew Category'),
dict(id='skew', name='Skew'),
dict(id='liquidity', name='Liquidity'),
]
# Define column names in Options Chain Pandas Dataframe
option_chain_df_columns=[
dict(id='ticker', name='Ticker'),
dict(id='exp_date', name='Exp. Date (Local)'),
dict(id='option_type', name='Option Type'),
dict(id='strike_price', name='Strike', type='numeric', format=money_full),
dict(id='exp_days', name='Exp. Days'),
dict(id='delta', name='Delta'),
dict(id='prob_val', name='Conf. Prob', type='numeric', format=percentage),
dict(id='open_interest', name='Open Int.', type='numeric', format=Format().group(True)),
dict(id='total_volume', name='Total Vol.', type='numeric', format=Format().group(True)),
dict(id='premium', name='Premium', type='numeric', format=money),
dict(id='option_leverage', name='Leverage'),
dict(id='bid_size', name='Bid Size', type='numeric', format=Format().group(True)),
dict(id='ask_size', name='Ask Size', type='numeric', format=Format().group(True)),
dict(id='roi_val', name='ROI')
]
# ------------------------------------------------------------------------------
# App layout
app.layout = html.Div([
dcc.Store(id='storage-historical'),
dcc.Store(id='storage-quotes'),
dcc.Store(id='storage-option-chain-all'),
dbc.Navbar(
[
html.A(
# Use row and col to control vertical alignment of logo / brand
dbc.Row(
[
dbc.Col(dbc.NavbarBrand("TOS Options Wheel Dashboard", className="ml-2")),
],
align="center",
no_gutters=True,
),
href="#",
),
],
color="dark",
dark=True,
),
html.Div([
# Dropdown field of available stock tickers based on description input
html.Div([
dbc.Row([
dbc.Col(html.H5("Stock Description(s):", style={'text-align': 'left'}), width="auto"),
dbc.Col(
dbc.Checklist(
options=[
{"label": "Ticker Mode", "value": True},
],
value=[],
id="ticker_switch__input",
inline=True,
switch=True,
)
)
]),
dcc.Dropdown(
id="memory-ticker",
placeholder="Enter a valid stock name.",
multi=True,
style={'width': "100%"}
),
],
style={
'padding': '10px 5px'
}
),
html.Div([
dbc.Row(
[dbc.Col(
html.Div([
html.H5("ROI Range:"),
dcc.Dropdown(
id="memory-roi",
options=[
{"label": "More than 0.5%", "value": 0.5},
{"label": "More than 1%", "value": 1.00},
{"label": "More than 2%", "value": 2.00},
{"label": "More than 3%", "value": 3.00},
{"label": "Ignore ROI", "value": 0.00}
],
multi=False,
value=1.00
)
],
# style={'width': '30%', 'float': 'left', 'display': 'inline-block'}
)
),
dbc.Col(
html.Div([
html.H5("Delta Range:"),
dcc.Dropdown(
id="memory-delta",
options=[
{"label": "Less than 0.2", "value": 0.2},
{"label": "Less than 0.3", "value": 0.3},
{"label": "Less than 0.4", "value": 0.4},
{"label": "Ignore Delta", "value": 1.0}
],
multi=False,
value=1.00
)
],
# style={'width': '30%', 'display': 'inline-block'}
)
),
dbc.Col(
html.Div([
html.H5("Option Contract Type:"),
dcc.Dropdown(
id="memory-contract-type",
options=[
{"label": "Put", "value": "PUT"},
{"label": "Call", "value": "CALL"},
{"label": "All", "value": "ALL"}],
multi=False,
value="ALL"
)
],
# style={'width': '30%', 'float': 'right', 'display': 'inline-block'}
)
),
]
),
],
style={
# 'borderBottom': 'thin lightgrey solid',
'padding': '10px 5px'
}
),
html.Div([
dbc.Row(
[dbc.Col(
html.Div([
html.H5("Day(s) to Expiration:"),
dcc.Dropdown(
id="memory-expdays",
options=[
{"label": "0 - 7 days", "value": 7},
{"label": "0 - 14 days", "value": 14},
{"label": "0 - 21 days", "value": 21},
{"label": "0 - 28 days", "value": 28},
{"label": "0 - 35 days", "value": 35},
{"label": "0 - 42 days", "value": 42},
{"label": "0 - 49 days", "value": 49},
{"label": "0 - 56 days", "value": 56}
],
multi=False,
value=14.00
)
],
# style={'width': '30%', 'float': 'left', 'display': 'inline-block'}
)
),
dbc.Col(
html.Div([
html.H5("Confidence Level:"),
dcc.Dropdown(
id="memory-confidence",
options=[
{"label": "30% Confidence", "value": 0.3},
{"label": "35% Confidence", "value": 0.35},
{"label": "40% Confidence", "value": 0.4},
{"label": "45% Confidence", "value": 0.45},
{"label": "50% Confidence", "value": 0.5},
{"label": "55% Confidence", "value": 0.55},
{"label": "60% Confidence", "value": 0.6},
{"label": "65% Confidence", "value": 0.65},
{"label": "70% Confidence", "value": 0.7},
{"label": "75% Confidence", "value": 0.75},
{"label": "80% Confidence", "value": 0.8},
{"label": "85% Confidence", "value": 0.85},
{"label": "90% Confidence", "value": 0.9}
],
multi=False,
value=0.3
)
],
# style={'width': '30%', 'display': 'inline-block'}
)
),
dbc.Col(
html.Div([
html.H5("Hist. Volatility Period:"),
dcc.Dropdown(
id="memory-vol-period",
options=[
{"label": "2 Weeks", "value": "2W"},
{"label": "1 Month", "value": "1M"},
{"label": "3 Months", "value": "3M"},
{"label": "1 Year", "value": "1Y"}
],
multi=False,
value="1M"
)
],
# style={'width': '30%', 'display': 'inline-block'}
)
),
]
),
],
style={
# 'borderBottom': 'thin lightgrey solid',
'padding': '10px 5px'
}
),
html.Div([
dbc.Button("Submit", id='submit-button-state', color="info")
],
style={'margin-bottom': '10px',
'textAlign':'center',
'width': '220px',
'margin':'auto'
}
),
],
className="pretty_container",
# style={'padding-left': '50px',
# 'padding-right': '50px',
# }
),
html.Div([
html.Div([
dcc.Tabs(id='tabs_price_chart', value='price_tab_1', children=[
dcc.Tab(label='1 Day', value='price_tab_1', className='custom-tab'),
dcc.Tab(label='5 Days', value='price_tab_2', className='custom-tab'),
dcc.Tab(label='1 Month', value='price_tab_3', className='custom-tab'),
dcc.Tab(label='1 Year', value='price_tab_4', className='custom-tab'),
dcc.Tab(label='5 Years', value='price_tab_5', className='custom-tab'),
]),
dcc.Loading(
id="loading_price_chart",
type="default",
children=html.Div([
dcc.Graph(
id='price_chart',
figure={
'layout':{'title': {'text':'Price History'}}
},
config={"displayModeBar": False, "scrollZoom": True}
)
])
)
],
className="pretty_container",
style={'width': '48%', 'display': 'inline-block'}
),
html.Div([
dcc.Tabs(id='tabs_prob_chart', value='prob_cone_tab', children=[
dcc.Tab(label='Historical Volatility', value='prob_cone_tab', className='custom-tab'),
dcc.Tab(label='GBM Simulation', value='gbm_sim_tab', className='custom-tab'),
]),
dcc.Loading(
id="loading_prob_cone",
type="default",
children=html.Div([
dcc.Graph(
id='prob_cone_chart',
figure={
'layout':{'title': {'text':'Probability Cone'}}
},
config={"displayModeBar": False, "scrollZoom": True}
)
])
)
],
className="pretty_container",
style={'width': '48%', 'float': 'right', 'display': 'inline-block'}
)
]),
html.Div([
dcc.Dropdown(
id="memory_exp_day_graph",
placeholder="Select expiration date after entering the above fields.",
multi=False,
style={'width': "100%", 'padding-bottom': '10px',}
),
dcc.Loading(
id="loading_open_ir_vol",
type="default",
children=html.Div([
dcc.Graph(
id='open_ir_vol',
figure={
'layout':{'title': {'text':'Open Interest/Volume Plot'}}
},
config={"displayModeBar": False, "scrollZoom": True}
)
])
),
],
className="pretty_container",
),
html.Div([
html.H5("Ticker Data \u2754", id='ticker_data'), # Source: https://unicode.org/emoji/charts/full-emoji-list.html
dbc.Collapse(
dbc.Card(dbc.CardBody(dcc.Markdown('''
Call skew is defined as the price of 10% OTM calls/10% OTM puts for the next monthly option expiry. \n
A call skew of 1.3 means the 10% OTM call is 1.3x the price of 10% OTM put. \n
Potential Causes of Call Skew:
* Unusual and extreme speculation of stocks. Eg: TSLA battery day
* Stocks with limited downside but very high upside. Eg: Bankrupt stock trading at a fraction of book value, or SPACs near PIPE value
* High demand/Low supply for calls, driving up call prices.
* Low demand/High supply for puts, driving down put prices.
\n
Put skew is defined as the price of 10% OTM puts/10% OTM calls for the next monthly option expiry. \n
A put skew of 2.1 means the 10% OTM put is 2.1x the price of 10% OTM call. \n
Potential Causes of Put Skew:
* Insitutions using the Collar strategy, buying puts and selling calls to limit downside at the cost of upside.
* High demand/Low supply for puts, driving up put prices. Usual occurence during bear markets.
* Low demand/High supply for calls, driving down calls prices.
* Stock had recent huge upward price movements and is grossly overvalued, shifting the supply/demand curve for puts.
* Dividends contributes to put skew, especially if the dividend is a large percentage of stock price.
'''))),
id="ticker_table_collapse_content",
),
dcc.Loading(
id="loading_ticker-data-table",
type="default",
children=html.Div([
dash_table.DataTable(
id='ticker-data-table',
columns=ticker_df_columns,
page_current=0,
page_size=PAGE_SIZE,
page_action='custom',
sort_action='custom',
sort_mode='multi',
sort_by=[],
style_cell={'textAlign': 'left'},
style_data_conditional=[
{
'if': {
# 'column_id': 'bid_size',
'filter_query': '{liquidity} contains "FAILED"'
},
'backgroundColor': '#FF4136',
'color': 'white',
},
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
}
)
])
),
],
style={
'max-width': '1450px',
'padding': '10px 5px',
'margin': 'auto'
}
),
html.Div([
html.H5("Option Chain Data"),
dcc.Loading(
id="loading_option-chain-table",
type="default",
children=html.Div([
dash_table.DataTable(
id='option-chain-table',
columns=option_chain_df_columns,
page_current=0,
page_size=PAGE_SIZE,
page_action='custom',
sort_action='custom',
sort_mode='multi',
sort_by=[],
style_cell={'textAlign': 'left'},
style_data_conditional=[
{
'if': {
'column_id': 'bid_size',
'filter_query': '{bid_size} < {ask_size}'
},
'color': 'tomato',
'fontWeight': 'bold'
},
{
'if': {
'column_id': 'total_volume',
'filter_query': '{total_volume} > {open_interest}'
},
'color': 'green',
'fontWeight': 'bold'
},
{
'if': {'row_index': 'odd'},
'backgroundColor': 'rgb(248, 248, 248)'
}
],
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
}
)
])
),
],
style={
'max-width': '1450px',
'padding': '10px 5px',
'margin': 'auto'
}
)
])
# ------------------------------------------------------------------------------
# Connect the Plotly graphs with Dash Components
# Toggle collapsable content for ticker_data HTML element
@app.callback(
Output("ticker_table_collapse_content", "is_open"),
[Input("ticker_data", "n_clicks")],
[State("ticker_table_collapse_content", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
# Update Search bar to return possible listed equity options based on input value as a list
@app.callback(Output('memory-ticker', 'options'),
[Input('memory-ticker', 'search_value'), Input('ticker_switch__input','value')],
[State('memory-ticker', 'value')]
)
def update_search(search_value, ticker_switch, value):
if not search_value:
raise PreventUpdate
if ticker_switch:
json_data = tos_search(search_value, projection='symbol-search', apiKey=API_KEY)
else:
json_data = tos_search(search_value, projection='desc-search', apiKey=API_KEY)
try:
#To-do: Dynamic Options (https://dash.plotly.com/dash-core-components/dropdown)
options = [{"label": dict_item['description'] + ' (Symbol: ' + dict_item['symbol'] + ')', "value": dict_item['symbol']} for dict_item in json_data.values()]
if value is not None:
for selection in value:
options.append({"label":selection, "value":selection})
return options
except:
return []
# Temporarily stores JSON data in the browser (generally safe to store up to 2MB of data)
@app.callback(Output('storage-historical', 'data'),
[Input('submit-button-state', 'n_clicks')],
[State('memory-ticker', 'value')])
def get_historical_prices(n_clicks, ticker_ls):
if ticker_ls is None:
raise PreventUpdate
json_data = {}
for ticker in ticker_ls:
json_data[ticker] = tos_get_price_hist(ticker, apiKey=API_KEY)
return json_data
# Temporarily stores JSON data in the browser (generally safe to store up to 2MB of data)
@app.callback(Output('storage-quotes', 'data'),
[Input('submit-button-state', 'n_clicks')],
[State('memory-ticker', 'value')])
def get_price_quotes(n_clicks, ticker_ls):
if ticker_ls is None:
raise PreventUpdate
ticker_query = ""
for ticker in ticker_ls:
ticker_query += ticker + ","
return tos_get_quotes(ticker_query, apiKey=API_KEY)
# Temporarily stores JSON data in the browser (generally safe to store up to 2MB of data)
@app.callback(Output('storage-option-chain-all', 'data'),
[Input('submit-button-state', 'n_clicks')],
[State('memory-ticker', 'value')])
def get_option_chain_all(n_clicks, ticker_ls):
if ticker_ls is None:
raise PreventUpdate
json_data = {}
for ticker in ticker_ls:
json_data[ticker] = tos_get_option_chain(ticker, contractType='ALL', rangeType='ALL', apiKey=API_KEY)
return json_data
# Update Ticker Table from API Response call
@app.callback(Output('ticker-data-table', 'data'),
[Input('submit-button-state', 'n_clicks'), Input('storage-historical', 'data'), Input('storage-option-chain-all', 'data'), Input('ticker-data-table', "page_current"), Input('ticker-data-table', "page_size"), Input('ticker-data-table', "sort_by")],
[State('memory-ticker', 'value')])
def on_data_set_ticker_table(n_clicks, hist_data, optionchain_data, page_current, page_size, sort_by, ticker_ls):
# Define empty list to be accumulate into Pandas dataframe (Source: https://stackoverflow.com/questions/10715965/add-one-row-to-pandas-dataframe)
insert = []
if ticker_ls is None:
raise PreventUpdate
for ticker in ticker_ls:
option_chain_response = optionchain_data[ticker]
hist_price = hist_data[ticker]
# Sanity check on API response data
if option_chain_response is None or list(option_chain_response.keys())[0] == "error":
raise PreventUpdate
# Create and append a list of historical share prices of specified ticker
PRICE_LS = create_pricelist(hist_price)
trailing_3mth_price_hist = PRICE_LS[-90:]
trailing_1mth_price_hist = PRICE_LS[-30:]
trailing_2wks_price_hist = PRICE_LS[-14:]
hist_volatility_1Y = get_hist_volatility(PRICE_LS)
hist_volatility_3m = get_hist_volatility(trailing_3mth_price_hist)
hist_volatility_1m = get_hist_volatility(trailing_1mth_price_hist)
hist_volatility_2w = get_hist_volatility(trailing_2wks_price_hist)
stock_price = option_chain_response['underlyingPrice']
stock_price_110percent = stock_price * 1.1
stock_price_90percent = stock_price * 0.9
# Process API response data from https://developer.tdameritrade.com/option-chains/apis/get/marketdata/chains into Dataframe
# Calculation for put call skew: https://app.fdscanner.com/aboutskew
low_call_strike, high_call_strike, low_put_strike, high_put_strike = None, None, None, None
for option_chain_type in ['call','put']:
for exp_date in option_chain_response[f'{option_chain_type}ExpDateMap'].keys():
# Note: example of exp_date is '2020-12-24:8' where 8 is the days to expiry
day_diff = int(exp_date.split(':')[1])
if day_diff < 28 or day_diff >= 35:
continue
# Define boolean variables because option chain is read in acsending order based on strike price
high_call_strike_found = False
high_put_strike_found = False
for strike in option_chain_response[f'{option_chain_type}ExpDateMap'][exp_date].values():
strike_price = strike[0]['strikePrice']
if option_chain_type == 'call':
if strike_price < stock_price_90percent:
low_call_strike = strike_price
low_call_strike_bid = strike[0]['bid']
low_call_strike_ask = strike[0]['ask']
elif strike_price > stock_price_110percent:
if not high_call_strike_found:
high_call_strike = strike_price
high_call_strike_bid = strike[0]['bid']
high_call_strike_ask = strike[0]['ask']
high_call_strike_found = True
elif option_chain_type == 'put':
if strike_price < stock_price_90percent:
low_put_strike = strike_price
low_put_strike_bid = strike[0]['bid']
low_put_strike_ask = strike[0]['ask']
elif strike_price > stock_price_110percent:
if not high_put_strike_found:
high_put_strike = strike_price
high_put_strike_bid = strike[0]['bid']
high_put_strike_ask = strike[0]['ask']
high_put_strike_found = True
        # If no usable reference strikes were found, abort the update so misleading skew data is not displayed
strike_checklist = [low_call_strike, high_call_strike, low_put_strike, high_put_strike]
if (all(item is None for item in strike_checklist)):
raise PreventUpdate
# Ensuring options pass liquidity checks
        prevent_zero_div = lambda x, y: 0 if (y == 0 or y is None) else x/y
high_call_strike_askbid = prevent_zero_div(high_call_strike_ask, high_call_strike_bid)
high_put_strike_askbid = prevent_zero_div(high_put_strike_ask, high_put_strike_bid)
low_call_strike_askbid = prevent_zero_div(low_call_strike_ask, low_call_strike_bid)
low_put_strike_askbid = prevent_zero_div(low_put_strike_ask, low_put_strike_bid)
askbid_checklist = [high_call_strike_askbid, high_put_strike_askbid, low_call_strike_askbid, low_put_strike_askbid]
liquidity_check = all(askbid > 1.25 for askbid in askbid_checklist)
if liquidity_check:
liquidity = 'FAILED'
else:
liquidity = 'PASSED'
# Computing option midpoints
high_call_strike_midpoint = (high_call_strike_bid + high_call_strike_ask)/2
high_put_strike_midpoint = (high_put_strike_bid + high_put_strike_ask)/2
low_call_strike_midpoint = (low_call_strike_bid + low_call_strike_ask)/2
low_put_strike_midpoint = (low_put_strike_bid + low_put_strike_ask)/2
# Computing Interpolated Price
call_110percent_price = low_call_strike_midpoint - (low_call_strike_midpoint - high_call_strike_midpoint)/(high_call_strike-low_call_strike) * (stock_price_110percent-low_call_strike)
put_90percent_price = low_put_strike_midpoint + (high_put_strike_midpoint - low_put_strike_midpoint)/(high_put_strike - low_put_strike) * (stock_price_90percent - low_put_strike)
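        # linear interpolation between the low and high reference strikes to estimate option prices at exactly 110% (calls) and 90% (puts) of spot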
# Calculate Skew
if put_90percent_price > call_110percent_price:
skew_category = 'Put Skew'
skew = round(put_90percent_price/call_110percent_price,3)
else:
skew_category = 'Call Skew'
skew = round(call_110percent_price/put_90percent_price,3)
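        # worked example (hypothetical prices): put_90percent_price = 2.10 and call_110percent_price = 1.00 -> 'Put Skew' with skew = 2.1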
insert.append([ticker, hist_volatility_1Y, hist_volatility_3m, hist_volatility_1m, hist_volatility_2w, skew_category, skew, liquidity])
# Create Empty Dataframe to be populated
df = | pd.DataFrame(insert, columns=[column['id'] for column in ticker_df_columns]) | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
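        e.g. ld50_bird = 100, aw_bird = 15, tw_bird_ld50 = 175 and mineau_sca_fact = 1.15 give 100 * (15/175)**0.15, i.e. approx. 69.18 (the first expected result below)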
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
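        e.g. app_rate = 0.34, frac_act_ing = 0.15, density = 8.33 and noaec_bird = 5 give m_s_a_r of approx. 33.19 and a risk quotient of approx. 6.64 (the first expected result below)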
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reitierate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
            #reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
            # following parameter values are unique for ld50_rg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
calculations; if desired other routines could be modified similarly
--comparing this method with 'test_ld50_rg_bird' it appears (for this test) that both run in the same time
--but I don't think this would be the case when 100's of model simulation runs are executed (and only a small
--number of the application_types apply to this method; thus I conclude we continue to use the non-vectorized
--approach -- should be revisited when we have a large run to execute
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
            # following parameter values are unique for ld50_rg_bird1
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_bird(self):
"""
# unit test for function ld50_rl_bird (LD50ft-2 for Row/Band/In-furrow liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 2.20701, 0.0363297], dtype='float')
try:
            # following parameter values are unique for ld50_rl_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_mamm(self):
"""
unittest for function at_mamm:
adjusted_toxicity = self.ld50_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([705.5036, 529.5517, 830.6143], dtype='float')
try:
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.ld50_mamm)):
result[i] = trex_empty.at_mamm(i, trex_empty.aw_mamm_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
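    # Worked example (illustrative only, using the docstring formula and the first row of inputs above):
    # adjusted_toxicity = 321. * ((350. / 15.) ** 0.25) ~= 321. * 2.198 ~= 705.5, matching expected_results[0].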
def test_anoael_mamm(self):
"""
unittest for function anoael_mamm:
adjusted_toxicity = self.noael_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([5.49457, 9.62821, 2.403398], dtype='float')
try:
trex_empty.noael_mamm = pd.Series([2.5, 5.0, 1.25], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.anoael_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
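    # Worked example (illustrative only): adjusted_toxicity = 2.5 * ((350. / 15.) ** 0.25) ~= 2.5 * 2.198 ~= 5.495,
    # matching the first expected value (5.49457).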
def test_fi_mamm(self):
"""
unittest for function fi_mamm:
food_intake = (0.621 * (aw_mamm ** 0.564)) / (1 - mf_w_mamm)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([3.17807, 16.8206, 42.28516], dtype='float')
try:
trex_empty.mf_w_mamm_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_mamm(trex_empty.aw_mamm_sm, trex_empty.mf_w_mamm_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
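    # Worked example (illustrative only): food_intake = (0.621 * (15. ** 0.564)) / (1 - 0.1) ~= 2.860 / 0.9 ~= 3.178,
    # matching the first expected value (3.17807).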
def test_ld50_bl_mamm(self):
"""
# unit test for function ld50_bl_mamm (LD50ft-2 for broadcast liquid)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
            trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
import os
from math import floor, ceil
from pprint import pprint
import csv
import argparse
import simuran
import pandas as pd
import matplotlib.pyplot as plt
import astropy.units as u
from neurochat.nc_lfp import NLfp
import numpy as np
from scipy.signal import coherence
from skm_pyutils.py_table import list_to_df, df_from_file, df_to_file
from skm_pyutils.py_config import parse_args
import seaborn as sns
from scipy.signal import welch
try:
from lfp_atn_simuran.analysis.lfp_clean import LFPClean
do_analysis = True
except ImportError:
do_analysis = False
from neuronal.decoding import LFPDecoder
def decoding(lfp_array, groups, labels, base_dir):
for group in ["Control", "Lesion (ATNx)"]:
correct_groups = groups == group
lfp_to_use = lfp_array[correct_groups, :]
labels_ = labels[correct_groups]
decoder = LFPDecoder(
labels=labels_,
mne_epochs=None,
features=lfp_to_use,
cv_params={"n_splits": 100},
)
out = decoder.decode()
print(decoder.decoding_accuracy(out[2], out[1]))
print("\n----------Cross Validation-------------")
decoder.cross_val_decode(shuffle=False)
pprint(decoder.cross_val_result)
pprint(decoder.confidence_interval_estimate("accuracy"))
print("\n----------Cross Validation Control (shuffled)-------------")
decoder.cross_val_decode(shuffle=True)
pprint(decoder.cross_val_result)
pprint(decoder.confidence_interval_estimate("accuracy"))
random_search = decoder.hyper_param_search(verbose=True, set_params=False)
print("Best params:", random_search.best_params_)
decoder.visualise_features(output_folder=base_dir, name=f"_{group}")
def main(
excel_location,
base_dir,
plot_individual_sessions,
do_coherence=True,
do_decoding=True,
overwrite=False,
):
# Setup
df = df_from_file(excel_location)
cfg = simuran.parse_config()
delta_min = cfg["delta_min"]
delta_max = cfg["delta_max"]
theta_min = cfg["theta_min"]
theta_max = cfg["theta_max"]
clean_method = cfg["clean_method"]
clean_kwargs = cfg["clean_kwargs"]
window_sec = 0.5
fmin, fmax = 2.0, 40
max_lfp_lengths_seconds = {"start": 20, "choice": (3.5, 0.5), "end": 10}
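    # The dictionary above bounds how much LFP is analysed around each t-maze phase (see the slicing logic below):
    # "start" keeps up to 20 s ending where the pre-choice window begins, "choice" keeps 3.5 s before to 0.5 s after
    # the choice point, and "end" keeps up to 10 s beginning where the post-choice window ends.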
ituples = df.itertuples()
num_rows = len(df)
no_pass = False
if "passed" not in df.columns:
print("Please add passed as a column to the df.")
no_pass = True
results = []
coherence_df_list = []
base_dir_new = os.path.dirname(excel_location)
here = os.path.dirname(os.path.abspath(__file__))
decoding_loc = out_name = os.path.join(
here, "..", "sim_results", "tmaze", "lfp_decoding.csv"
)
lfp_len = 6
hf = lfp_len // 2
new_lfp = np.zeros(shape=(num_rows // 2, lfp_len))
groups = []
choices = []
pxx_arr = []
oname_coherence = os.path.join(
here, "..", "sim_results", "tmaze", "coherence_full.csv"
)
oname_power_tmaze = os.path.join(
here, "..", "sim_results", "tmaze", "power_tmaze_full.csv"
)
split = os.path.splitext(os.path.basename(excel_location))
o_name_res = os.path.join(
here, "..", "sim_results", "tmaze", split[0] + "_results" + split[1]
)
# Load existing data if instructed to and it exists
os.makedirs(os.path.dirname(oname_coherence), exist_ok=True)
skip = (
(os.path.exists(decoding_loc))
and (not overwrite)
and (os.path.exists(oname_coherence))
)
if skip:
with open(decoding_loc, "r") as f:
csvreader = csv.reader(f, delimiter=",")
for i, row in enumerate(csvreader):
groups.append(row[0])
choices.append(row[1])
vals = row[2:]
new_lfp[i] = np.array([float(v) for v in vals[:lfp_len]])
coherence_df = df_from_file(oname_coherence)
power_df = df_from_file(oname_power_tmaze)
res_df = df_from_file(o_name_res)
## Extract LFP, do coherence, and plot
if not skip:
for j in range(num_rows // 2):
# row1 is the forced movement
row1 = next(ituples)
# row2 is the choice trial
row2 = next(ituples)
# Load the t-maze data
recording_location = os.path.join(base_dir, row1.location)
recording_location = recording_location.replace("--", os.sep)
param_file = os.path.join(here, "..", "recording_mappings", row1.mapping)
recording = simuran.Recording(
param_file=param_file, base_file=recording_location, load=False
)
lfp_clean = LFPClean(method=clean_method, visualise=False)
recording.signals.load()
sig_dict = lfp_clean.clean(
recording, min_f=fmin, max_f=fmax, method_kwargs=clean_kwargs
)["signals"]
x = np.array(sig_dict["SUB"].samples.to(u.mV))
duration = len(x) / 250
y = np.array(sig_dict["RSC"].samples.to(u.mV))
fig, ax = plt.subplots()
fs = sig_dict["SUB"].sampling_rate
# Setup and loading done -- Analyse the t-maze data
if do_coherence:
# Coherence over the whole recording
f, Cxy = coherence(x, y, fs, nperseg=window_sec * 250)
                # restrict both arrays to the analysis band with a single mask so they stay aligned
                band_mask = np.nonzero((f >= fmin) & (f <= fmax))
                f = f[band_mask]
                Cxy = Cxy[band_mask]
theta_co = Cxy[np.nonzero((f >= theta_min) & (f <= theta_max))]
delta_co = Cxy[np.nonzero((f >= delta_min) & (f <= delta_max))]
max_theta_coherence_ = np.nanmean(theta_co)
max_delta_coherence_ = np.nanmean(delta_co)
if plot_individual_sessions:
# Used to plot t-maze sessions - mostly for verification
recording.spatial.load()
spatial = recording.spatial.underlying
fig, ax = plt.subplots()
for k_, r in enumerate(
(
row1,
row2,
)
):
if k_ == 0:
trial_type = "forced"
else:
trial_type = "choice"
# Parse out the times
t1, t2, t3 = r.start, r.choice, r.end
                # Make sure there were no parsing errors
if t3 > duration:
raise RuntimeError(
"Last time {} greater than duration {}".format(t3, duration)
)
# Convert these times into LFP samples
lfpt1, lfpt2, lfpt3 = (
int(floor(t1 * fs)),
int(ceil(t2 * fs)),
int(ceil(t3 * fs)),
)
# Split the LFP into three parts, the start, choice, and end
lfp_portions = {}
time_dict = {
"start": (lfpt1, lfpt2, lfpt2),
"choice": (lfpt1, lfpt2, lfpt3),
"end": (lfpt2, lfpt3, lfpt3),
}
for k, v in max_lfp_lengths_seconds.items():
max_len = v
start_time = time_dict[k][0]
choice_time = time_dict[k][1]
end_time = time_dict[k][2]
if k == "start":
# If the start bit is longer than max_len, take the last X
# seconds before the choice data
ct = max_lfp_lengths_seconds["choice"][0]
end_time = max(end_time - int(floor(ct * fs)), start_time)
natural_start_time = end_time - max_len * fs
start_time = max(natural_start_time, start_time)
elif k == "choice":
# For the choice, take (max_len[0], max_len[1]) seconds
# of data around the point
left_push = int(floor(v[0] * fs))
right_push = int(ceil(v[1] * fs))
start_time = max(choice_time - left_push, start_time)
end_time = min(choice_time + right_push, end_time)
elif k == "end":
# For the end time, if the end is longer than max_len, take the first X seconds after the choice data
ct = max_lfp_lengths_seconds["choice"][1]
start_time = min(start_time + int(ceil(ct * fs)), end_time)
natural_end_time = start_time + max_len * fs
end_time = min(natural_end_time, end_time)
else:
raise RuntimeError(f"Unsupported key {k}")
# Make sure have at least 1 second
if (end_time - start_time) < fs:
end_time = start_time + fs
if end_time > int(ceil(duration * 250)):
raise RuntimeError(
"End time {} greater than duration {}".format(
end_time, duration
)
)
lfp_portions[k] = (start_time, end_time)
if j % 20 == 0:
print(f"On iteration {j} of {num_rows // 2} -- trial {trial_type}")
for k in lfp_portions.keys():
print(
"{}: {} -- {}".format(
k,
np.array(time_dict[k]) / 250,
np.array(lfp_portions[k]) / 250,
)
)
print("----------------------")
if plot_individual_sessions:
if r.test == "first":
c = "k"
else:
c = "r"
st1, st2 = int(floor(t1 * 50)), int(ceil(t3 * 50))
x_time = spatial.get_pos_x()[st1:st2]
y_time = spatial.get_pos_y()[st1:st2]
c_end = int(floor(t2 * 50))
spat_c = (spatial.get_pos_x()[c_end], spatial.get_pos_y()[c_end])
ax.plot(x_time, y_time, c=c, label=r.test)
ax.plot(spat_c[0], spat_c[1], c="b", marker="x", label="decision")
ax.plot(x_time[0], y_time[0], c="b", marker="o", label="start")
ax.plot(x_time[-1], y_time[-1], c="b", marker=".", label="end")
if do_coherence:
res_dict = {}
# Power
for k, v in lfp_portions.items():
for region, signal in sig_dict.items():
lfpt1, lfpt2 = v
lfp = NLfp()
lfp.set_channel_id(signal.channel)
lfp._timestamp = np.array(
signal.timestamps[lfpt1:lfpt2].to(u.s)
)
lfp._samples = np.array(
signal.samples[lfpt1:lfpt2].to(u.mV)
)
lfp._record_info["Sampling rate"] = signal.sampling_rate
delta_power = lfp.bandpower(
[delta_min, delta_max],
window_sec=window_sec,
band_total=True,
)
theta_power = lfp.bandpower(
[theta_min, theta_max],
window_sec=window_sec,
band_total=True,
)
res_dict["{}-{}_delta".format(region, k)] = delta_power[
"relative_power"
]
res_dict["{}-{}_theta".format(region, k)] = theta_power[
"relative_power"
]
sub_s = sig_dict["SUB"]
rsc_s = sig_dict["RSC"]
x = np.array(sub_s.samples[lfpt1:lfpt2].to(u.mV))
y = np.array(rsc_s.samples[lfpt1:lfpt2].to(u.mV))
f, Cxy = coherence(x, y, fs, nperseg=window_sec * 250, nfft=256)
                        band_mask = np.nonzero((f >= fmin) & (f <= fmax))
                        f = f[band_mask]
                        Cxy = Cxy[band_mask]
if do_decoding:
if k == "choice":
coherence_vals_for_decode = Cxy[
np.nonzero((f >= theta_min) & (f <= theta_max))
]
s, e = (k_) * hf, (k_ + 1) * hf
new_lfp[j, s:e] = coherence_vals_for_decode
theta_co = Cxy[np.nonzero((f >= theta_min) & (f <= theta_max))]
delta_co = Cxy[np.nonzero((f >= delta_min) & (f <= delta_max))]
max_theta_coherence = np.nanmean(theta_co)
max_delta_coherence = np.nanmean(delta_co)
theta_co_peak = Cxy[np.nonzero((f >= 11.0) & (f <= 13.0))]
peak_theta_coherence = np.nanmax(theta_co_peak)
if trial_type == "forced":
final_trial_type = "Forced"
else:
if r.passed.strip().upper() == "Y":
final_trial_type = "Correct"
elif r.passed.strip().upper() == "N":
final_trial_type = "Incorrect"
else:
final_trial_type = "ERROR IN ANALYSIS"
res_list = [
r.location,
r.session,
r.animal,
r.test,
r.passed.strip(),
k,
final_trial_type,
]
res_list += [
res_dict[f"SUB-{k}_delta"],
res_dict[f"SUB-{k}_theta"],
res_dict[f"RSC-{k}_delta"],
res_dict[f"RSC-{k}_theta"],
]
res_list += [max_theta_coherence, max_delta_coherence]
res_list += [
max_theta_coherence_,
max_delta_coherence_,
peak_theta_coherence,
]
if no_pass is False:
group = (
"Control"
if r.animal.lower().startswith("c")
else "Lesion (ATNx)"
)
if do_coherence:
for f_, cxy_ in zip(f, Cxy):
coherence_df_list.append(
(
f_,
cxy_,
r.passed.strip(),
group,
r.test,
r.session,
k,
final_trial_type,
)
)
f_welch, Pxx = welch(
x,
fs=fs,
nperseg=window_sec * 250,
return_onesided=True,
scaling="density",
average="mean",
)
                                band_mask = np.nonzero((f_welch >= fmin) & (f_welch <= fmax))
                                f_welch = f_welch[band_mask]
                                Pxx = Pxx[band_mask]
# Convert to full scale relative dB (so max at 0)
Pxx_max = np.max(Pxx)
Pxx = 10 * np.log10(Pxx / Pxx_max)
for p_val, f_val in zip(Pxx, f_welch):
pxx_arr.append(
[
f_val,
p_val,
r.passed.strip(),
group,
k,
final_trial_type,
]
)
res_list += [group]
results.append(res_list)
name = os.path.splitext(r.location)[0]
if plot_individual_sessions:
fig2, ax2 = plt.subplots(3, 1)
ax2[0].plot(f, Cxy, c="k")
ax2[1].plot([i / 250 for i in range(len(x))], x, c="k")
ax2[2].plot([i / 250 for i in range(len(y))], y, c="k")
base_dir_new = os.path.dirname(excel_location)
fig2.savefig(
os.path.join(
base_dir_new,
"coherence_{}_{}_{}.png".format(
row1.location, r.session, r.test
),
)
)
plt.close(fig2)
if do_decoding:
groups.append(group)
choices.append(str(r.passed).strip())
if plot_individual_sessions:
ax.invert_yaxis()
ax.legend()
base_dir_new = os.path.dirname(excel_location)
figname = os.path.join(base_dir_new, name) + "_tmaze.png"
fig.savefig(figname, dpi=400)
plt.close(fig)
if do_coherence and not skip:
# Save the results
headers = [
"location",
"session",
"animal",
"test",
"choice",
"part",
"trial",
"SUB_delta",
"SUB_theta",
"RSC_delta",
"RSC_theta",
"Theta_coherence",
"Delta_coherence",
"Full_theta_coherence",
"Full_delta_coherence",
"Peak 12Hz Theta coherence",
"Group",
]
        res_df = pd.DataFrame(results, columns=headers)
# -*- coding: utf-8 -*-
"""Main module."""
### Libraries ###
import pandas as pd
from datetime import datetime
import croissance
from croissance import process_curve
from croissance.estimation.outliers import remove_outliers
import re
import os
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from scipy.optimize import curve_fit
from croissance.estimation.util import with_overhangs
from croissance.estimation import regression
from pandas import Series
import subprocess
import sys
from scipy import interpolate
import math
import argparse
from matplotlib.legend_handler import HandlerLine2D
import seaborn as sns
import xlsxwriter
import datetime
#TEST
__version__ = "0.1.1"
def argument_parser(argv_list=None):
    '''Assesses the input arguments and outputs flags to run the different functions according to the user's needs.
Args:
argv_list: List of arguments provided by the user when running the program.
Returns:
flags
'''
#Initialize the argument parser
parser = argparse.ArgumentParser()
#Adding general arguments
parser.add_argument("-e", "--estimations", help="Get only the estimations for every sample", action = "store_true")
parser.add_argument("-f", "--figures", help="Get only the growth curve figures", action = "store_true")
parser.add_argument("-s", "--summary", help="Get only the summary of growth rate estimations",action = "store_true")
parser.add_argument("-it", "--interpolation", help="Get interpolation of growth rate measurements with given od", action = "store_true")
#Visualization arguments
parser.add_argument("-b", "--bioshaker", help="Get one growth rate figure for every individual bioshaker", action = "store_true")
parser.add_argument("-i", "--individual", help="Get one growth rate figure for every individual sample", action = "store_true")
parser.add_argument("-bc", "--bioshakercolor", help="Get one growth rate figure for every species colored by bioshaker", action = "store_true")
#Volume loss related arguments
parser.add_argument("-v", "--volumeloss", help="Volume loss compesation is not computed", action = "store_false")
parser.parse_args()
args = parser.parse_args(argv_list[1:])
#Create flags
if args.estimations == False and args.figures == False and args.summary == False :
flag_all = True
flag_est = False
flag_sum = False
flag_fig = False
flag_ind = args.individual
flag_bioshakercolor = args.bioshakercolor
flag_volume_loss = args.volumeloss
flag_bioshaker = args.bioshaker
flag_interpolation = args.interpolation
elif args.estimations == True or args.figures == True or args.summary == True :
flag_all = False
flag_est = args.estimations
flag_sum = args.summary
flag_fig = args.figures
flag_ind = args.individual
flag_bioshakercolor = args.bioshakercolor
flag_volume_loss = args.volumeloss
flag_bioshaker = args.bioshaker
flag_interpolation = args.interpolation
return flag_all, flag_est, flag_sum, flag_fig, flag_ind, flag_bioshakercolor, args.volumeloss, flag_bioshaker, flag_interpolation
# ------ PARSE THE DATA USING THE autoflow_parser LIBRARY ------
def parse_data() :
'''Calls the autoflow_parser and returns a merged xlsx document with all the OD readings combined'''
try :
call = subprocess.call("autoflow_parser",shell = True)
except :
return sys.exit("The data could not be parsed due to some error, check the input documentation")
return call
# ------ DATA LOADING AND VARIABLE SELECTION ------
def read_xlsx(filename = "results.xlsx") : #done
    '''Reads an .xlsx file and returns a dataframe with the relevant variables. The parser output is named "results.xlsx" by default, so calling this method without arguments reads that file.'''
try :
#Open data file
df = pd.read_excel(filename)
except FileNotFoundError :
return sys.exit("Could not find the parsed data file (XLSX extension)")
#Select valuable columns
cols = ["Sample ID", "Measurement", "Measurement type", "Sampling time","Sampling date"] #relevant variables
df = df[cols]
df.columns = df.columns.str.replace(' ', '_') #header spaces replaced with underscore
return df
# ------ SEPARATION OF GROWTH RATE SAMPLE AND VOLUME LOSS SAMPLES ------
def sample_outcome(sample_file, df) : #done
    '''Uses an external file containing individual sample purposes, returns two classified dataframes based on sample purposes and labelled by bioshaker.
Args:
sample_file: variable or string containing the name of the file and its extension.
df: dataframe obtained by using the read_xlsx method on the merged xlsx file.
Returns:
df_gr: dataframe containing observations related to the microbial growth rate, labelled by bioshaker.
df_vl: dataframe containing observations related to the volume loss estimation, labelled by bioshaker.
'''
#Open the file containing sample purposes
df_calc = pd.read_csv(sample_file, sep="\t") #Info about the purpose of the sample (growth rate, volume loss compensation, species and drop-out samples)
df_calc = df_calc[df_calc.Drop_out == False]
#Add species and bioshaker labels to every observation
cols=["Sample_ID", "Species"]
temp_df = df_calc[cols]
df = pd.merge(df, temp_df, how="left", on="Sample_ID")
df["bioshaker"] = df["Sample_ID"].str[0:3]
#Separate samples for growth rate or volume loss according to calc.tsv
gr_samples = df_calc.loc[df_calc["calc_gr"] == True]
gr_samples = gr_samples["Sample_ID"].tolist()
vol_loss_samples = df_calc.loc[df_calc["calc_volumeloss"] == True]
vol_loss_samples = vol_loss_samples["Sample_ID"].tolist()
#Separate initial dataframe in 2
df_gr = df[df.Sample_ID.isin(gr_samples)]
df_gr = df_gr.loc[df_gr["Measurement_type"] == "OD600"]
df_vl = df[df.Sample_ID.isin(vol_loss_samples)]
df_vl = df_vl.loc[df_vl["Measurement_type"] == "OD450"]
return df_gr, df_vl
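# Note: the sample purpose file (e.g. "calc.tsv") is expected to be tab-separated and to contain at least the
# columns used above: Sample_ID, Species, Drop_out, calc_gr and calc_volumeloss.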
# ------- GROWTH RATE FORMATTING FOR SUITABLE CROISSANCE ANALYSIS INPUT ------
def time_formater(df) :
'''Takes a dataframe and turns date and time variables into differential time in hours for every bioshaker, returns modified dataframe.
Args:
df: dataframe with containing date and time measurements.
Returns:
df_out: dataframe with differential time measurements in hours.
'''
#Get list of bioshakers
unique_bioshakers = df["bioshaker"].unique()
#Measurement type variable removal
df = df.drop(columns=["Measurement_type"])
#Initialize empty output dataframe
df_out = pd.DataFrame()
for bioshaker in unique_bioshakers :
#Subset initial dataframe by bioshaker
df_temp = df.loc[df["bioshaker"] == bioshaker]
unique_date = df["Sampling_date"].unique()
#Merge date and time variable to datetime format
df_temp["date_time"] = df_temp["Sampling_time"]+" "+df_temp["Sampling_date"]
df_temp["date_time"] = pd.to_datetime(df_temp["date_time"])
#Substracting the time of the first obs to all obs
df_temp['time_hours'] = df_temp["date_time"] - df_temp.loc[df_temp.index[0], 'date_time']
df_temp['h'] = df_temp['time_hours'].dt.components['hours']
df_temp['m'] = df_temp['time_hours'].dt.components['minutes']
df_temp['s'] = df_temp['time_hours'].dt.components['seconds']
df_temp["time_hours"] = df_temp['h']+df_temp['m']/60+df_temp['s']/360
#df_temp["time_hours"] = df_temp["time_hours"].dt.total_seconds()/3600
#Append dataframes together
df_out = df_out.append(df_temp)
#Removal of temporary variables
df_out = df_out.drop_duplicates()
#df_out = df_out.drop(columns=["Sampling_date", "Sampling_time"])
#df_out = df_out.drop(columns=["date_time"])
return df_out
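# Illustrative usage sketch (file names are examples only):
# df_gr, df_vl = sample_outcome("calc.tsv", read_xlsx("results.xlsx"))
# df_gr = time_formater(df_gr) # adds a "time_hours" column measured from each bioshaker's first observation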
# ------ VOLUME LOSS CORRELATION FOR EVERY SAMPLE ------
def vol_correlation(df_vl): #done
    ''' Assesses the volume loss with OD450 measurements and correlates the OD450 readings to time for every bioshaker, returning a correlation dataframe.
Args:
df_vl: dataframe containing only volume loss measurements.
Returns:
cor_df: dataframe containing correlation values of the volume loss according to time measurements.
'''
#Subset initial df_raw according to OD measurement and get unique IDs
unique_IDs_vl = df_vl["Sample_ID"].unique()
unique_bioshaker = df_vl["bioshaker"].unique()
    cor_df = pd.DataFrame()
# =============================================================================
# File: get_fees.py
# Author: <NAME>
# Created: 12 Jun 2017
# Last Modified: 12 Jun 2017
# Description: description
# =============================================================================
import requests
import pandas as pd
from bs4 import BeautifulSoup
def get_fee_number(fee_string):
fee_list = [s for s in fee_string.split()]
return float(fee_list[-2])
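# Note (assumption): this helper expects fee strings ending with "<value> <unit>" (e.g. "Withdrawal fee 0.0005 BTC"),
# so the numeric value is the second-to-last whitespace-separated token.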
def get_fee_df(url):
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
tag_texts = [
tag.text.strip() for tag in soup.find_all("td")
if len(tag.text.strip()) > 0
]
coins = tag_texts[0::2]
fees = tag_texts[1::2]
    df = pd.DataFrame()
import datetime
import fiona
import geopandas as gpd
import jinja2
import logging
import numpy as np
import pandas as pd
import random
import requests
import sqlite3
import sys
import time
import yaml
from collections import ChainMap, defaultdict
from operator import attrgetter, itemgetter
from osgeo import ogr, osr
from pathlib import Path
from shapely.geometry import LineString, Point
from sqlalchemy import create_engine, exc as sqlalchemy_exc
from sqlalchemy.engine.base import Engine
from tqdm import tqdm
from tqdm.auto import trange
from typing import Any, Dict, List, Type, Union
# Set logger.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S"))
logger.addHandler(handler)
# Enable ogr exceptions.
ogr.UseExceptions()
# Define globally accessible variables.
filepath = Path(__file__).resolve()
distribution_format_path = filepath.parent / "distribution_format.yaml"
field_domains_path = {lang: filepath.parent / f"field_domains_{lang}.yaml" for lang in ("en", "fr")}
class Timer:
"""Tracks stage runtime."""
def __init__(self) -> None:
"""Initializes the Timer class."""
self.start_time = None
def __enter__(self) -> None:
"""Starts the timer."""
logger.info("Started.")
self.start_time = time.time()
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
"""
Computes and returns the elapsed time.
:param Any exc_type: required parameter for __exit__.
:param Any exc_val: required parameter for __exit__.
:param Any exc_tb: required parameter for __exit__.
"""
total_seconds = time.time() - self.start_time
delta = datetime.timedelta(seconds=total_seconds)
logger.info(f"Finished. Time elapsed: {delta}.")
def apply_domain(series: pd.Series, domain: dict, default: Any) -> pd.Series:
"""
Applies a domain restriction to the given Series based on a domain dictionary.
Replaces missing or invalid values with the default value.
    Non-dictionary domains are treated as Null: in that case values are left as-is, except for Null types and empty
    strings, which are replaced with the default value.
:param pd.Series series: Series.
:param dict domain: dictionary of acceptable domain values.
:param Any default: default value.
:return pd.Series: Series with enforced domain restriction.
"""
# Validate against domain dictionary.
if isinstance(domain, dict):
# Convert keys to lowercase strings.
domain = {str(k).lower(): v for k, v in domain.items()}
# Configure lookup function, convert invalid values to default.
def get_value(val: Any) -> Any:
"""
Retrieves a domain dictionary value for a given key, non-matches return the default value.
:param Any val: lookup key.
:return Any: corresponding domain value or the default value.
"""
try:
return domain[str(val).lower()]
except KeyError:
return default
# Get values.
return series.map(get_value)
else:
# Convert empty strings and null types to default.
series.loc[(series.map(str).isin(["", "nan", "-2147483648"])) | (series.isna())] = default
return series
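# Illustrative example (not part of the pipeline): with domain={1: "Paved", 2: "Unpaved"} and default="Unknown",
# apply_domain(pd.Series([1, "2", "gravel"]), domain, "Unknown") yields ["Paved", "Unpaved", "Unknown"],
# since keys are compared as lowercased strings.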
def cast_dtype(val: Any, dtype: Type, default: Any) -> Any:
"""
Casts the value to the given numpy dtype.
Returns the default parameter for invalid or Null values.
:param Any val: value.
:param Type dtype: numpy type object to be casted to.
:param Any default: value to be returned in case of error.
:return Any: casted or default value.
"""
try:
if pd.isna(val) or val == "":
return default
else:
return itemgetter(0)(np.array([val]).astype(dtype))
except (TypeError, ValueError):
return default
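# Illustrative example: cast_dtype("3", np.int32, -1) returns 3, while cast_dtype("abc", np.int32, -1) and
# cast_dtype(np.nan, np.int32, -1) both return the default (-1).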
def compile_default_values(lang: str = "en") -> dict:
"""
Compiles the default value for each field in each NRN dataset.
:param str lang: output language: 'en', 'fr'.
:return dict: dictionary of default values for each attribute of each NRN dataset.
"""
dft_vals = load_yaml(field_domains_path[lang])["default"]
dist_format = load_yaml(distribution_format_path)
defaults = dict()
try:
# Iterate tables.
for name in dist_format:
defaults[name] = dict()
# Iterate fields.
for field, dtype in dist_format[name]["fields"].items():
# Configure default value.
key = "label" if dtype[0] == "str" else "code"
defaults[name][field] = dft_vals[key]
except (AttributeError, KeyError, ValueError):
logger.exception(f"Invalid schema definition for one or more yamls:"
f"\nDefault values: {dft_vals}"
f"\nDistribution format: {dist_format}")
sys.exit(1)
return defaults
def compile_domains(mapped_lang: str = "en") -> dict:
"""
Compiles the acceptable domain values for each field in each NRN dataset. Each domain will consist of the following
keys:
1) 'values': all English and French values and keys flattened into a single list.
2) 'lookup': a lookup dictionary mapping each English and French value and key to the value of the given map
language. Integer keys and their float-equivalents are both added to accommodate incorrectly casted data.
:param str mapped_lang: output language: 'en', 'fr'.
:return dict: dictionary of domain values and lookup dictionary for each attribute of each NRN dataset.
"""
# Compile field domains.
domains = defaultdict(dict)
# Load domain yamls.
domain_yamls = {lang: load_yaml(field_domains_path[lang]) for lang in ("en", "fr")}
# Iterate tables and fields with domains.
for table in domain_yamls["en"]["tables"]:
for field in domain_yamls["en"]["tables"][table]:
try:
# Compile domains.
domain_en = domain_yamls["en"]["tables"][table][field]
domain_fr = domain_yamls["fr"]["tables"][table][field]
# Configure mapped and non-mapped output domain.
domain_mapped = domain_en if mapped_lang == "en" else domain_fr
domain_non_mapped = domain_en if mapped_lang != "en" else domain_fr
# Compile all domain values and domain lookup table, separately.
if domain_en is None:
domains[table][field] = {"values": None, "lookup": None}
elif isinstance(domain_en, list):
domains[table][field] = {
"values": sorted(list({*domain_en, *domain_fr}), reverse=True),
"lookup": dict([*zip(domain_en, domain_mapped), *zip(domain_fr, domain_mapped)])
}
elif isinstance(domain_en, dict):
domains[table][field] = {
"values": sorted(list({*domain_en.values(), *domain_fr.values()}), reverse=True),
"lookup": {**domain_mapped,
**{v: v for v in domain_mapped.values()},
**{v: domain_mapped[k] for k, v in domain_non_mapped.items()}}
}
# Add integer keys as floats to accommodate incorrectly casted data.
for k, v in domain_mapped.items():
try:
domains[table][field]["lookup"].update({str(float(k)): v})
except ValueError:
continue
else:
raise TypeError
except (AttributeError, KeyError, TypeError, ValueError):
yaml_paths = ", ".join(str(field_domains_path[lang]) for lang in ("en", "fr"))
logger.exception(f"Unable to compile domains from config yamls: {yaml_paths}. Invalid schema "
f"definition for table: {table}, field: {field}.")
sys.exit(1)
return domains
def compile_dtypes(length: bool = False) -> dict:
"""
Compiles the dtype for each field in each NRN dataset. Optionally includes the field length.
:param bool length: includes the length of the field in the returned data.
:return dict: dictionary of dtypes and, optionally, length for each attribute of each NRN dataset.
"""
dist_format = load_yaml(distribution_format_path)
dtypes = dict()
try:
# Iterate tables.
for name in dist_format:
dtypes[name] = dict()
# Iterate fields.
for field, dtype in dist_format[name]["fields"].items():
# Compile dtype and field length.
dtypes[name][field] = dtype if length else dtype[0]
except (AttributeError, KeyError, ValueError):
logger.exception(f"Invalid schema definition: {dist_format}.")
sys.exit(1)
return dtypes
def create_db_engine(url: str) -> Engine:
"""
:param str url: NRN database connection URL.
:return sqlalchemy.engine.base.Engine: SQLAlchemy database engine.
"""
logger.info(f"Creating NRN database engine.")
# Create database engine.
try:
engine = create_engine(url)
except sqlalchemy_exc.SQLAlchemyError as e:
logger.exception(f"Unable to create engine for NRN database.")
logger.exception(e)
sys.exit(1)
return engine
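# Illustrative example (hypothetical URL): create_db_engine("postgresql://user:password@localhost:5432/nrn")
# returns the SQLAlchemy engine used by the extraction functions below.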
def explode_geometry(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Explodes MultiLineStrings and MultiPoints to LineStrings and Points, respectively.
:param gpd.GeoDataFrame gdf: GeoDataFrame.
:return gpd.GeoDataFrame: GeoDataFrame containing only single-part geometries.
"""
logger.info("Exploding multi-type geometries.")
multi_types = {"MultiLineString", "MultiPoint"}
if len(set(gdf.geom_type.unique()).intersection(multi_types)):
# Separate multi- and single-type records.
multi = gdf.loc[gdf.geom_type.isin(multi_types)]
single = gdf.loc[~gdf.index.isin(multi.index)]
# Explode multi-type geometries.
multi_exploded = multi.explode().reset_index(drop=True)
# Merge all records.
merged = gpd.GeoDataFrame(pd.concat([single, multi_exploded], ignore_index=True), crs=gdf.crs)
return merged.copy(deep=True)
else:
return gdf.copy(deep=True)
def export(dfs: Dict[str, Union[gpd.GeoDataFrame, pd.DataFrame]], dst: Path, driver: str = "GPKG",
name_schemas: Union[None, dict] = None, merge_schemas: bool = False, keep_uuid: bool = True,
outer_pbar: Union[tqdm, trange, None] = None) -> None:
"""
Exports one or more (Geo)DataFrames as a specified OGR driver file / layer.
:param Dict[str, Union[gpd.GeoDataFrame, pd.DataFrame]] dfs: dictionary of NRN dataset names and (Geo)DataFrames.
:param Path dst: output path.
:param str driver: OGR driver short name, default='GPKG'.
:param Union[None, dict] name_schemas: optional dictionary mapping of dataset and field names for each provided
dataset. Expected dictionary format:
{
<dataset_name>:
name: <new_dataset_name>
fields:
<field_name>: <new_field_name>
...
...
}
:param bool merge_schemas: optional flag to merge type and name schemas such that attributes from any dataset can
exist on each provided dataset, default False.
:param bool keep_uuid: optional flag to preserve the uuid column, default True.
:param Union[tqdm, trange, None] outer_pbar: optional pre-existing tqdm progress bar.
"""
try:
# Validate / create driver.
driver = ogr.GetDriverByName(driver)
# Create directory structure and data source (only create source for layer-based drivers).
dst = Path(dst).resolve()
if dst.suffix:
dst.parent.mkdir(parents=True, exist_ok=True)
if dst.exists():
source = driver.Open(str(dst), update=1)
else:
source = driver.CreateDataSource(str(dst))
else:
dst.mkdir(parents=True, exist_ok=True)
source = None
# Compile type schemas, conditionally merge.
type_schemas = load_yaml(distribution_format_path)
if merge_schemas:
merged = {"spatial": any(type_schemas[table]["spatial"] for table in dfs),
"fields": dict(ChainMap(*[type_schema["fields"] for table, type_schema in type_schemas.items()]))}
type_schemas = {table: merged for table in type_schemas}
# Compile name schemas (filter datasets and fields within the existing type schemas and dataframe columns).
if not name_schemas:
name_schemas = {table: {"name": table, "fields": dict(zip(table_schema["fields"], table_schema["fields"]))}
for table, table_schema in type_schemas.items()}
# Iterate dataframes.
for table, df in dfs.items():
name_schema, type_schema = name_schemas[table], type_schemas[table]
schema = {"name": str(name_schema["name"]),
"spatial": type_schema["spatial"],
"fields": {field: {"name": name_schema["fields"][field],
"type": type_schema["fields"][field][0],
"width": type_schema["fields"][field][1]}
for field in set(name_schema["fields"]).intersection(set(df.columns))}}
# Conditionally add uuid to schema.
if keep_uuid and "uuid" in df.columns:
schema["fields"]["uuid"] = {"name": "uuid", "type": "str", "width": 32}
# Configure layer geometry type and spatial reference system.
spatial = schema["spatial"]
srs = None
geom_type = ogr.wkbNone
if schema["spatial"]:
srs = osr.SpatialReference()
srs.ImportFromEPSG(df.crs.to_epsg())
geom_type = attrgetter(f"wkb{df.geom_type.iloc[0]}")(ogr)
# Create source (non-layer-based drivers only) and layer.
if dst.suffix:
layer = source.CreateLayer(name=schema["name"], srs=srs, geom_type=geom_type, options=["OVERWRITE=YES"])
else:
source = driver.CreateDataSource(str(dst / schema["name"]))
layer = source.CreateLayer(name=Path(schema["name"]).stem, srs=srs, geom_type=geom_type)
# Set field definitions from schema.
ogr_field_map = {"float": ogr.OFTReal, "int": ogr.OFTInteger, "str": ogr.OFTString}
for field, specs in schema["fields"].items():
field_defn = ogr.FieldDefn(specs["name"], ogr_field_map[specs["type"]])
field_defn.SetWidth(specs["width"])
layer.CreateField(field_defn)
# Reorder and rename columns to match schema.
df = df[[*schema["fields"], "geometry"] if spatial else [*schema["fields"]]].copy(deep=True)
df.rename(columns={field: specs["name"] for field, specs in schema["fields"].items()}, inplace=True)
# Write layer.
layer.StartTransaction()
for feat in tqdm(df.itertuples(index=False), total=len(df),
desc=f"Writing to file={source.GetName()}, layer={table}",
bar_format="{desc}: |{bar}| {percentage:3.0f}% {r_bar}", leave=not bool(outer_pbar)):
# Instantiate feature.
feature = ogr.Feature(layer.GetLayerDefn())
# Compile feature properties.
properties = feat._asdict()
# Set feature geometry, if spatial.
if spatial:
geom = ogr.CreateGeometryFromWkb(properties.pop("geometry").wkb)
feature.SetGeometry(geom)
# Iterate and set feature properties (attributes).
for field_index, prop in enumerate(properties.items()):
feature.SetField(field_index, prop[-1])
# Create feature.
layer.CreateFeature(feature)
# Clear pointer for next iteration.
feature = None
layer.CommitTransaction()
# Update outer progress bar.
if outer_pbar:
outer_pbar.update(1)
except FileExistsError as e:
logger.exception(f"Invalid output directory - already exists.")
logger.exception(e)
sys.exit(1)
except (KeyError, ValueError, sqlite3.Error) as e:
logger.exception(f"Error raised when writing output: {dst}.")
logger.exception(e)
sys.exit(1)
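# Illustrative usage sketch (paths and layer names are hypothetical):
# export({"roadseg": roadseg_gdf}, dst=Path("/tmp/nrn.gpkg"), driver="GPKG")
# writes each provided (Geo)DataFrame as a layer named according to the distribution format / name schema.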
def extract_nrn(url: str, source_code: int) -> Dict[str, Union[gpd.GeoDataFrame, pd.DataFrame]]:
"""
Extracts NRN database records for the source into (Geo)DataFrames.
:param str url: NRN database connection URL.
:param int source_code: code for the source province / territory.
:return Dict[str, Union[gpd.GeoDataFrame, pd.DataFrame]]: dictionary of NRN dataset names and (Geo)DataFrames.
"""
logger.info(f"Extracting NRN datasets for source code: {source_code}.")
# Connect to database.
con = create_db_engine(url)
# Compile field defaults, domains, and dtypes.
defaults = compile_default_values(lang="en")
domains = compile_domains(mapped_lang="en")
dtypes = compile_dtypes()
# Load and execute database queries for NRN datasets.
dfs = dict()
for sql_file in (filepath.parent / "sql/extract").glob("*.sql"):
logger.info(f"Extracting NRN dataset: {sql_file.stem}.")
try:
# Resolve layer name.
layer = sql_file.stem
# Load document as jinja template.
with open(sql_file, "r") as doc:
template = jinja2.Template(doc.read())
# Update template.
query = template.render(
source_code=source_code,
metacover=f"'{defaults[layer]['metacover']}'" if
isinstance(defaults[layer]["metacover"], str) else defaults[layer]["metacover"],
specvers=2.0,
muniquad=f"'{defaults['strplaname']['muniquad']}'" if
isinstance(defaults['strplaname']["muniquad"], str) else defaults['strplaname']["muniquad"]
)
# Execute query.
df = gpd.read_postgis(query, con, geom_col="geometry")
# Store non-empty dataset.
if len(df):
dfs[layer] = df.copy(deep=True)
except (jinja2.TemplateError, jinja2.TemplateAssertionError, jinja2.UndefinedError) as e:
logger.exception(f"Unable to load SQL from: {sql_file}.")
logger.exception(e)
# Separate individual datasets from extracted data.
logger.info("Separating individual datasets from extracted data.")
nrn = dict()
# Separate dataset: addrange.
logger.info("Separating dataset: addrange.")
# Separate records.
addrange = dfs["roadseg"].loc[dfs["roadseg"]["segment_type"] == 1, [
"addrange_acqtech", "metacover", "addrange_credate", "datasetnam", "accuracy", "addrange_provider",
"addrange_revdate", "specvers", "l_altnanid", "r_altnanid", "addrange_l_digdirfg", "addrange_r_digdirfg",
"addrange_l_hnumf", "addrange_r_hnumf", "addrange_l_hnumsuff", "addrange_r_hnumsuff", "addrange_l_hnumtypf",
"addrange_r_hnumtypf", "addrange_l_hnumstr", "addrange_r_hnumstr", "addrange_l_hnuml", "addrange_r_hnuml",
"addrange_l_hnumsufl", "addrange_r_hnumsufl", "addrange_l_hnumtypl", "addrange_r_hnumtypl", "addrange_nid",
"segment_id_left", "segment_id_right", "addrange_l_rfsysind", "addrange_r_rfsysind"]
].rename(columns={
"addrange_acqtech": "acqtech", "addrange_credate": "credate", "addrange_provider": "provider",
"addrange_revdate": "revdate", "addrange_l_digdirfg": "l_digdirfg", "addrange_r_digdirfg": "r_digdirfg",
"addrange_l_hnumf": "l_hnumf", "addrange_r_hnumf": "r_hnumf", "addrange_l_hnumsuff": "l_hnumsuff",
"addrange_r_hnumsuff": "r_hnumsuff", "addrange_l_hnumtypf": "l_hnumtypf", "addrange_r_hnumtypf": "r_hnumtypf",
"addrange_l_hnumstr": "l_hnumstr", "addrange_r_hnumstr": "r_hnumstr", "addrange_l_hnuml": "l_hnuml",
"addrange_r_hnuml": "r_hnuml", "addrange_l_hnumsufl": "l_hnumsufl", "addrange_r_hnumsufl": "r_hnumsufl",
"addrange_l_hnumtypl": "l_hnumtypl", "addrange_r_hnumtypl": "r_hnumtypl", "addrange_nid": "nid",
"segment_id_left": "l_offnanid", "segment_id_right": "r_offnanid", "addrange_l_rfsysind": "l_rfsysind",
"addrange_r_rfsysind": "r_rfsysind"}
).copy(deep=True)
addrange.reset_index(drop=True, inplace=True)
# Store dataset.
nrn["addrange"] = pd.DataFrame(addrange).copy(deep=True)
# Separate dataset: junction.
logger.info(f"Separating dataset: junction.")
# Separate records.
junction = dfs["junction"][[
"acqtech", "metacover", "credate", "datasetnam", "accuracy", "provider", "revdate", "specvers", "exitnbr",
"junctype", "nid", "geometry"]].copy(deep=True)
junction.reset_index(drop=True, inplace=True)
# Store dataset.
nrn["junction"] = junction.copy(deep=True)
# Separate dataset: ferryseg.
if 2 in set(dfs["roadseg"]["segment_type"]):
logger.info("Separating dataset: ferryseg.")
# Separate records.
ferryseg = dfs["roadseg"].loc[dfs["roadseg"]["segment_type"] == 2, [
"acqtech", "metacover", "credate", "datasetnam", "accuracy", "provider", "revdate", "specvers", "closing",
"ferrysegid", "roadclass", "nid", "rtename1en", "rtename2en", "rtename3en", "rtename4en", "rtename1fr",
"rtename2fr", "rtename3fr", "rtename4fr", "rtnumber1", "rtnumber2", "rtnumber3", "rtnumber4", "rtnumber5",
"geometry"]].copy(deep=True)
ferryseg.reset_index(drop=True, inplace=True)
# Store dataset.
nrn["ferryseg"] = ferryseg.copy(deep=True)
# Separate dataset: roadseg.
logger.info("Separating dataset: roadseg.")
# Separate records.
roadseg = dfs["roadseg"].loc[dfs["roadseg"]["segment_type"] == 1, [
"acqtech", "metacover", "credate", "datasetnam", "accuracy", "provider", "revdate", "specvers",
"addrange_l_digdirfg", "addrange_r_digdirfg", "addrange_nid", "closing", "exitnbr", "addrange_l_hnumf",
"addrange_r_hnumf", "roadclass", "addrange_l_hnuml", "addrange_r_hnuml", "nid", "nbrlanes",
"strplaname_l_placename", "strplaname_r_placename", "l_stname_c", "r_stname_c", "pavsurf", "pavstatus",
"roadjuris", "roadsegid", "rtename1en", "rtename2en", "rtename3en", "rtename4en", "rtename1fr", "rtename2fr",
"rtename3fr", "rtename4fr", "rtnumber1", "rtnumber2", "rtnumber3", "rtnumber4", "rtnumber5", "speed",
"strunameen", "strunamefr", "structid", "structtype", "trafficdir", "unpavsurf", "geometry"]
].rename(columns={
"addrange_l_digdirfg": "l_adddirfg", "addrange_r_digdirfg": "r_adddirfg", "addrange_nid": "adrangenid",
"addrange_l_hnumf": "l_hnumf", "addrange_r_hnumf": "r_hnumf", "addrange_l_hnuml": "l_hnuml",
"addrange_r_hnuml": "r_hnuml", "strplaname_l_placename": "l_placenam", "strplaname_r_placename": "r_placenam"}
).copy(deep=True)
roadseg.reset_index(drop=True, inplace=True)
# Store dataset.
nrn["roadseg"] = roadseg.copy(deep=True)
# Separate dataset: strplaname.
logger.info("Separating dataset: strplaname.")
# Separate records.
strplaname = pd.concat([
dfs["roadseg"].loc[dfs["roadseg"]["segment_type"] == 1, [
"strplaname_l_acqtech", "metacover", "strplaname_l_credate", "datasetnam", "accuracy",
"strplaname_l_provider", "strplaname_l_revdate", "specvers", "strplaname_l_dirprefix",
"strplaname_l_dirsuffix", "muniquad", "segment_id_left", "strplaname_l_placename",
"strplaname_l_placetype", "strplaname_l_province", "strplaname_l_starticle", "strplaname_l_namebody",
"strplaname_l_strtypre", "strplaname_l_strtysuf"]
].rename(columns={
"strplaname_l_acqtech": "acqtech", "strplaname_l_credate": "credate", "strplaname_l_provider": "provider",
"strplaname_l_revdate": "revdate", "strplaname_l_dirprefix": "dirprefix",
"strplaname_l_dirsuffix": "dirsuffix", "segment_id_left": "nid", "strplaname_l_placename": "placename",
"strplaname_l_placetype": "placetype", "strplaname_l_province": "province",
"strplaname_l_starticle": "starticle", "strplaname_l_namebody": "namebody",
"strplaname_l_strtypre": "strtypre", "strplaname_l_strtysuf": "strtysuf"}),
dfs["roadseg"].loc[dfs["roadseg"]["segment_type"] == 1, [
"strplaname_r_acqtech", "metacover", "strplaname_r_credate", "datasetnam", "accuracy",
"strplaname_r_provider", "strplaname_r_revdate", "specvers", "strplaname_r_dirprefix",
"strplaname_r_dirsuffix", "muniquad", "segment_id_right", "strplaname_r_placename",
"strplaname_r_placetype", "strplaname_r_province", "strplaname_r_starticle", "strplaname_r_namebody",
"strplaname_r_strtypre", "strplaname_r_strtysuf"]
].rename(columns={
"strplaname_r_acqtech": "acqtech", "strplaname_r_credate": "credate", "strplaname_r_provider": "provider",
"strplaname_r_revdate": "revdate", "strplaname_r_dirprefix": "dirprefix",
"strplaname_r_dirsuffix": "dirsuffix", "segment_id_right": "nid", "strplaname_r_placename": "placename",
"strplaname_r_placetype": "placetype", "strplaname_r_province": "province",
"strplaname_r_starticle": "starticle", "strplaname_r_namebody": "namebody",
"strplaname_r_strtypre": "strtypre", "strplaname_r_strtysuf": "strtysuf"})]).copy(deep=True)
strplaname.reset_index(drop=True, inplace=True)
# Store dataset.
nrn["strplaname"] = | pd.DataFrame(strplaname) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 13:59:31 2020
@author: <NAME>
"""
import sys, os
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src')
sys.path.append('H:/cloud/cloud_data/Projects/DL/Code/src/ct')
import pandas as pd
import ntpath
import datetime
from openpyxl.worksheet.datavalidation import DataValidation
from openpyxl.formatting.formatting import ConditionalFormattingList
from openpyxl.styles import Font, Color, Border, Side
from openpyxl.styles import Protection
from openpyxl.styles import PatternFill
from glob import glob
from shutil import copyfile
import numpy as np
from collections import defaultdict
from openpyxl.utils import get_column_letter
from CTDataStruct import CTPatient
import keyboard
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.formatting import Rule
from settings import initSettings, saveSettings, loadSettings, fillSettingsTags
from classification import createRFClassification, initRFClassification, classifieRFClassification
from filterTenStepsGuide import filter_CACS_10StepsGuide, filter_CACS, filter_NCS, filterReconstruction, filter_CTA, filer10StepsGuide, filterReconstructionRF
from discharge_extract import extractDICOMTags
from tqdm import tqdm
#from reco.reco_filter import RecoFilter
patient_status = ['OK', 'EXCLUDED', 'MISSING_CACS', 'MISSING_CTA', 'MISSING_NC_CACS', 'MISSING_NC_CTA']
patient_status_manual = ['OK', 'EXCLUDED', 'UNDEFINED', 'INPROGRESS']
patient_status_manualStr = '"' + 'OK,' + 'EXCLUDED,' + 'UNDEFINED,' + 'INPROGRESS,' + '"'
scanClasses = defaultdict(lambda:None,{'UNDEFINED': 0, 'CACS': 1, 'CTA': 2, 'NCS_CACS': 3, 'NCS_CTA': 4, 'ICA': 5, 'OTHER': 6})
scanClassesInv = defaultdict(lambda:None,{0: 'UNDEFINED', 1: 'CACS', 2: 'CTA', 3: 'NCS_CACS', 4: 'NCS_CTA', 5: 'ICA', 6: 'OTHER'})
scanClassesStr = '"' + 'UNDEFINED,' + 'CACS,' + 'CTA,' + 'NCS_CACS,' + 'NCS_CTA,' + 'ICA,' + 'OTHER' +'"'
scanClassesManualStr = '"' + 'UNDEFINED,' + 'CACS,' + 'CTA,' + 'NCS_CACS,' + 'NCS_CTA,' + 'ICA,' + 'OTHER,' + 'PROBLEM,' + 'QUESTION,' +'"'
imageQualityStr = '"' + 'UNDEFINED,' + 'GOOD,' + 'BAD' +'"'
recoClasses = ['FBP', 'IR', 'UNDEFINED']
changeClasses = ['NO_CHANGE', 'SOURCE_CHANGE', 'MASTER_CHANGE', 'MASTER_SOURCE_CHANGE']
def setColor(workbook, sheet, rows, NumColumns, color):
for r in rows:
if r % 100 == 0:
print('index:', r, '/', max(rows))
for c in range(1,NumColumns):
cell = sheet.cell(r, c)
cell.fill = PatternFill(start_color=color, end_color=color, fill_type = 'solid')
def setColorFormula(sheet, formula, color, NumRows, NumColumns):
column_letter = get_column_letter(NumColumns+1)
colorrange="B2:" + str(column_letter) + str(NumRows)
dxf = DifferentialStyle(font=Font(color=color))
r = Rule(type="expression", dxf=dxf, stopIfTrue=True)
r.formula = [formula]
sheet.conditional_formatting.add(colorrange, r)
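# Example (hypothetical values): render the font red on every row where column B equals
# "EXCLUDED", for a sheet spanning 500 rows and 20 columns.
#
#   setColorFormula(sheet, formula='$B2="EXCLUDED"', color='00FF0000', NumRows=500, NumColumns=20)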
def setBorderFormula(sheet, formula, NumRows, NumColumns):
column_letter = get_column_letter(NumColumns+1)
colorrange="B1:" + str(column_letter) + str(NumRows)
thin = Side(border_style="thin", color="000000")
border = Border(bottom=thin)
dxf = DifferentialStyle(border=border)
r = Rule(type="expression", dxf=dxf, stopIfTrue=True)
r.formula = [formula]
sheet.conditional_formatting.add(colorrange, r)
# Set border for index
for i in range(1, NumRows + 1):
cell = sheet.cell(i, 1)
cell.border = Border()
return sheet
def sortFilepath(filepathList):
filenameList=[]
folderpathList=[]
for filepath in filepathList:
folderpath, filename, _ = splitFilePath(filepath)
filenameList.append(filename)
folderpathList.append(folderpath)
dates_str = [x.split('_')[-1] for x in filenameList]
dates = [datetime.datetime(int(x[4:8]), int(x[2:4]), int(x[0:2])) for x in dates_str]
idx = list(np.argsort(dates))
filepathlistsort=[]
for i in idx:
filepathlistsort.append(folderpathList[i] + '/' + '_'.join(filenameList[i].split('_')[0:-1]) + '_' + dates[i].strftime("%d%m%Y") + '.xlsx')
return filepathlistsort
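# Example (hypothetical paths): each filename ends in a DDMMYYYY token, so the earlier date
# comes first in the returned list.
#
#   sortFilepath(["H:/exports/discharge_15032021.xlsx", "H:/exports/discharge_01012021.xlsx"])
#   # -> ["H:/exports/discharge_01012021.xlsx", "H:/exports/discharge_15032021.xlsx"]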
def sortFolderpath(folderpath, folderpathList):
dates_str = [x.split('_')[-1] for x in folderpathList]
dates = [datetime.datetime(int(x[4:8]), int(x[2:4]), int(x[0:2])) for x in dates_str]
date_str = folderpath.split('_')[-1]
date = datetime.datetime(int(date_str[4:8]), int(date_str[2:4]), int(date_str[0:2]))
idx = list(np.argsort(dates))
folderpathSort=[]
for i in idx:
folderpathSort.append(folderpathList[i])
if dates[i] == date:
break
return folderpathSort
def isNaN(num):
return num != num
def splitFilePath(filepath):
""" Split filepath into folderpath, filename and file extension
:param filepath: Filepath
:type filepath: str
"""
folderpath, _ = ntpath.split(filepath)
head, file_extension = os.path.splitext(filepath)
folderpath, filename = ntpath.split(head)
return folderpath, filename, file_extension
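# Example: splitFilePath("H:/exports/discharge_01012021.xlsx")
# returns ("H:/exports", "discharge_01012021", ".xlsx").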
def update_CACS_10StepsGuide(df_CACS, sheet):
for index, row in df_CACS.iterrows():
cell_str = 'AB' + str(index+2)
cell = sheet[cell_str]
cell.value = row['CACS10StepsGuide']
#cell.protection = Protection(locked=False)
return sheet
def mergeITT(df_ITT, df_data):
# Merge ITT table
print('Merge ITT table')
for i in range(len(df_data)):
patient = df_ITT[df_ITT['ID']==df_data.loc[i, 'PatientID']]
if len(patient)==1:
df_data.loc[i, 'ITT'] = patient.iloc[0]['ITT']
df_data.loc[i, 'Date CT'] = patient.iloc[0]['Date CT']
df_data.loc[i, 'Date ICA'] = patient.iloc[0]['Date ICA']
return df_data
def mergeDicom(df_dicom, df_data_old=None):
print('Merge dicom table')
if df_data_old is None:
df_data = df_dicom.copy()
else:
idx = df_dicom['SeriesInstanceUID'].isin(df_data_old['SeriesInstanceUID'])
df_data = pd.concat([df_data_old, df_dicom[idx==False]], axis=0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 15:39:02 2018
@author: joyce
"""
import pandas as pd
import numpy as np
from numpy.matlib import repmat
from stats import get_stockdata_from_sql,get_tradedate,Corr,Delta,Rank,Cross_max,\
Cross_min,Delay,Sum,Mean,STD,TsRank,TsMax,TsMin,DecayLinear,Count,SMA,Cov,DTM,DBM,\
Highday,Lowday,HD,LD,RegBeta,RegResi,SUMIF,get_indexdata_from_sql,timer,get_fama
class stAlpha(object):
def __init__(self,begin,end):
self.begin = begin
self.end = end
self.close = get_stockdata_from_sql(1,self.begin,self.end,'Close')
self.open = get_stockdata_from_sql(1,self.begin,self.end,'Open')
self.high = get_stockdata_from_sql(1,self.begin,self.end,'High')
self.low = get_stockdata_from_sql(1,self.begin,self.end,'Low')
self.volume = get_stockdata_from_sql(1,self.begin,self.end,'Vol')
self.amt = get_stockdata_from_sql(1,self.begin,self.end,'Amount')
self.vwap = get_stockdata_from_sql(1,self.begin,self.end,'Vwap')
self.ret = get_stockdata_from_sql(1,begin,end,'Pctchg')
self.close_index = get_indexdata_from_sql(1,begin,end,'close','000001.SH')
self.open_index = get_indexdata_from_sql(1,begin,end,'open','000001.SH')
# self.mkt = get_fama_from_sql()
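# Minimal usage sketch (dates are placeholders; the SQL helpers in stats.py must be
# configured to reach the price database):
#
#   factors = stAlpha('2017-01-01', '2017-12-31')
#   a1 = factors.alpha1()    # single-column DataFrame named 'alpha1'
#   a14 = factors.alpha14()  # single-column DataFrame named 'alpha14'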
@timer
def alpha1(self):
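# 6-day rolling correlation between rank(delta(log(volume), 1)) and rank((close - open) / open).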
volume = self.volume
ln_volume = np.log(volume)
ln_volume_delta = Delta(ln_volume,1)
close = self.close
Open = self.open
price_temp = pd.concat([close,Open],axis = 1,join = 'outer')
price_temp['ret'] = (price_temp['Close'] - price_temp['Open'])/price_temp['Open']
del price_temp['Close'],price_temp['Open']
r_ln_volume_delta = Rank(ln_volume_delta)
r_ret = Rank(price_temp)
rank = pd.concat([r_ln_volume_delta,r_ret],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,6)
alpha = corr
alpha.columns = ['alpha1']
return alpha
@timer
def alpha2(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
temp['alpha'] = (2 * temp['Close'] - temp['Low'] - temp['High']) \
/ (temp['High'] - temp['Low'])
del temp['Close'],temp['Low'],temp['High']
alpha = -1 * Delta(temp,1)
alpha.columns = ['alpha2']
return alpha
@timer
def alpha3(self):
close = self.close
low = self.low
high = self.high
temp = pd.concat([close,low,high],axis = 1,join = 'outer')
close_delay = Delay(pd.DataFrame(temp['Close']),1)
close_delay.columns = ['close_delay']
temp = pd.concat([temp,close_delay],axis = 1,join = 'inner')
temp['min'] = Cross_max(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['Low']))
temp['max'] = Cross_min(pd.DataFrame(temp['close_delay']),pd.DataFrame(temp['High']))
temp['alpha_temp'] = 0
temp['alpha_temp'][temp['Close'] > temp['close_delay']] = temp['Close'] - temp['min']
temp['alpha_temp'][temp['Close'] < temp['close_delay']] = temp['Close'] - temp['max']
alpha = Sum(pd.DataFrame(temp['alpha_temp']),6)
alpha.columns = ['alpha3']
return alpha
@timer
def alpha4(self):
close = self.close
volume = self.volume
close_mean_2 = Mean(close,2)
close_mean_8 = Mean(close,8)
close_std = STD(close,8)
volume_mean_20 = Mean(volume,20)
data = pd.concat([close_mean_2,close_mean_8,close_std,volume_mean_20,volume],axis = 1,join = 'inner')
data.columns = ['close_mean_2','close_mean_8','close_std','volume_mean_20','volume']
data['alpha'] = -1
data['alpha'][data['close_mean_2'] < data['close_mean_8'] - data['close_std']] = 1
data['alpha'][data['volume']/data['volume_mean_20'] >= 1] = 1
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha4']
return alpha
@timer
def alpha5(self):
volume = self.volume
high = self.high
r1 = TsRank(volume,5)
r2 = TsRank(high,5)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(corr,5)
alpha.columns = ['alpha5']
return alpha
@timer
def alpha6(self):
Open = self.open
high = self.high
df = pd.concat([Open,high],axis = 1,join = 'inner')
df['price'] = df['Open'] * 0.85 + df['High'] * 0.15
df_delta = Delta(pd.DataFrame(df['price']),1)
alpha = Rank(np.sign(df_delta))
alpha.columns = ['alpha6']
return alpha
@timer
def alpha7(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_delta = Delta(volume,3)
data = pd.concat([close,vwap],axis = 1,join = 'inner')
data['diff'] = data['Vwap'] - data['Close']
r1 = Rank(TsMax(pd.DataFrame(data['diff']),3))
r2 = Rank(TsMin(pd.DataFrame(data['diff']),3))
r3 = Rank(volume_delta)
rank = pd.concat([r1,r2,r3],axis = 1,join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = (rank['r1'] + rank['r2'])* rank['r3']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha7']
return alpha
@timer
def alpha8(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
data_price = (data['High'] + data['Low'])/2 * 0.2 + data['Vwap'] * 0.2
data_price_delta = Delta(pd.DataFrame(data_price),4) * -1
alpha = Rank(data_price_delta)
alpha.columns = ['alpha8']
return alpha
@timer
def alpha9(self):
high = self.high
low = self.low
volume = self.volume
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['price']= (data['High'] + data['Low'])/2
data['price_delay'] = Delay(pd.DataFrame(data['price']),1)
alpha_temp = (data['price'] - data['price_delay']) * (data['High'] - data['Low'])/data['Vol']
alpha_temp_unstack = alpha_temp.unstack(level = 'ID')
alpha = alpha_temp_unstack.ewm(span = 7, ignore_na = True, min_periods = 7).mean()
alpha_final = alpha.stack()
alpha = pd.DataFrame(alpha_final)
alpha.columns = ['alpha9']
return alpha
@timer
def alpha10(self):
ret = self.ret
close = self.close
ret_std = STD(pd.DataFrame(ret),20)
ret_std.columns = ['ret_std']
data = pd.concat([ret,close,ret_std],axis = 1, join = 'inner')
temp1 = pd.DataFrame(data['ret_std'][data['Pctchg'] < 0])
temp2 = pd.DataFrame(data['Close'][data['Pctchg'] >= 0])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0,join = 'outer')
temp_order = pd.concat([data,temp],axis = 1)
temp_square = pd.DataFrame(np.power(temp_order['temp'],2))
alpha_temp = TsMax(temp_square,5)
alpha = Rank(alpha_temp)
alpha.columns = ['alpha10']
return alpha
@timer
def alpha11(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume],axis = 1,join = 'inner')
data_temp = (data['Close'] - data['Low']) -(data['High'] - data['Close'])\
/(data['High'] - data['Low']) * data['Vol']
alpha = Sum(pd.DataFrame(data_temp),6)
alpha.columns = ['alpha11']
return alpha
@timer
def alpha12(self):
Open = self.open
vwap = self.vwap
close = self.close
data = pd.concat([Open,vwap,close],axis = 1, join = 'inner')
data['p1'] = data['Open'] - Mean(data['Open'],10)
data['p2'] = data['Close'] - data['Vwap']
r1 = Rank(pd.DataFrame(data['p1']))
r2 = Rank(pd.DataFrame(np.abs(data['p2'])))
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
rank.columns = ['r1','r2']
alpha = rank['r1'] - rank['r2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha12']
return alpha
@timer
def alpha13(self):
high = self.high
low = self.low
vwap = self.vwap
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
alpha = (data['High'] + data['Low'])/2 - data['Vwap']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha13']
return alpha
@timer
def alpha14(self):
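# Simple 5-day momentum: close minus the close delayed by 5 days.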
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close'] - data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha14']
return alpha
@timer
def alpha15(self):
Open = self.open
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([Open,close_delay],axis = 1,join = 'inner')
alpha = data['Open']/data['close_delay'] - 1
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha15']
return alpha
@timer
def alpha16(self):
vwap = self.vwap
volume = self.volume
data = pd.concat([vwap,volume],axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vol']))
r2 = Rank(pd.DataFrame(data['Vwap']))
rank = pd.concat([r1,r2],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,5)
alpha = -1 * TsMax(Rank(corr),5)
alpha.columns = ['alpha16']
return alpha
@timer
def alpha17(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close],axis = 1, join = 'inner')
data['vwap_max15'] = TsMax(data['Vwap'],15)
data['close_delta5'] = Delta(data['Close'],5)
temp = np.power(data['vwap_max15'],data['close_delta5'])
alpha = Rank(pd.DataFrame(temp))
alpha.columns = ['alpha17']
return alpha
@timer
def alpha18(self):
"""
this one is similar to alpha14
"""
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = data['Close']/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha18']
return alpha
@timer
def alpha19(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data['temp1'] = (data['Close'] - data['close_delay'])/data['close_delay']
data['temp2'] = (data['Close'] - data['close_delay'])/data['Close']
temp1 = pd.DataFrame(data['temp1'][data['Close'] < data['close_delay']])
temp2 = pd.DataFrame(data['temp2'][data['Close'] >= data['close_delay']])
temp1.columns = ['temp']
temp2.columns = ['temp']
temp = pd.concat([temp1,temp2],axis = 0)
data = pd.concat([data,temp],axis = 1,join = 'outer')
alpha = pd.DataFrame(data['temp'])
alpha.columns = ['alpha19']
return alpha
@timer
def alpha20(self):
close = self.close
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha20']
return alpha
@timer
def alpha21(self):
close = self.close
close_mean = Mean(close,6)
alpha = RegBeta(0,close_mean,None,6)
alpha.columns = ['alpha21']
return alpha
@timer
def alpha22(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1,join = 'inner')
data.columns = ['close','close_mean']
temp = pd.DataFrame((data['close'] - data['close_mean'])/data['close_mean'])
temp_delay = Delay(temp,3)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
temp2 = pd.DataFrame(data_temp['temp'] - data_temp['temp_delay'])
alpha = SMA(temp2,12,1)
alpha.columns = ['alpha22']
return alpha
@timer
def alpha23(self):
close = self.close
close_std = STD(close,20)
close_delay = Delay(close,1)
data = pd.concat([close,close_std,close_delay],axis = 1, join = 'inner')
data.columns = ['Close','close_std','close_delay']
data['temp'] = data['close_std']
data['temp'][data['Close'] <= data['close_delay']] = 0
temp = pd.DataFrame(data['temp'])
sma1 = SMA(temp,20,1)
sma2 = SMA(pd.DataFrame(data['close_std']),20,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'])
alpha.columns = ['alpha23']
return alpha
@timer
def alpha24(self):
close = self.close
close_delay = Delay(close,5)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis=1 ,join = 'inner' )
temp = data['Close'] - data['close_delay']
temp = pd.DataFrame(temp)
alpha = SMA(temp,5,1)
alpha.columns = ['alpha24']
return alpha
@timer
def alpha25(self):
close = self.close
close_delta = Delta(close,7)
ret = self.ret
r1 = Rank(close_delta)
r3 = Rank(Sum(ret,250))
volume = self.volume
volume_mean = Mean(pd.DataFrame(volume['Vol']),20)
volume_mean.columns = ['volume_mean']
data = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
temp0 = pd.DataFrame(data['Vol']/data['volume_mean'])
temp = DecayLinear(temp0,9)
r2 = Rank(temp)
rank = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
rank.columns = ['r1','r2','r3']
alpha = pd.DataFrame(-1 * rank['r1'] * (1 - rank['r2']) * rank['r3'])
alpha.columns = ['alpha25']
return alpha
@timer
def alpha26(self):
close = self.close
vwap = self.vwap
close_mean7 = Mean(close,7)
close_mean7.columns = ['close_mean7']
close_delay5 = Delay(close,5)
close_delay5.columns = ['close_delay5']
data = pd.concat([vwap,close_delay5],axis = 1,join = 'inner')
corr = Corr(data,230)
corr.columns = ['corr']
data_temp = pd.concat([corr,close_mean7,close],axis = 1,join = 'inner')
alpha = data_temp['close_mean7'] - data_temp['Close'] + data_temp['corr']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha26']
return alpha
@timer
def alpha27(self):
"""
uncompleted
"""
close = self.close
close_delay3 = Delay(close,3)
close_delay6 = Delay(close,6)
data = pd.concat([close,close_delay3,close_delay6],axis = 1,join = 'inner')
data.columns = ['close','close_delay3','close_delay6']
temp1 = pd.DataFrame((data['close'] - data['close_delay3'])/data['close_delay3'] * 100)
temp2 = pd.DataFrame((data['close'] - data['close_delay6'])/data['close_delay6'] * 100)
data_temp = pd.concat([temp1,temp2],axis = 1,join = 'inner')
data_temp.columns = ['temp1','temp2']
temp = pd.DataFrame(data_temp['temp1'] + data_temp['temp2'])
alpha = DecayLinear(temp,12)
alpha.columns = ['alpha27']
return alpha
@timer
def alpha28(self):
close = self.close
low = self.low
high = self.high
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['Close','low_min','high_max']
temp1 = pd.DataFrame((data['Close'] - data['low_min']) /(data['high_max'] - data['low_min']))
sma1 = SMA(temp1,3,1)
sma2 = SMA(sma1,3,1)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] * 2 - sma['sma2'] * 3)
alpha.columns = ['alpha28']
return alpha
@timer
def alpha29(self):
close = self.close
volume = self.volume
close_delay = Delay(close,6)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha29']
return alpha
@timer
def alpha30(self):
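"""
uncompleted
"""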
close = self.close
close_delay = Delay(close,1)
@timer
def alpha31(self):
close = self.close
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
alpha = (data['Close'] - data['close_delay'])/data['close_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha31']
return alpha
@timer
def alpha32(self):
volume = self.volume
high = self.high
r1 = Rank(volume)
r2 = Rank(high)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,3)
r = Rank(corr)
alpha = -1 * Sum(r,3)
alpha.columns = ['alpha32']
return alpha
@timer
def alpha33(self):
low = self.low
volume = self.volume
ret = self.ret
low_min = TsMin(low,5)
low_min_delay = Delay(low_min,5)
data1 = pd.concat([low_min,low_min_delay],axis = 1,join = 'inner')
data1.columns = ['low_min','low_min_delay']
ret_sum240 = Sum(ret,240)
ret_sum20 = Sum(ret,20)
ret_temp = pd.concat([ret_sum240,ret_sum20],axis = 1, join = 'inner')
ret_temp.columns = ['ret240','ret20']
temp1 = pd.DataFrame(data1['low_min_delay'] - data1['low_min'])
temp2 = pd.DataFrame((ret_temp['ret240'] - ret_temp['ret20'])/220)
r_temp2 = Rank(temp2)
r_volume = TsRank(volume,5)
temp = pd.concat([temp1,r_temp2,r_volume],axis = 1,join = 'inner')
temp.columns = ['temp1','r_temp2','r_volume']
alpha = temp['temp1'] * temp['r_temp2'] * temp['r_volume']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha33']
return alpha
@timer
def alpha34(self):
close = self.close
close_mean = Mean(close,12)
close_mean.columns = ['close_mean']
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
alpha = pd.DataFrame(data['close_mean']/data['Close'])
alpha.columns = ['alpha34']
return alpha
@timer
def alpha35(self):
volume = self.volume
Open = self.open
open_delay = Delay(Open,1)
open_delay.columns = ['open_delay']
open_linear = DecayLinear(Open,17)
open_linear.columns = ['open_linear']
open_delay_temp = DecayLinear(open_delay,15)
r1 = Rank(open_delay_temp)
data = pd.concat([Open,open_linear],axis = 1,join = 'inner')
Open_temp = data['Open'] * 0.65 + 0.35 * data['open_linear']
rank = pd.concat([volume,Open_temp],axis = 1, join = 'inner')
rank.columns = ['r1','r2']
corr = Corr(rank,7)
r2 = Rank(-1 * corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = Cross_min(pd.DataFrame(r['r1']),pd.DataFrame(r['r2']))
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha35']
return alpha
@timer
def alpha36(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2],axis = 1,join = 'inner')
corr = Corr(rank,6)
temp = Sum(corr,2)
alpha = Rank(temp)
alpha.columns = ['alpha36']
return alpha
@timer
def alpha37(self):
Open = self.open
ret = self.ret
open_sum = Sum(Open,5)
ret_sum = Sum(ret,5)
data = pd.concat([open_sum,ret_sum],axis = 1,join = 'inner')
data.columns = ['open_sum','ret_sum']
temp = data['open_sum'] * data['ret_sum']
temp_delay = Delay(temp,10)
data_temp = pd.concat([temp,temp_delay],axis = 1,join = 'inner')
data_temp.columns = ['temp','temp_delay']
alpha = -1 * Rank(pd.DataFrame(data_temp['temp'] - data_temp['temp_delay']))
alpha.columns = ['alpha37']
return alpha
@timer
def alpha38(self):
high = self.high
high_mean = Mean(high,20)
high_delta = Delta(high,2)
data = pd.concat([high,high_mean,high_delta],axis = 1,join = 'inner')
data.columns = ['high','high_mean','high_delta']
data['alpha'] = -1 * data['high_delta']
data['alpha'][data['high_mean'] >= data['high']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha38']
return alpha
@timer
def alpha39(self):
close = self.close
Open = self.open
vwap = self.vwap
volume = self.volume
close_delta2 = Delta(close,2)
close_delta2_decay = DecayLinear(close_delta2,8)
r1 = Rank(close_delta2_decay)
price_temp = pd.concat([vwap,Open],axis = 1,join = 'inner')
price = pd.DataFrame(price_temp['Vwap'] * 0.3 + price_temp['Open'] * 0.7)
volume_mean = Mean(volume,180)
volume_mean_sum = Sum(volume_mean,37)
rank = pd.concat([price,volume_mean_sum],axis = 1,join = 'inner')
corr = Corr(rank,14)
corr_decay = DecayLinear(corr,12)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns = ['alpha39']
return alpha
@timer
def alpha40(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
data = pd.concat([close,volume,close_delay],axis = 1, join = 'inner')
data.columns = ['close','volume','close_delay']
data['temp1'] = data['volume']
data['temp2'] = data['volume']
data['temp1'][data['close'] <= data['close_delay']] = 0
data['temp2'][data['close'] > data['close_delay']] = 0
s1 = Sum(pd.DataFrame(data['temp1']),26)
s2 = Sum(pd.DataFrame(data['temp2']),26)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha40']
return alpha
@timer
def alpha41(self):
vwap = self.vwap
vwap_delta = Delta(vwap,3)
vwap_delta_max = TsMax(vwap_delta,5)
alpha = -1 * Rank(vwap_delta_max)
alpha.columns = ['alpha41']
return alpha
@timer
def alpha42(self):
high = self.high
volume = self.volume
high_std = STD(high,10)
r1 = Rank(high_std)
data = pd.concat([high,volume],axis = 1,join = 'inner')
corr = Corr(data,10)
r = pd.concat([r1,corr],axis = 1,join = 'inner')
r.columns = ['r1','corr']
alpha = pd.DataFrame(-1 * r['r1'] * r['corr'])
alpha.columns = ['alpha42']
return alpha
@timer
def alpha43(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,6)
alpha.columns = ['alpha43']
return alpha
@timer
def alpha44(self):
volume = self.volume
vwap = self.vwap
low = self.low
volume_mean = Mean(volume,10)
rank = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(rank,7)
corr_decay = DecayLinear(corr,6)
r1 = TsRank(corr_decay,4)
vwap_delta = Delta(vwap,3)
vwap_delta_decay = DecayLinear(vwap_delta,10)
r2 = TsRank(vwap_delta_decay,15)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha44']
return alpha
@timer
def alpha45(self):
volume = self.volume
vwap = self.vwap
close = self.close
Open = self.open
price = pd.concat([close,Open],axis = 1,join = 'inner')
price['price'] = price['Close'] * 0.6 + price['Open'] * 0.4
price_delta = Delta(pd.DataFrame(price['price']),1)
r1 = Rank(price_delta)
volume_mean = Mean(volume,150)
data = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,15)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha45']
return alpha
@timer
def alpha46(self):
close = self.close
close_mean3 = Mean(close,3)
close_mean6 = Mean(close,6)
close_mean12 = Mean(close,12)
close_mean24 = Mean(close,24)
data = pd.concat([close,close_mean3,close_mean6,close_mean12,close_mean24],axis = 1,join = 'inner')
data.columns = ['c','c3','c6','c12','c24']
alpha = (data['c3'] + data['c6'] + data['c12'] + data['c24'])/(4 * data['c'])
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha46']
return alpha
@timer
def alpha47(self):
close = self.close
low = self.low
high = self.high
high_max = TsMax(high,6)
low_min = TsMin(low,6)
data = pd.concat([high_max,low_min,close],axis = 1,join = 'inner')
data.columns = ['high_max','low_min','close']
temp = pd.DataFrame((data['high_max'] - data['close'])/(data['high_max'] - \
data['low_min']) * 100)
alpha = SMA(temp,9,1)
alpha.columns = ['alpha47']
return alpha
@timer
def alpha48(self):
close = self.close
volume = self.volume
temp1 = Delta(close,1)
temp1_delay1 = Delay(temp1,1)
temp1_delay2 = Delay(temp1,2)
data = pd.concat([temp1,temp1_delay1,temp1_delay2],axis = 1,join = 'inner')
data.columns = ['temp1','temp1_delay1','temp1_delay2']
temp2 = pd.DataFrame(np.sign(data['temp1']) + np.sign(data['temp1_delay1']) \
+ np.sign(data['temp1_delay2']))
volume_sum5 = Sum(volume,5)
volume_sum20 = Sum(volume,20)
data_temp = pd.concat([temp2,volume_sum5,volume_sum20],axis = 1,join = 'inner')
data_temp.columns = ['temp2','volume_sum5','volume_sum20']
temp3 = pd.DataFrame(data_temp['temp2'] * data_temp['volume_sum5']/\
data_temp['volume_sum20'])
alpha = -1 * Rank(temp3)
alpha.columns = ['alpha48']
return alpha
@timer
def alpha49(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 0
price['temp'][price['sum'] < price['sum_delay']] = 1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha49']
return alpha
@timer
def alpha50(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = -1
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha50']
return alpha
@timer
def alpha51(self):
low = self.low
high = self.high
data = pd.concat([low,high],axis = 1,join = 'inner')
price_sum = pd.DataFrame(data['Low'] + data['High'])
price_sum_delay = Delay(price_sum,1)
price = pd.concat([price_sum,price_sum_delay],axis = 1,join = 'inner')
price.columns = ['sum','sum_delay']
price['temp'] = 1
price['temp'][price['sum'] <= price['sum_delay']] = 0
alpha = Sum(pd.DataFrame(price['temp']),12)
alpha.columns = ['alpha51']
return alpha
@timer
def alpha52(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
data['sum_delay'] = Delay(pd.DataFrame((data['High'] + data['Low'] + data['Close'])/3),1)
temp1 = pd.DataFrame(data['High'] - data['sum_delay'])
temp1.columns = ['high_diff']
temp2 = pd.DataFrame(data['sum_delay'] - data['Low'])
temp2.columns = ['low_diff']
temp1['max'] = temp1['high_diff']
temp1['max'][temp1['high_diff'] < 0 ] = 0
temp2['max'] = temp2['low_diff']
temp2['max'][temp2['low_diff'] < 0 ] = 0
temp1_sum = Sum(pd.DataFrame(temp1['max']),26)
temp2_sum = Sum(pd.DataFrame(temp2['max']),26)
alpha_temp = pd.concat([temp1_sum,temp2_sum],axis = 1,join = 'inner')
alpha_temp.columns = ['s1','s2']
alpha = pd.DataFrame(alpha_temp['s1']/alpha_temp['s2'] * 100)
alpha.columns = ['alpha52']
return alpha
@timer
def alpha53(self):
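# Count of the last 12 days satisfying the close vs. delayed-close condition, scaled to a percentage (count / 12 * 100).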
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,12)
alpha = count/12.0 * 100
alpha.columns = ['alpha53']
return alpha
@timer
def alpha54(self):
Open = self.open
close = self.close
data = pd.concat([Open,close], axis = 1, join = 'inner')
data.columns = ['close','open']
temp = pd.DataFrame(data['close'] - data['open'])
temp_abs = pd.DataFrame(np.abs(temp))
df = pd.concat([temp,temp_abs], axis = 1, join= 'inner')
df.columns = ['temp','abs']
std = STD(pd.DataFrame(df['temp'] + df['abs']),10)
corr = Corr(data,10)
data1 = pd.concat([corr,std],axis = 1, join = 'inner')
data1.columns = ['corr','std']
alpha = Rank(pd.DataFrame(data1['corr'] + data1['std'])) * -1
alpha.columns = ['alpha54']
return alpha
@timer
def alpha55(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
data1['judge4'][data1['abs3'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
tep = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha = Sum(tep,20)
alpha.columns = ['alpha55']
return alpha
@timer
def alpha56(self):
low = self.low
high = self.high
volume = self.volume
Open = self.open
open_min = TsMin(Open,12)
data1 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data1.columns = ['open','open_min']
r1 = Rank(pd.DataFrame(data1['open'] - data1['open_min']))
volume_mean = Mean(volume,40)
volume_mean_sum= Sum(volume_mean,19)
data2 = pd.concat([high,low],axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
rank = pd.concat([temp,volume_mean_sum],axis = 1 , join = 'inner')
rank.columns = ['temp','volume_mean_sum']
corr = Corr(rank,13)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] >= r['r2']] = 1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha56']
return alpha
@timer
def alpha57(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = pd.DataFrame((data['close'] - data['low_min'])/(data['high_max'] \
- data['low_min']) * 100)
alpha = SMA(temp,3,1)
alpha.columns = ['alpha57']
return alpha
@timer
def alpha58(self):
close = self.close
close_delay = Delay(close,1)
count = Count(0,close,close_delay,20)
alpha = count/20.0 * 100
alpha.columns = ['alpha58']
return alpha
@timer
def alpha59(self):
low = self.low
high = self.high
close = self.close
close_delay = Delay(close,1)
max_temp = pd.concat([high,close_delay],axis = 1,join = 'inner')
min_temp = pd.concat([low,close_delay],axis = 1,join = 'inner')
max_temp1 = pd.DataFrame(np.max(max_temp,axis = 1))
min_temp1 = pd.DataFrame(np.min(min_temp,axis = 1))
data = pd.concat([close,close_delay,max_temp1,min_temp1],axis = 1,join = 'inner')
data.columns = ['close','close_delay','max','min']
data['max'][data['close'] > data['close_delay']] = 0
data['min'][data['close'] <= data['close_delay']] = 0
alpha = pd.DataFrame(data['max'] + data['min'])
alpha.columns = ['alpha59']
return alpha
@timer
def alpha60(self):
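# 20-day sum of a money-flow style term: (2 * close - low - high) / (low + high) * volume.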
low = self.low
high = self.high
close = self.close
volume = self.volume
data = pd.concat([low,high,close,volume],axis = 1,join = 'inner')
temp = pd.DataFrame((2 * data['Close'] - data['Low'] - data['High'])/(data['Low'] + \
data['High']) * data['Vol'])
alpha = Sum(temp,20)
alpha.columns = ['alpha60']
return alpha
@timer
def alpha61(self):
low = self.low
volume = self.volume
vwap = self.vwap
vwap_delta = Delta(vwap,1)
vwap_delta_decay = DecayLinear(vwap_delta,12)
r1 = Rank(vwap_delta_decay)
volume_mean = Mean(volume,80)
data = pd.concat([low,volume_mean],axis = 1,join = 'inner')
corr = Corr(data,8)
corr_decay = DecayLinear(corr,17)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) * -1)
alpha.columns = ['alpha61']
return alpha
@timer
def alpha62(self):
high = self.high
volume = self.volume
volume_r = Rank(volume)
data = pd.concat([high,volume_r],axis = 1,join = 'inner')
alpha = -1 * Corr(data,5)
alpha.columns = ['alpha62']
return alpha
@timer
def alpha63(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),6,1)
sma2 = SMA(pd.DataFrame(data['abs']),6,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha63']
return alpha
@timer
def alpha64(self):
vwap = self.vwap
volume = self.volume
close = self.close
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data1 = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr1 = Corr(data1,4)
corr1_decay = DecayLinear(corr1,4)
r1 = Rank(corr1_decay)
close_mean = Mean(close,60)
close_r = Rank(close)
close_mean_r = Rank(close_mean)
data2 = pd.concat([close_r,close_mean_r],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_max = TsMax(corr2,13)
corr2_max_decay = DecayLinear(corr2_max,14)
r2 = Rank(corr2_max_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.max(r,axis = 1) *-1)
alpha.columns = ['alpha64']
return alpha
@timer
def alpha65(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = pd.DataFrame(data['close_mean']/data['close'])
alpha.columns = ['alpha65']
return alpha
@timer
def alpha66(self):
close = self.close
close_mean = Mean(close,6)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha66']
return alpha
@timer
def alpha67(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),24,1)
sma2 = SMA(pd.DataFrame(data['abs']),24,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha67']
return alpha
@timer
def alpha68(self):
high = self.high
volume = self.volume
low = self.low
data = pd.concat([high,low,volume],axis = 1,join = 'inner')
data['sum']= (data['High'] + data['Low'])/2
data['sum_delta'] = Delta(pd.DataFrame(data['sum']),1)
temp = data['sum_delta'] * (data['High'] - data['Low'])/data['Vol']
alpha = SMA(pd.DataFrame(temp),15,2)
alpha.columns = ['alpha68']
return alpha
@timer
def alpha69(self):
high = self.high
low = self.low
Open = self.open
dtm = DTM(Open,high)
dbm = DBM(Open,low)
dtm_sum = Sum(dtm,20)
dbm_sum = Sum(dbm,20)
data = pd.concat([dtm_sum,dbm_sum],axis = 1, join = 'inner')
data.columns = ['dtm','dbm']
data['temp1'] = (data['dtm'] - data['dbm'])/data['dtm']
data['temp2'] = (data['dtm'] - data['dbm'])/data['dbm']
data['temp1'][data['dtm'] <= data['dbm']] = 0
data['temp2'][data['dtm'] >= data['dbm']] = 0
alpha = pd.DataFrame(data['temp1'] + data['temp2'])
alpha.columns = ['alpha69']
return alpha
@timer
def alpha70(self):
amount = self.amt
alpha= STD(amount,6)
alpha.columns = ['alpha70']
return alpha
@timer
def alpha71(self):
close = self.close
close_mean = Mean(close,24)
data = pd.concat([close,close_mean],axis = 1, join = 'inner')
data.columns = ['close','close_mean']
alpha = (data['close'] - data['close_mean'])/data['close_mean'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha71']
return alpha
@timer
def alpha72(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),15,1)
alpha.columns = ['alpha72']
return alpha
@timer
def alpha73(self):
vwap = self.vwap
volume = self.volume
close = self.close
data1 = pd.concat([close,volume],axis = 1,join = 'inner')
corr1 = Corr(data1,10)
corr1_decay = DecayLinear(DecayLinear(corr1,16),4)
r1 = TsRank(corr1_decay,5)
volume_mean = Mean(volume,30)
data2 = pd.concat([vwap,volume_mean],axis = 1,join = 'inner')
corr2 = Corr(data2,4)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1,join ='inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r2'] - r['r1'])
alpha.columns= ['alpha73']
return alpha
@timer
def alpha74(self):
vwap = self.vwap
volume = self.volume
low = self.low
volume_mean = Mean(volume,40)
volume_mean_sum = Sum(volume_mean,20)
data1 = pd.concat([low,vwap],axis = 1,join = 'inner')
data_sum = Sum(pd.DataFrame(data1['Low'] * 0.35 + data1['Vwap'] * 0.65),20)
data = pd.concat([volume_mean_sum,data_sum],axis = 1,join = 'inner')
corr = Corr(data,7)
r1 = Rank(corr)
vwap_r = Rank(vwap)
volume_r = Rank(volume)
data_temp = pd.concat([vwap_r,volume_r],axis = 1,join = 'inner')
corr2 = Corr(data_temp,6)
r2 = Rank(corr2)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] + r['r2'])
alpha.columns = ['alpha74']
return alpha
@timer
def alpha75(self):
close = self.close
Open = self.open
close_index = self.close_index
open_index = self.open_index
data1 = pd.concat([close,Open], axis = 1, join = 'inner')
data1.columns = ['close','open']
data1['temp'] = 1
data1['temp'][data1['close'] <= data1['open']] = 0
data2 = pd.concat([close_index,open_index], axis = 1, join = 'inner')
data2.columns = ['close','open']
data2['tep'] = 1
data2['tep'][data2['close'] > data2['open']] = 0
temp = data1['temp'].unstack()
tep = data2['tep'].unstack()
tep1 = repmat(tep,1,np.size(temp,1))
data3 = temp * tep1
temp_result = data3.rolling(50,min_periods = 50).sum()
tep_result = tep.rolling(50,min_periods = 50).sum()
tep2_result = repmat(tep_result,1,np.size(temp,1))
result = temp_result/tep2_result
alpha = pd.DataFrame(result.stack())
alpha.columns = ['alpha75']
return alpha
@timer
def alpha76(self):
volume = self.volume
close = self.close
close_delay = Delay(close,1)
data = pd.concat([volume,close,close_delay],axis = 1,join = 'inner')
data.columns = ['volume','close','close_delay']
temp = pd.DataFrame(np.abs((data['close']/data['close_delay'] -1 )/data['volume']))
temp_std = STD(temp,20)
temp_mean = Mean(temp,20)
data_temp = pd.concat([temp_std,temp_mean],axis = 1,join = 'inner')
data_temp.columns = ['std','mean']
alpha = pd.DataFrame(data_temp['std']/data_temp['mean'])
alpha.columns = ['alpha76']
return alpha
@timer
def alpha77(self):
vwap = self.vwap
volume = self.volume
low = self.low
high = self.high
data = pd.concat([high,low,vwap],axis = 1,join = 'inner')
temp = pd.DataFrame((data['High'] + data['Low'])/2 - data['Vwap'])
temp_decay = DecayLinear(temp,20)
r1 = Rank(temp_decay)
temp1 = pd.DataFrame((data['High'] + data['Low'])/2)
volume_mean = Mean(volume,40)
data2 = pd.concat([temp1,volume_mean],axis = 1,join = 'inner')
corr = Corr(data2,3)
corr_decay = DecayLinear(corr,6)
r2 = Rank(corr_decay)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
alpha = pd.DataFrame(np.min(r,axis = 1))
alpha.columns = ['alpha77']
return alpha
@timer
def alpha78(self):
low = self.low
high = self.high
close = self.close
data = pd.concat([low,high,close],axis = 1,join = 'inner')
temp = pd.DataFrame((data['Low'] + data['High'] + data['Close'])/3)
temp.columns = ['temp']
temp_mean = Mean(temp,12)
temp_mean.columns = ['temp_mean']
temp2 = pd.concat([temp,temp_mean],axis = 1,join = 'inner')
tmp = pd.DataFrame(temp2['temp'] - temp2['temp_mean'])
data1 = pd.concat([close,temp_mean],axis = 1,join = 'inner')
temp_abs = pd.DataFrame(np.abs(data1['Close'] - data1['temp_mean']))
temp_abs_mean = Mean(temp_abs,12)
df = pd.concat([tmp,temp_abs_mean],axis = 1,join = 'inner')
df.columns = ['df1','df2']
alpha = pd.DataFrame(df['df1']/(df['df2'] * 0.015))
alpha.columns = ['alpha78']
return alpha
@timer
def alpha79(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delay']
data['max'] = data['close'] - data['close_delay']
data['max'][data['max'] < 0] = 0
data['abs'] = np.abs(data['close'] - data['close_delay'])
sma1 = SMA(pd.DataFrame(data['max']),12,1)
sma2 = SMA(pd.DataFrame(data['abs']),12,1)
sma = pd.concat([sma1,sma2],axis = 1,join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/sma['sma2'] * 100)
alpha.columns = ['alpha79']
return alpha
@timer
def alpha80(self):
volume = self.volume
volume_delay = Delay(volume,5)
volume_delay.columns = ['volume_delay']
data = pd.concat([volume,volume_delay],axis = 1,join = 'inner')
alpha = (data['Vol'] - data['volume_delay'])/data['volume_delay']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha80']
return alpha
@timer
def alpha81(self):
volume = self.volume
alpha = SMA(volume,21,2)
alpha.columns = ['alpha81']
return alpha
@timer
def alpha82(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,6)
high_max = TsMax(high,6)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = (data['high_max'] - data['close'])/(data['high_max'] - data['low_min']) * 100
alpha = SMA(pd.DataFrame(temp),20,1)
alpha.columns = ['alpha82']
return alpha
@timer
def alpha83(self):
high = self.high
volume = self.volume
high_r = Rank(high)
volume_r = Rank(volume)
data = pd.concat([high_r,volume_r],axis = 1,join = 'inner')
corr = Corr(data,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha83']
return alpha
@timer
def alpha84(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,20)
alpha.columns = ['alpha84']
return alpha
@timer
def alpha85(self):
close = self.close
volume = self.volume
volume_mean = Mean(volume,20)
close_delta = Delta(close,7)
data1 = pd.concat([volume,volume_mean],axis = 1,join = 'inner')
data1.columns = ['volume','volume_mean']
temp1 = pd.DataFrame(data1['volume']/data1['volume_mean'])
r1 = TsRank(temp1,20)
r2 = TsRank(-1 * close_delta,8)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha85']
return alpha
@timer
def alpha86(self):
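# Piecewise signal from the change in 10-day price slopes (using 10- and 20-day delayed closes);
# defaults to -1 * delta(close, 1) when the slope change lies between 0 and 0.25.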
close = self.close
close_delay20 = Delay(close,20)
close_delay10 = Delay(close,10)
data = pd.concat([close,close_delay20,close_delay10],axis = 1,join = 'inner')
data.columns = ['close','close_delay20','close_delay10']
temp = pd.DataFrame((data['close_delay20'] - data['close_delay10'])/10 - \
(data['close_delay10'] - data['close'])/10)
close_delta = Delta(close,1) * -1
data_temp = pd.concat([close_delta,temp],axis = 1,join = 'inner')
data_temp.columns = ['close_delta','temp']
data_temp['close_delta'][data_temp['temp'] > 0.25]= -1
data_temp['close_delta'][data_temp['temp'] < 0]= 1
alpha = pd.DataFrame(data_temp['close_delta'])
alpha.columns = ['alpha86']
return alpha
@timer
def alpha87(self):
vwap = self.vwap
high = self.high
low = self.low
Open = self.open
vwap_delta = Delta(vwap,4)
vwap_delta_decay = DecayLinear(vwap_delta,7)
r1 = Rank(vwap_delta_decay)
data = pd.concat([low,high,vwap,Open], axis = 1, join = 'inner')
temp = pd.DataFrame((data['Low'] * 0.1 + data['High'] * 0.9 - data['Vwap'])/\
(data['Open'] - 0.5 * (data['Low'] + data['High'])))
temp_decay = DecayLinear(temp,11)
r2 = TsRank(temp_decay,7)
r = pd.concat([r1,r2], axis = 1,join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(-1 * (r['r1'] + r['r2']))
alpha.columns = ['alpha87']
return alpha
@timer
def alpha88(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1,join = 'inner')
data.columns = ['close','close_delta']
alpha = (data['close'] - data['close_delta'])/data['close_delta'] * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha88']
return alpha
@timer
def alpha89(self):
close = self.close
sma1 = SMA(close,13,2)
sma2 = SMA(close,27,2)
sma = pd.concat([sma1,sma2],axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
temp = pd.DataFrame(sma['sma1'] - sma['sma2'])
sma3 = SMA(temp,10,2)
data = pd.concat([temp,sma3],axis = 1, join = 'inner')
data.columns = ['temp','sma']
alpha = pd.DataFrame(2 *(data['temp'] - data['sma']))
alpha.columns = ['alpha89']
return alpha
@timer
def alpha90(self):
volume = self.volume
vwap = self.vwap
r1 = Rank(volume)
r2 = Rank(vwap)
rank = pd.concat([r1,r2], axis = 1, join = 'inner')
corr = Corr(rank,5)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha90']
return alpha
@timer
def alpha91(self):
close = self.close
volume = self.volume
low = self.low
close_max = TsMax(close,5)
data1 = pd.concat([close,close_max], axis = 1,join = 'inner')
data1.columns = ['close','close_max']
r1 = Rank(pd.DataFrame(data1['close'] - data1['close_max']))
volume_mean = Mean(volume,40)
data2 = pd.concat([volume_mean,low], axis = 1, join = 'inner')
corr = Corr(data2,5)
r2 = Rank(corr)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha91']
return alpha
@timer
def alpha92(self):
volume = self.volume
vwap = self.vwap
close = self.close
data = pd.concat([close,vwap],axis = 1, join = 'inner')
data['price'] = data['Close'] * 0.35 + data['Vwap'] * 0.65
price_delta = Delta(pd.DataFrame(data['price']),2)
price_delta_decay = DecayLinear(price_delta,3)
r1 = Rank(price_delta_decay)
volume_mean = Mean(volume,180)
rank = pd.concat([volume_mean,close],axis = 1,join = 'inner')
corr = Corr(rank,13)
temp = pd.DataFrame(np.abs(corr))
temp_decay = DecayLinear(temp,5)
r2 = TsRank(temp_decay,15)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
alpha = pd.DataFrame(-1 * np.max(r, axis = 1))
alpha.columns = ['alpha92']
return alpha
@timer
def alpha93(self):
low = self.low
Open = self.open
open_delay = Delay(Open,1)
data = pd.concat([low,Open,open_delay],axis = 1,join = 'inner')
data.columns = ['low','open','open_delay']
temp1 = pd.DataFrame(data['open'] - data['low'])
temp2 = pd.DataFrame(data['open'] - data['open_delay'])
data_temp = pd.concat([temp1,temp2],axis = 1 ,join = 'inner')
temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
temp_max.columns = ['max']
data2 = pd.concat([data,temp_max],axis = 1,join = 'inner')
data2['temp'] = data2['max']
data2['temp'][data2['open'] >= data2['open_delay']] = 0
alpha = Sum(pd.DataFrame(data2['temp']),20)
alpha.columns = ['alpha93']
return alpha
@timer
def alpha94(self):
close = self.close
volume = self.volume
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([close,close_delay,volume],axis = 1,join = 'inner')
data['sign'] = 1
data['sign'][data['Close'] < data['close_delay']] = -1
data['sign'][data['Close'] == data['close_delay']] = 0
temp = pd.DataFrame(data['Vol'] * data['sign'])
alpha = Sum(temp,30)
alpha.columns = ['alpha94']
return alpha
@timer
def alpha95(self):
amt = self.amt
alpha = STD(amt,20)
alpha.columns = ['alpha95']
return alpha
@timer
def alpha96(self):
low = self.low
high = self.high
close = self.close
low_min = TsMin(low,9)
high_max = TsMax(high,9)
data = pd.concat([close,low_min,high_max],axis = 1,join = 'inner')
data.columns = ['close','low_min','high_max']
temp = ( data['close'] - data['low_min'])/(data['high_max'] - data['low_min']) * 100
alpha_temp = SMA(pd.DataFrame(temp),3,1)
alpha = SMA(alpha_temp,3,1)
alpha.columns = ['alpha96']
return alpha
@timer
def alpha97(self):
volume = self.volume
alpha = STD(volume,10)
alpha.columns = ['alpha97']
return alpha
@timer
def alpha98(self):
close = self.close
close_mean = Mean(close,100)
close_mean_delta = Delta(close_mean,100)
close_delay = Delay(close,100)
data = pd.concat([close_mean_delta,close_delay],axis = 1,join = 'inner')
data.columns = ['delta','delay']
temp = pd.DataFrame(data['delta']/ data['delay'])
close_delta = Delta(close,3)
close_min = TsMin(close,100)
data_temp = pd.concat([close,close_delta,close_min,temp],axis = 1,join = 'inner')
data_temp.columns = ['close','close_delta','close_min','temp']
data_temp['diff'] = (data_temp['close'] - data_temp['close_min']) * -1
data_temp['diff'][data_temp['temp'] < 0.05] = 0
data_temp['close_delta'] = data_temp['close_delta'] * -1
data_temp['close_delta'][data_temp['temp'] >= 0.05]= 0
alpha = pd.DataFrame(data_temp['close_delta'] + data_temp['diff'])
alpha.columns = ['alpha98']
return alpha
@timer
def alpha99(self):
close = self.close
volume = self.volume
r1 = Rank(close)
r2 = Rank(volume)
r = pd.concat([r1,r2],axis = 1,join = 'inner')
cov = Cov(r,5)
alpha = -1 * Rank(cov)
alpha.columns = ['alpha99']
return alpha
@timer
def alpha100(self):
volume = self.volume
alpha = STD(volume,20)
alpha.columns = ['alpha100']
return alpha
@timer
def alpha101(self):
close = self.close
volume = self.volume
high = self.high
vwap = self.vwap
volume_mean = Mean(volume,30)
volume_mean_sum = Sum(volume_mean,37)
data1 = pd.concat([close,volume_mean_sum], axis = 1, join = 'inner')
corr1 = Corr(data1,15)
r1 = Rank(corr1)
data2 = pd.concat([high,vwap],axis = 1, join = 'inner')
temp = pd.DataFrame(data2['High'] * 0.1 + data2['Vwap'] * 0.9)
temp_r = Rank(temp)
volume_r = Rank(volume)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,11)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
r['alpha'] = 0
r['alpha'][r['r1'] < r['r2']] = -1
alpha = pd.DataFrame(r['alpha'])
alpha.columns = ['alpha101']
return alpha
@timer
def alpha102(self):
volume = self.volume
temp = Delta(volume,1)
temp.columns = ['temp']
temp['max'] = temp['temp']
temp['max'][temp['temp'] < 0 ] = 0
temp['abs'] = np.abs(temp['temp'])
sma1 = SMA(pd.DataFrame(temp['max']),6,1)
sma2 = SMA(pd.DataFrame(temp['abs']),6,1)
sma = pd.concat([sma1,sma2], axis = 1 ,join ='inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1']/ sma['sma2'] * 100)
alpha.columns = ['alpha102']
return alpha
@timer
def alpha103(self):
low = self.low
lowday = Lowday(low,20)
alpha = (20 - lowday)/20.0 * 100
alpha.columns = ['alpha103']
return alpha
@timer
def alpha104(self):
close = self.close
volume = self.volume
high = self.high
data = pd.concat([high,volume], axis = 1, join = 'inner')
corr = Corr(data,5)
corr_delta = Delta(corr,5)
close_std = STD(close,20)
r1 = Rank(close_std)
temp = pd.concat([corr_delta,r1], axis = 1, join = 'inner')
temp.columns = ['delta','r']
alpha = pd.DataFrame(-1 * temp['delta'] * temp['r'])
alpha.columns = ['alpha104']
return alpha
@timer
def alpha105(self):
volume = self.volume
Open = self.open
volume_r = Rank(volume)
open_r = Rank(Open)
rank = pd.concat([volume_r,open_r],axis = 1, join = 'inner')
alpha = -1 * Corr(rank,10)
alpha.columns = ['alpha105']
return alpha
@timer
def alpha106(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
alpha = pd.DataFrame(data['close'] - data['close_delay'])
alpha.columns = ['alpha106']
return alpha
@timer
def alpha107(self):
Open = self.open
high = self.high
close = self.close
low = self.low
high_delay = Delay(high,1)
close_delay = Delay(close,1)
low_delay = Delay(low,1)
data = pd.concat([high_delay,close_delay,low_delay,Open], axis = 1, join = 'inner')
data.columns = ['high_delay','close_delay','low_delay','open']
r1 = Rank(pd.DataFrame(data['open'] - data['high_delay']))
r2 = Rank(pd.DataFrame(data['open'] - data['close_delay']))
r3 = Rank(pd.DataFrame(data['open'] - data['low_delay']))
alpha = -1 * r1 * r2 * r3
alpha.columns = ['alpha107']
return alpha
@timer
def alpha108(self):
high = self.high
volume = self.volume
vwap = self.vwap
high_min = TsMin(high,2)
data1 = pd.concat([high,high_min], axis = 1, join = 'inner')
data1.columns = ['high','high_min']
r1 = Rank(pd.DataFrame(data1['high'] - data1['high_min']))
volume_mean = Mean(volume,120)
rank = pd.concat([vwap,volume_mean],axis = 1, join = 'inner')
corr = Corr(rank,6)
r2 = Rank(corr)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = r['r1'] * r['r2'] * -1
alpha.columns = ['alpha108']
return alpha
@timer
def alpha109(self):
high = self.high
low = self.low
data = pd.concat([high,low],axis = 1, join = 'inner')
temp = SMA(pd.DataFrame(data['High'] - data['Low']),10,2)
sma = SMA(temp,10,2)
sma_temp = pd.concat([temp,sma],axis = 1, join = 'inner')
sma_temp.columns = ['temp','sma']
alpha = pd.DataFrame(sma_temp['temp']/sma_temp['sma'])
alpha.columns = ['alpha109']
return alpha
@timer
def alpha110(self):
high = self.high
low = self.low
close = self.close
close_delay = Delay(close,1)
close_delay.columns = ['close_delay']
data = pd.concat([high,low,close_delay], axis = 1, join = 'inner')
data['max1'] = data['High'] - data['close_delay']
data['max2'] = data['close_delay'] - data['Low']
data['max1'][data['max1'] < 0] = 0
data['max2'][data['max2'] < 0] = 0
s1 = Sum(pd.DataFrame(data['max1']),20)
s2 = Sum(pd.DataFrame(data['max2']),20)
s = pd.concat([s1,s2], axis = 1 , join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'])
alpha.columns = ['alpha110']
return alpha
@timer
def alpha111(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
data = pd.concat([high,low,close,volume], axis = 1, join = 'inner')
temp = pd.DataFrame(data['Vol'] * (2 * data['Close'] - data['Low'] - data['High'])\
/(data['High'] - data['Low']))
sma1 = SMA(temp,11,2)
sma2 = SMA(temp,4,2)
sma = pd.concat([sma1, sma2], axis = 1, join = 'inner')
sma.columns = ['sma1','sma2']
alpha = pd.DataFrame(sma['sma1'] - sma['sma2'])
alpha.columns = ['alpha111']
return alpha
@timer
def alpha112(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close, close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['temp'] = 1
data['temp'][data['close'] > data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha112']
return alpha
@timer
def alpha113(self):
close = self.close
volume = self.volume
close_delay = Delay(close,5)
close_delay_mean = Mean(close_delay,20)
data1 = pd.concat([close,volume],axis = 1, join = 'inner')
corr = Corr(data1,2)
r1 = Rank(close_delay_mean)
data2 = pd.concat([r1,corr], axis = 1, join = 'inner')
data2.columns = ['r1','corr']
r1 = pd.DataFrame(data2['r1'] * data2['corr'])
close_sum5 = Sum(close,5)
close_sum20 = Sum(close,20)
data3 = pd.concat([close_sum5,close_sum20],axis = 1, join = 'inner')
corr2 = Corr(data3,2)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'] * -1)
alpha.columns = ['alpha113']
return alpha
@timer
def alpha114(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
vwap = self.vwap
close_mean = Mean(close,5)
data = pd.concat([high,low,close_mean], axis = 1, join = 'inner')
data.columns = ['high','low','close_mean']
temp = pd.DataFrame((data['high'] - data['low'])/data['close_mean'])
temp_delay = Delay(temp,2)
r1 = TsRank(temp_delay,5)
temp1 = pd.concat([temp,vwap,close], axis = 1, join = 'inner')
temp1.columns = ['temp','vwap','close']
tep = pd.DataFrame(temp1['temp']/(temp1['vwap'] - temp1['close']))
r2 = TsRank(volume,5)
data2 = pd.concat([r2,tep], axis = 1, join = 'inner')
data2.columns = ['r2','tep']
tep1 = pd.DataFrame(data2['r2']/data2['tep'])
r3 = TsRank(tep1,5)
r = pd.concat([r1,r3],axis = 1, join = 'inner')
r.columns = ['r1','r3']
alpha = pd.DataFrame(r['r1'] + r['r3'])
alpha.columns = ['alpha114']
return alpha
@timer
def alpha115(self):
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,30)
price = pd.concat([high,low], axis = 1, join = 'inner')
price.columns = ['high','low']
price_temp = price['high'] * 0.9 + price['low'] * 0.1
data = pd.concat([price_temp,volume_mean],axis = 1, join = 'inner')
corr = Corr(data,10)
r1 = Rank(corr)
data2 = pd.concat([high,low], axis = 1, join = 'inner')
temp = pd.DataFrame((data2['High'] + data2['Low'])/2)
temp_r = TsRank(temp,4)
volume_r = TsRank(volume,10)
data3 = pd.concat([temp_r,volume_r], axis = 1, join = 'inner')
corr2 = Corr(data3,7)
r2 = Rank(corr2)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] * r['r2'])
alpha.columns = ['alpha115']
return alpha
@timer
def alpha116(self):
close = self.close
alpha = RegResi(0,close,None,20)
alpha.columns = ['alpha116']
return alpha
@timer
def alpha117(self):
high = self.high
low = self.low
close = self.close
volume = self.volume
ret = self.ret
r1 = TsRank(volume,32)
data1 = pd.concat([close,high,low],axis = 1, join = 'inner')
r2 = TsRank(pd.DataFrame(data1['Close'] + data1['High'] - data1['Low']),16)
r3 = TsRank(ret,32)
r = pd.concat([r1,r2,r3], axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(r['r1'] * (1 - r['r2']) * (1 - r['r3']))
alpha.columns = ['alpha117']
return alpha
@timer
def alpha118(self):
high = self.high
low = self.low
Open = self.open
data = pd.concat([high,low,Open], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame(data['High'] - data['Open']),20)
s2 = Sum(pd.DataFrame(data['Open'] - data['Low']),20)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(s['s1']/s['s2'] * 100)
alpha.columns = ['alpha118']
return alpha
@timer
def alpha119(self):
Open = self.open
volume = self.volume
vwap = self.vwap
volume_mean = Mean(volume,5)
volume_mean_sum = Sum(volume_mean,26)
data1 = pd.concat([vwap,volume_mean_sum],axis = 1, join = 'inner')
corr1 = Corr(data1,5)
corr1_decay = DecayLinear(corr1,7)
r1 = Rank(corr1_decay)
open_r = Rank(Open)
volume_mean2 = Mean(volume,15)
volume_mean2_r = Rank(volume_mean2)
data2 = pd.concat([open_r, volume_mean2_r], axis = 1, join = 'inner')
corr2 = Corr(data2,21)
corr2_min = TsMin(corr2,9)
corr2_min_r = TsRank(corr2_min,7)
corr_min_r_decay = DecayLinear(corr2_min_r,8)
r2 = Rank(corr_min_r_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha119']
return alpha
@timer
def alpha120(self):
vwap = self.vwap
close = self.close
data = pd.concat([vwap,close], axis = 1, join = 'inner')
r1 = Rank(pd.DataFrame(data['Vwap'] - data['Close']))
r2 = Rank(pd.DataFrame(data['Vwap'] + data['Close']))
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha120']
return alpha
@timer
def alpha121(self):
vwap = self.vwap
volume = self.volume
vwap_r = TsRank(vwap,20)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,2)
data = pd.concat([vwap_r,volume_mean_r], axis = 1, join = 'inner')
corr= Corr(data,18)
temp = TsRank(corr,3)
vwap_min = TsMin(vwap,12)
data2 = pd.concat([vwap,vwap_min],axis = 1, join = 'inner')
data2.columns = ['vwap','vwap_min']
rank = Rank(pd.DataFrame(data2['vwap'] - data2['vwap_min']))
data3 = pd.concat([rank,temp],axis = 1, join = 'inner')
data3.columns = ['rank','temp']
alpha = pd.DataFrame(np.power(data3['rank'],data3['temp']) * -1)
alpha.columns = ['alpha121']
return alpha
@timer
def alpha122(self):
close = self.close
close_ln = pd.DataFrame(np.log(close))
temp1 = SMA(close_ln,13,2)
sma1 = SMA(temp1,13,2)
sma2 = SMA(sma1,13,2)
sma3 = SMA(sma2,13,2)
sma3_delay = Delay(sma3,1)
data = pd.concat([sma3,sma3_delay],axis = 1, join = 'inner')
data.columns = ['sma','sma_delay']
alpha = pd.DataFrame(data['sma']/data['sma_delay'])
alpha.columns = ['alpha122']
return alpha
@timer
def alpha123(self):
volume = self.volume
high = self.high
low = self.low
data1 = pd.concat([high,low], axis = 1, join = 'inner')
s1 = Sum(pd.DataFrame((data1['High'] + data1['Low'])/2),20)
volume_mean = Mean(volume,60)
s2 = Sum(volume_mean,20)
data2 = pd.concat([s1,s2], axis = 1, join = 'inner')
corr1 = Corr(data2,9)
data3 = pd.concat([low,volume], axis = 1, join = 'inner')
corr2 = Corr(data3,6)
corr1_r = Rank(corr1)
corr2_r = Rank(corr2)
data = pd.concat([corr1_r,corr2_r], axis = 1, join = 'inner')
data.columns = ['r1','r2']
data['alpha'] = -1
data['alpha'][data['r1'] >= data['r2']] = 0
alpha = pd.DataFrame(data['alpha'])
alpha.columns = ['alpha123']
return alpha
@timer
def alpha124(self):
close = self.close
vwap = self.vwap
close_max = TsMax(close,30)
close_max_r = Rank(close_max)
close_max_r_decay = DecayLinear(close_max_r,2)
close_max_r_decay.columns = ['decay']
data = pd.concat([close,vwap,close_max_r_decay], axis = 1, join ='inner')
alpha = pd.DataFrame((data['Close'] - data['Vwap'])/data['decay'])
alpha.columns = ['alpha124']
return alpha
@timer
def alpha125(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,80)
data1 = pd.concat([vwap,volume_mean], axis = 1, join = 'inner')
corr1 = Corr(data1,17)
data2 = pd.concat([close,vwap], axis = 1, join = 'inner')
temp2 = pd.DataFrame(0.5*(data2['Close'] + data2['Vwap']))
temp2_delta = Delta(temp2,3)
corr1_decay = DecayLinear(corr1,20)
r1 = Rank(corr1_decay)
temp2_delta_decay = DecayLinear(temp2_delta,16)
r2 = Rank(temp2_delta_decay)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha125']
return alpha
@timer
def alpha126(self):
close = self.close
high = self.high
low = self.low
data = pd.concat([close,high,low], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] + data['High'] + data['Low'])/3)
alpha.columns = ['alpha126']
return alpha
@timer
def alpha127(self):
close = self.close
close_max = TsMax(close,12)
data = pd.concat([close,close_max], axis = 1, join = 'inner')
data.columns = ['close','close_max']
alpha = pd.DataFrame((data['close'] - data['close_max'])/data['close_max'])
alpha.columns = ['alpha127']
return alpha
@timer
def alpha128(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
data = pd.concat([close,high,low,volume], axis = 1, join = 'inner')
data['temp1'] = (data['Close'] + data['Low'] + data['High'])/3
data['temp2'] = data['temp1'] * data['Vol']
data['temp3'] = data['temp1'] * data['Vol']
temp_delay = Delay(pd.DataFrame(data['temp1']),1)
temp_delay.columns = ['temp_decay']
data = pd.concat([data,temp_delay], axis = 1, join = 'inner')
data['temp2'][data['temp1'] < data['temp_decay']] = 0
data['temp3'][data['temp1'] > data['temp_decay']] = 0
s1 = Sum(pd.DataFrame(data['temp2']),14)
s2 = Sum(pd.DataFrame(data['temp3']),14)
s = pd.concat([s1,s2], axis = 1, join = 'inner')
s.columns = ['s1','s2']
alpha = pd.DataFrame(100 - 100/(1+ s['s1']/s['s2']))
alpha.columns = ['alpha128']
return alpha
@timer
def alpha129(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
data['abs'] = np.abs(data['close'] - data['close_delay'])
data['temp'] = data['abs']
data['temp'][data['close'] < data['close_delay']] = 0
alpha = Sum(pd.DataFrame(data['temp']),12)
alpha.columns = ['alpha129']
return alpha
@timer
def alpha130(self):
close = self.close
high = self.high
low = self.low
volume = self.volume
volume_mean = Mean(volume,40)
data1 = pd.concat([high,low],axis = 1, join = 'inner')
temp1 = pd.DataFrame((data1['High'] + data1['Low'])/2)
rank1 = pd.concat([temp1,volume_mean], axis = 1, join = 'inner')
corr = Corr(rank1,9)
close_r = Rank(close)
volume_r = Rank(volume)
data2 = pd.concat([close_r,volume_r],axis = 1, join = 'inner')
corr2 = Corr(data2,7)
corr_decay = DecayLinear(corr,10)
r1 = Rank(corr_decay)
corr2_decay = DecayLinear(corr2,3)
r2 = Rank(corr2_decay)
r = pd.concat([r1,r2],axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1']/r['r2'])
alpha.columns = ['alpha130']
return alpha
@timer
def alpha131(self):
close = self.close
vwap = self.vwap
volume = self.volume
volume_mean = Mean(volume,50)
data1 = pd.concat([close,volume_mean], axis = 1, join = 'inner')
corr = Corr(data1,18)
vwap_delta = Delta(vwap,1)
temp2 = TsRank(corr,18)
data2 = pd.concat([vwap_delta,temp2],axis = 1, join = 'inner')
data2.columns = ['vwap_delta','temp2']
temp3 = np.power(data2['vwap_delta'],data2['temp2'])
alpha = Rank(pd.DataFrame(temp3))
alpha.columns = ['alpha131']
return alpha
@timer
def alpha132(self):
amt = self.amt
alpha = Mean(amt,20)
alpha.columns = ['alpha132']
return alpha
@timer
def alpha133(self):
low = self.low
high = self.high
highday = Highday(high,20)
lowday = Lowday(low,20)
data = pd.concat([highday,lowday],axis = 1, join = 'inner')
data.columns = ['highday','lowday']
alpha = (20 - data['highday'])/20.0 * 100 - (20 - data['lowday'])/20.0 * 100
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha133']
return alpha
@timer
def alpha134(self):
close = self.close
volume = self.volume
close_delay = Delay(close,12)
close_delay.columns = ['close_delay']
data = pd.concat([close,volume,close_delay], axis = 1, join = 'inner')
alpha = pd.DataFrame((data['Close'] - data['close_delay'])/data['close_delay'] * data['Vol'])
alpha.columns = ['alpha134']
return alpha
@timer
def alpha135(self):
close = self.close
close_delay = Delay(close,20)
data = pd.concat([close,close_delay],axis = 1 , join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame(data['close']/data['close_delay'])
temp_delay = Delay(temp,1)
alpha = SMA(temp_delay,20,1)
alpha.columns = ['alpha135']
return alpha
@timer
def alpha136(self):
volume = self.volume
Open = self.open
ret = self.ret
ret_delta = Delta(ret,3)
ret_delta_r = Rank(ret_delta)
data = pd.concat([Open,volume],axis = 1, join = 'inner')
corr = Corr(data,10)
data_temp = pd.concat([ret_delta_r,corr],axis = 1, join = 'inner')
data_temp.columns = ['ret_delta','corr']
alpha = pd.DataFrame(-1 * data_temp['ret_delta'] * data_temp['corr'])
alpha.columns = ['alpha136']
return alpha
@timer
def alpha137(self):
Open = self.open
close = self.close
low = self.low
high = self.high
close_delay = Delay(close,1)
open_delay = Delay(Open,1)
low_delay = Delay(low,1)
data = pd.concat([Open,close,low,high,close_delay,open_delay,low_delay], axis =1 ,join = 'inner')
data.columns= ['open','close','low','high','close_delay','open_delay','low_delay']
temp1 = pd.DataFrame((data['close'] - data['close_delay'] + (data['close'] - data['open'])/2\
+ data['close_delay'] - data['open_delay'])/ np.abs(data['high'] - data['close_delay']))
temp2 = pd.DataFrame(np.abs(data['high'] - data['close_delay']) + np.abs(data['low'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
temp3 = pd.DataFrame(np.abs(data['low'] - data['close_delay']) + np.abs(data['high'] - data['close_delay'])/2 \
+ np.abs(data['close_delay'] - data['open_delay'])/4)
abs1 = pd.DataFrame(np.abs(data['high'] - data['close_delay']))
abs2 = pd.DataFrame(np.abs(data['low'] - data['close_delay']))
abs3 = pd.DataFrame(np.abs(data['high'] - data['low_delay']))
data1 = pd.concat([abs1,abs2,abs3], axis = 1, join = 'inner')
data1.columns = ['abs1','abs2','abs3']
data_temp = pd.concat([abs1,abs2],axis = 1, join = 'inner')
data_temp_max = pd.DataFrame(np.max(data_temp,axis = 1))
data_temp_max.columns = ['max']
data_temp1 = pd.concat([data,data_temp_max], axis = 1, join = 'inner')
temp4 = pd.DataFrame((np.abs(data_temp1['high'] - data_temp1['low_delay']) + \
np.abs(data_temp1['close_delay'] - data_temp1['open_delay'])) *\
data_temp1['max'])
data1['judge1'] = 0
data1['judge2'] = 0
data1['judge3'] = 0
data1['judge4'] = 0
data1['judge1'][data1['abs1'] > data1['abs2']] = 1
data1['judge2'][data1['abs1'] > data1['abs3']] = 1
data1['judge3'][data1['abs2'] > data1['abs3']] = 1
data1['judge4'][data1['abs3'] > data1['abs1']] = 1
judge_1 = pd.DataFrame(data1['judge1'] * data1['judge2'])
judge_2 = pd.DataFrame(data1['judge3'] * data1['judge4'])
data2 = pd.concat([temp1,temp2,temp3,temp4,judge_1,judge_2], axis = 1, join = 'inner')
data2.columns = ['t1','t2','t3','t4','j1','j2']
data2['j3'] = 1
data2['j4'] = data2['j3'] - data2['j1'] - data2['j2']
data2['t5'] = data2['t2'] * data2['j1'] + data2['t3'] * data2['j2'] + \
data2['t4'] * data2['j4']
alpha = pd.DataFrame(16 * data2['t5']/data2['t1'])
alpha.columns = ['alpha137']
return alpha
@timer
def alpha138(self):
vwap = self.vwap
volume = self.volume
low = self.low
data1 = pd.concat([low,vwap], axis = 1, join = 'inner')
temp1 = pd.DataFrame(data1['Low'] * 0.7 + data1['Vwap'] * 0.3)
temp1_delta = Delta(temp1,3)
temp1_delta_decay = DecayLinear(temp1_delta,20)
r1 = Rank(temp1_delta_decay)
low_r = TsRank(low,8)
volume_mean = Mean(volume,60)
volume_mean_r = TsRank(volume_mean,17)
data2 = pd.concat([low_r,volume_mean_r],axis = 1, join = 'inner')
corr = Corr(data2,5)
corr_r = TsRank(corr,19)
corr_r_decay = DecayLinear(corr_r,16)
r2 = TsRank(corr_r_decay,7)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
r.columns = ['r1','r2']
alpha = pd.DataFrame(r['r1'] - r['r2'])
alpha.columns = ['alpha138']
return alpha
@timer
def alpha139(self):
Open = self.open
volume = self.volume
data = pd.concat([Open,volume], axis = 1, join = 'inner')
alpha = -1 * Corr(data,10)
alpha.columns = ['alpha139']
return alpha
@timer
def alpha140(self):
Open = self.open
volume = self.volume
high = self.high
low = self.low
close = self.close
open_r = Rank(Open)
low_r = Rank(low)
high_r = Rank(high)
close_r = Rank(close)
data1 = pd.concat([open_r,low_r,high_r,close_r],axis = 1, join = 'inner')
data1.columns = ['open_r','low_r','high_r','close_r']
temp = pd.DataFrame(data1['open_r'] + data1['low_r'] - \
(data1['high_r'] + data1['close_r']))
close_r_temp = TsRank(close,8)
volume_mean = Mean(volume,70)
volume_mean_r = TsRank(volume_mean,20)
data2 = pd.concat([close_r_temp,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data2,8)
temp_decay = DecayLinear(temp,8)
r1 = Rank(temp_decay)
corr_decay = DecayLinear(corr,7)
r2 = TsRank(corr_decay,3)
r = pd.concat([r1,r2], axis = 1, join = 'inner')
alpha = pd.DataFrame(np.min(r, axis = 1))
alpha.columns = ['alpha140']
return alpha
@timer
def alpha141(self):
volume = self.volume
high = self.high
volume_mean = Mean(volume,15)
high_r = Rank(high)
volume_mean_r = Rank(volume_mean)
data = pd.concat([high_r,volume_mean_r], axis = 1, join = 'inner')
corr = Corr(data,9)
alpha = -1 * Rank(corr)
alpha.columns = ['alpha141']
return alpha
@timer
def alpha142(self):
close = self.close
volume = self.volume
close_r = TsRank(close,10)
r1 = Rank(close_r)
close_delta = Delta(close,1)
close_delta_delta = Delta(close_delta,1)
r2 = Rank(close_delta_delta)
volume_mean = Mean(volume,20)
data = pd.concat([volume,volume_mean], axis = 1, join = 'inner')
data.columns = ['v','v_m']
temp = pd.DataFrame(data['v']/data['v_m'])
temp_r = TsRank(temp,5)
r3 = Rank(temp_r)
r = pd.concat([r1,r2,r3],axis = 1, join = 'inner')
r.columns = ['r1','r2','r3']
alpha = pd.DataFrame(- 1* r['r1'] * r['r2'] * r['r3'])
alpha.columns= ['alpha142']
return alpha
@timer
def alpha143(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay], axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] - data['close_delay'])/data['close_delay'])
temp.columns= ['temp']
data_temp = pd.concat([data,temp],axis = 1, join = 'inner')
data_temp['temp'][data_temp['close'] <= data_temp['close_delay']] = 1
temp_unstack = data_temp['temp'].unstack()
temp_unstack.iloc[0,:] = 1
df = np.cumprod(temp_unstack,axis = 0)
alpha = pd.DataFrame(df.stack())
alpha.columns = ['alpha143']
return alpha
@timer
def alpha144(self):
close = self.close
amt = self.amt
close_delay = Delay(close,1)
data = pd.concat([close,close_delay,amt], axis = 1, join = 'inner')
data.columns = ['close','close_delay','amt']
data['temp'] = np.abs(data['close']/data['close_delay'] - 1)/data['amt']
data['sign'] = 1
data['sign'][data['close'] >= data['close_delay']] = 0
tep1 = Sum(pd.DataFrame(data['sign'] * data['temp']),20)
tep2 = Count(0,pd.DataFrame(data['close_delay']),pd.DataFrame(data['close']),20)
data2 = pd.concat([tep1,tep2], axis = 1, join = 'inner')
data2.columns = ['tep1','tep2']
alpha = pd.DataFrame(data2['tep1']/data2['tep2'])
alpha.columns = ['alpha144']
return alpha
@timer
def alpha145(self):
volume = self.volume
volume_mean9 = Mean(volume,9)
volume_mean26 = Mean(volume,26)
volume_mean12 = Mean(volume,12)
data = pd.concat([volume_mean9,volume_mean26,volume_mean12], axis = 1, join = 'inner')
data.columns = ['m9','m26','m12']
alpha = pd.DataFrame((data['m9'] - data['m26'])/data['m12'] * 100)
alpha.columns = ['alpha145']
return alpha
@timer
def alpha146(self):
close = self.close
close_delay = Delay(close,1)
data = pd.concat([close,close_delay],axis = 1, join = 'inner')
data.columns = ['close','close_delay']
temp = pd.DataFrame((data['close'] -data['close_delay'])/data['close_delay'])
sma1 = SMA(temp,61,2)
data2 = pd.concat([temp,sma1], axis = 1, join = 'inner')
data2.columns = ['temp1','sma1']
data2['temp2'] = data2['temp1'] - data2['sma1']
temp2_mean = Mean(pd.DataFrame(data2['temp2']),20)
sma2 = SMA(pd.DataFrame(data2['temp1'] - data2['temp2']),61,2)
data_temp = pd.concat([temp2_mean,pd.DataFrame(data2['temp2']),sma2], axis = 1 , join = 'inner')
data_temp.columns = ['temp2_mean','temp2','sma2']
alpha = data_temp['temp2_mean'] * data_temp['temp2'] / data_temp['sma2']
alpha = pd.DataFrame(alpha)
alpha.columns = ['alpha146']
return alpha
@timer
def alpha147(self):
close = self.close
close_mean = Mean(close,12)
alpha = RegBeta(0,close_mean,None,12)
alpha.columns = ['alpha147']
return alpha
@timer
def alpha148(self):
Open = self.open
volume = self.volume
volume_mean = Mean(volume,60)
volume_mean_s = Sum(volume_mean,9)
data = pd.concat([Open,volume_mean_s],axis = 1, join = 'inner')
corr = Corr(data,6)
r1 = Rank(corr)
open_min = TsMin(Open,14)
data2 = pd.concat([Open,open_min], axis = 1, join = 'inner')
data2.columns = ['open','open_min']
r2 = Rank( | pd.DataFrame(data2['open'] - data2['open_min']) | pandas.DataFrame |
import pandas as pd
import numpy as np
""" LOCAL IMPORTS """
from src.data_preprocessing import remove_misc, randomize_units
from src.common import Common
from src.common import get_max_len, create_final_data
from src.data_creation.laptop_data_classes import populate_spec
from src.data_creation.general_cpu_data_creation import create_general_cpu_data
from src.data_creation.general_drive_data import create_final_drive_data
from src.data_creation.gs_data_creation import create_computer_gs_data
from src.data_creation.laptop_data_creation import create_pseudo_laptop_data
from src.data_creation.pcpartpicker_data_creation import create_pcpartpicker_data
from src.data_creation.retailer_test_creation import create_laptop_test_data
from src.data_creation.neg_laptop_test_creation import create_neg_laptop_test_data
from src.data_creation.retailer_laptop_train_creation import create_retailer_laptop_train_data
def gen_gb_pos_data():
'''
Create positive gigabyte data (ex: 8 gb vs 8 gb) to essentially
differentiate between numbers.
'''
pos = []
for x in range(2, 5000, 2):
attr = '{} {}'.format(x, 'gb')
pos.append([attr, attr, 1])
return pd.DataFrame(pos, columns = Common.COLUMN_NAMES)
def gen_neg_gb_data():
'''
Create negative gigabyte data (ex: 8 gb vs 10 gb) to essentially
differentiate between numbers.
'''
neg = []
for x in range(2, 1000, 2):
for y in range(2, 1000, 2):
x_attr = '{} {}'.format(x, 'gb')
y_attr = '{} {}'.format(y, 'gb')
if x != y:
neg.append([x_attr, y_attr, 0])
return pd.DataFrame(neg, columns = Common.COLUMN_NAMES)
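# Hedged illustration (not part of the original pipeline): assuming Common.COLUMN_NAMES
# names two attribute columns plus a 0/1 match label, the generators above produce rows
# roughly like the following.
#
#   gen_gb_pos_data().head(2)   ->   ('2 gb', '2 gb', 1), ('4 gb', '4 gb', 1)
#   gen_neg_gb_data().head(2)   ->   ('2 gb', '4 gb', 0), ('2 gb', '6 gb', 0)
#
# The positive set walks even sizes up to 5000 gb, while the negative set pairs every
# two distinct even sizes below 1000 gb.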
def create_data():
'''
Runs the necessary functions to create the data for training.
'''
# Don't show the copy warnings
pd.set_option('mode.chained_assignment', None)
# Run the functions
populate_spec()
create_pcpartpicker_data()
create_general_cpu_data()
create_final_drive_data()
create_pseudo_laptop_data()
final_gb_data = create_final_data(gen_gb_pos_data(), gen_neg_gb_data())
final_gb_data.reset_index(inplace=True)
randomize_units(final_gb_data, units=['gb'])
create_laptop_test_data()
create_neg_laptop_test_data()
create_retailer_laptop_train_data()
create_computer_gs_data()
print('Generating gigabyte data (as in just examples that use GB)')
# Load all the data
final_computer_df = pd.read_csv('data/train/wdc_computers.csv')
final_pseudo_laptop_df = | pd.read_csv('data/train/spec_train_data_new.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
import pandas._testing as tm
def test_data_frame_value_counts_unsorted():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(sort=False)
expected = pd.Series(
data=[1, 2, 1],
index=pd.MultiIndex.from_arrays(
[(2, 4, 6), (2, 0, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_ascending():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(ascending=True)
expected = pd.Series(
data=[1, 1, 2],
index=pd.MultiIndex.from_arrays(
[(2, 6, 4), (2, 0, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_default():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts()
expected = pd.Series(
data=[2, 1, 1],
index=pd.MultiIndex.from_arrays(
[(4, 2, 6), (0, 2, 0)], names=["num_legs", "num_wings"]
),
)
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import copy
import gc
import numpy
from numpy.linalg import LinAlgError
import joblib
import pandas
import psutil
import pygmo
from scipy.optimize import minimize
from scipy.optimize import differential_evolution
import time
from typing import Dict, List, Tuple
import warnings
from .constants import Constants
from .constants import Messages
from .datatypes import Measurement
from .datatypes import Sensitivity
from .generalized_islands import ArchipelagoHelpers
from .generalized_islands import LossCalculator
from .generalized_islands import ParallelEstimationInfo
from .model_checking import ModelChecker
from .oed import CovOptimality
from .parameter import Parameter
from .parameter import ParameterMapper
from .parameter import ParameterManager
from .simulation import ExtendedSimulator
from .utils import Calculations
from .utils import OwnDict
from .utils import Helpers
EPS64 = Constants.eps_float64
PRETTY_METRICS = Constants.pretty_metrics
SINGLE_ID = Constants.single_id
class Caretaker():
"""
Manages (takes care of) all major methods related to simulation, estimation,
and evaluation of dynamic bioprocess models and their observation functions.
Exposes several convenient methods of the individual classes in this module.
"""
def __init__(self,
bioprocess_model_class, model_parameters, states:list=None, initial_values:dict=None, replicate_ids:list=None,
initial_switches:list=None, model_name:str=None, observation_functions_parameters:List[tuple]=None,
model_checking_assistance:bool=True,
):
"""
Arguments
---------
bioprocess_model_class : Subclass of BioprocessModel
This class implements the bioprocess model.
model_parameters : list or dict
The model parameters, as specified in `bioprocess_model_class`.
Keyword arguments
-----------------
states : list
The model states, as specified in `bioprocess_model_class`.
Default is None, which enforces `initial_values` not to be None.
initial_values : dict
Initial values to the model, keys must match the states with a trailing '0'.
Default is None, which enforces `states` not to be None.
replicate_ids : list
Unique ids of replicates, for which the full model applies.
The parameters, as specified for the model and observation functions, are considered global ones,
which may have different names and values for each replicate.
Default is None, which implies a single replicate model.
initial_switches : list
A list of booleans, indicating the initial state of switches.
Number of switches must correspond to the number of return events in method `state_events`,
if this method is implemented by the inheriting class.
Default is None, which enables auto-detection of initial switches, which all will be False.
model_name : str
A descriptive model name.
Default is None.
observation_functions_parameters : list of tuples
Each tuple stores a subclass of ObservationFunction
and a dictionary of its corresponding parametrization.
Default is None, which implies that there are no ObservationFunctions.
model_checking_assistance : bool
Runs a few sanity and call checks on the implemented model
"""
# store arguements for later use
self.__bioprocess_model_class = bioprocess_model_class
self.__model_parameters = model_parameters
self.__states = states
self.__initial_values = initial_values
self.__replicate_ids = replicate_ids
self.__initial_switches = initial_switches
self.__model_name = model_name
self.__observation_functions_parameters = observation_functions_parameters
self.replicate_ids = replicate_ids
if model_name is None:
self.model_name = bioprocess_model_class.__name__
else:
self.model_name = model_name
# Create an ExtendedSimulator instance for each replicate id
model_checker = ModelChecker()
self.simulators = {}
for _replicate_id in self.replicate_ids:
if _replicate_id is None:
_model_name = model_name
else:
_model_name = f'{model_name}_{_replicate_id}'
_simulator = ExtendedSimulator(
bioprocess_model_class,
model_parameters,
states,
initial_values,
initial_switches,
_model_name,
observation_functions_parameters,
_replicate_id,
)
if model_checking_assistance:
if not model_checker.check_model_consistency(copy.deepcopy(_simulator)):
warnings.warn(f'There might be some issues for {_model_name} with replicate_id {_replicate_id}')
self.simulators[_replicate_id] = _simulator
# Create a ParameterManager object
self._parameter_manager = ParameterManager(
self.replicate_ids,
self.simulators[self.replicate_ids[0]].get_all_parameters(),
)
self.optimizer_kwargs = None
#%% Properties
@property
def replicate_ids(self) -> list:
return self._replicate_ids
@replicate_ids.setter
def replicate_ids(self, value):
if value is None:
self._replicate_ids = [SINGLE_ID]
else:
if not Helpers.has_unique_ids(value):
raise ValueError(Messages.non_unique_ids)
self._replicate_ids = value
@property
def parameter_mapping(self):
return self._parameter_manager.parameter_mapping
@property
def optimizer_kwargs(self) -> dict:
return self._optimizer_kwargs
@optimizer_kwargs.setter
def optimizer_kwargs(self, value):
if value is not None and not isinstance(value, dict):
raise ValueError('Optimizer kwargs must be either `None` or a dictionary')
self._optimizer_kwargs = value
#%% Public methods
def add_replicate(self, replicate_id:str, mappings:List[ParameterMapper]=None):
"""
Adds another replicate to the multi model Caretaker object.
Arguments
---------
replicate_id : str
The new replicate_id to be added.
Keyword arguments
-----------------
mappings : list of ParameterMapper or tuple
A list of parameter mappings that should be applied to the new replicate_id.
Default is None, which implies that the local parameter names for the new replicate correspond to the global names.
Raises
------
AttributeError
In case the Caretaker object was created without explicit `replicate_ids` argument.
ValueError
The new replicate_id is not unique among the existing ones.
KeyError
Any of the `mappings` does not aim for the new replicate_id.
"""
# store current parameter mapping
_parameter_mappers = self._parameter_manager.get_parameter_mappers()
_parameters = self._get_all_parameters()
if len(self.replicate_ids) == 1 and self.replicate_ids[0] is None:
raise AttributeError('Cannot add replicate_id to implicitly defined single replicate Caretaker object')
_updated_replicate_ids = copy.deepcopy(self.replicate_ids)
_updated_replicate_ids.append(replicate_id)
if not Helpers.has_unique_ids(_updated_replicate_ids):
raise ValueError(Messages.non_unique_ids)
if mappings is not None:
for _mapping in mappings:
if _mapping.replicate_id != replicate_id:
raise KeyError('The given mapping does not aim for the new replicate')
self.__init__(
bioprocess_model_class=self.__bioprocess_model_class,
model_parameters=self.__model_parameters,
states=self.__states,
initial_values=self.__initial_values,
replicate_ids=_updated_replicate_ids,
initial_switches=self.__initial_switches,
model_name=self.__model_name,
observation_functions_parameters=self.__observation_functions_parameters,
)
self.set_parameters(_parameters)
self.apply_mappings(_parameter_mappers)
if mappings is not None:
self.apply_mappings(mappings)
def simulate(self, t:numpy.ndarray, parameters:dict=None, verbosity:int=40, reset_afterwards:bool=False, suppress_stdout:bool=True) -> list:
"""
Runs a forward simulation for the fully specified model and its observation functions (if specified).
Arguments
---------
t : numpy.ndarray or float
The time points for integration. In case a single time point is provided,
the solver will treat this as the final integration time and choose the intermediate steps on its own.
Keyword arguments
-----------------
parameters : dict
In case a simulation for specific parameter values is wanted.
Default is None.
verbosity : int
Prints solver statistics (quiet = 50, whisper = 40, normal = 30, loud = 20, scream = 10).
Default is 40.
reset_afterwards : bool
To reset the Caretaker object after the simulation has finished.
Default is False.
suppress_stdout : bool
No printouts of integrator warnings, which are directed to stdout by the assimulo package.
Set to False for model debugging purposes.
Default is True.
Returns
-------
simulations : list
The collection of simulations results as list of ModelState or Observation objects.
"""
if parameters is not None:
_original_parameters = self._get_all_parameters()
self.set_parameters(parameters)
simulations = []
for _id in self.simulators.keys():
_simulator = self.simulators[_id]
simulations.extend(_simulator.simulate(t=t, verbosity=verbosity, reset_afterwards=reset_afterwards, suppress_stdout=suppress_stdout))
if parameters is not None:
self.set_parameters(_original_parameters)
return simulations
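# Hedged usage sketch (the `caretaker` instance and the parameter name are assumptions
# for illustration, not taken from this file): a fully specified Caretaker can be
# simulated for all replicates at once, returning a flat list of ModelState and
# Observation objects.
#
#   t = numpy.linspace(0, 24, 100)
#   simulations = caretaker.simulate(t, parameters={'mu_max': 0.4})
#   for _sim in simulations:
#       print(_sim)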
def estimate(self,
unknowns:dict, measurements:List[Measurement], bounds:List[Tuple]=None, metric:str='negLL', use_global_optimizer:bool=None,
report_level:int=0, reset_afterwards:bool=False, handle_CVodeError:bool=True, optimizer_kwargs:dict=None,
) -> Tuple[dict, dict]:
"""
Estimates values for requested unknowns according to a specific metric, given some measurements.
Arguments
---------
unknowns : dict or list
The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
Providing a list of valid unknowns causes the use of scipy's differential evolution global optimizer.
A dictionary with parameter:guess as key-value pairs is needed to use the local but faster minimizer.
measurements : List[Measurement]
The data from which the parameter estimation is performed.
Can provide a Measurement object for any model state or observation.
Keyword arguments
-----------------
bounds : list of tuples
Bounds for for each unknown to be estimated.
Must be provided for use with differential evolution optimizer.
Default is None.
metric : str
The metric according to which the loss to be minimized is calculated.
Can be one of, e.g. `negLL` (negative log-likelihood), `SS` (sum of squares), or `WSS` (weighted sum of squares).
Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
use_global_optimizer : bool
Enforce the use of differential evolution optimizer.
Default is None, which makes this decision based on the type of `unknowns` and `bounds`.
report_level : int
Enables informative output about the estimation process.
Default is 0, which is no output.
1 = prints estimated parameters and runtime of the estimation job.
2 = prints additionally the `OptimizeResult` result object, as returned by the optimizer
3 = prints additionally handled CVodeErrors, which arise from toxic parameters.
This only has an effect in case `handle_CVodeError` is True.
4 = prints additionally the progress of the optimizer.
reset_afterwards : bool
To reset the Caretaker object after the estimation has finished.
Default is False.
handle_CVodeError : bool
Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
Default is True.
optimizer_kwargs : dict
Additional keyword arguments that are passed to the scipy optimizer.
Overrides the Caretaker property `optimizer_kwargs` in case both are set.
Default is None.
Returns
-------
estimations : dict
Key-value pairs of the unknowns and corresponding estimated values.
estimation_info : dict
Additional information about the estimation job.
Raises
------
KeyError
Non-unique unknowns are provided.
ValueError
Invalid unknowns shall be estimated.
ValueError
No bounds are provided for use of differential evolution optimizer.
TypeError
The provided list of measurements contains objects that are not Measurement instances.
Warns
-----
UserWarning
The `optimizer_kwargs` argument is not None.
"""
for _item in measurements:
if not isinstance(_item, Measurement):
raise TypeError('Must provide a list of Measurement objects')
_start = time.time()
if not Helpers.has_unique_ids(unknowns):
raise KeyError(Messages.bad_unknowns)
# test if parameters are estimated that are not known to the model
_valid_unknowns = self._get_valid_parameter_names()
for _unknown in unknowns:
if _unknown not in _valid_unknowns:
raise ValueError(f'Detected invalid unknown to be estimated: {_unknown} vs. {_valid_unknowns}')
if use_global_optimizer == False and not isinstance(unknowns, dict):
raise ValueError('Must provide initial guesses to use the local minimizer')
# sort unknowns and corresponding bounds alphabetically and case-insensitive
# Decide also whether to use the local or global minimizer
_unknowns_names_sorted = sorted(unknowns, key=str.lower)
if isinstance(unknowns, dict):
_unknowns = {_unknown_name : unknowns[_unknown_name] for _unknown_name in _unknowns_names_sorted}
elif isinstance(unknowns, list):
_unknowns = {_unknown_name : None for _unknown_name in _unknowns_names_sorted}
if use_global_optimizer is None:
use_global_optimizer = True
if use_global_optimizer and bounds is None:
raise ValueError(Messages.missing_bounds)
if bounds is not None:
try:
_bounds = [bounds[unknowns.index(_unknown_name)] for _unknown_name in _unknowns_names_sorted]
except AttributeError:
_bounds = [bounds[list(unknowns.keys()).index(_unknown_name)] for _unknown_name in _unknowns_names_sorted]
# To protect for integration error when a bound is integer 0
_bounds = Helpers.bounds_to_floats(_bounds)
else:
_bounds = None
# Check for optimizer_kwargs to be used
_optimizer_kwargs = {}
if self.optimizer_kwargs is None:
_warning_flag = False
else:
_optimizer_kwargs.update(self.optimizer_kwargs)
_warning_flag = True
# check if the keyword argument `optimizer_kwargs` was set, which takes priority over the corresponding Caretaker property
if optimizer_kwargs is not None:
_optimizer_kwargs = optimizer_kwargs
if _warning_flag:
warnings.warn('Using the `optimizer_kwargs` keyword argument overrides the Caretaker property `optimizer_kwargs`.', UserWarning)
if report_level >= 3:
verbosity_CVodeError = True
else:
verbosity_CVodeError = False
if report_level >= 4 and 'disp' not in _optimizer_kwargs.keys():
_optimizer_kwargs['disp'] = True
if use_global_optimizer:
minimizer_scope = 'differential evolution optimizer'
if 'popsize' not in _optimizer_kwargs.keys():
popsize = 5*len(_unknowns)
_optimizer_kwargs['popsize'] = popsize
opt = differential_evolution(self._loss_fun_scipy,
bounds=_bounds,
args=(_unknowns,
metric,
measurements,
handle_CVodeError,
verbosity_CVodeError,
),
**_optimizer_kwargs,
)
else:
minimizer_scope = 'local minimizer'
if 'disp' in _optimizer_kwargs.keys():
options = {'disp' : _optimizer_kwargs['disp']}
del _optimizer_kwargs['disp']
_optimizer_kwargs['options'] = options
opt = minimize(self._loss_fun_scipy,
list(_unknowns.values()),
args=(_unknowns,
metric,
measurements,
handle_CVodeError,
verbosity_CVodeError,
),
bounds=_bounds,
**_optimizer_kwargs,
)
# Preparing returns
estimations = {_unknown : value for _unknown, value in zip(_unknowns, opt.x)}
estimation_info = {}
estimation_info['opt_info'] = opt
if metric in list(PRETTY_METRICS.keys()):
estimation_info['metric'] = PRETTY_METRICS[metric]
else:
estimation_info['metric'] = metric
estimation_info['loss'] = opt.fun
_end = time.time()
estimation_info['runtime_min'] = (_end - _start)/60
if report_level >= 1:
print(f'\n----------Results from {minimizer_scope}')
print('\nEstimated parameters:')
for estimation in estimations.keys():
print(f'{estimation}: {estimations[estimation]}')
print(f'\nRuntime was {estimation_info["runtime_min"]:.2f} min')
if report_level >= 2:
print('\n----------')
print(opt)
return estimations, estimation_info
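# Hedged usage sketch (the `caretaker` and `measurements` objects, parameter names and
# bounds are illustrative assumptions, not taken from this file):
#
#   # local minimizer, triggered by passing a dict of initial guesses
#   estimates, info = caretaker.estimate(
#       unknowns={'mu_max': 0.4, 'K_S': 0.05},
#       measurements=measurements,
#   )
#
#   # global differential evolution, triggered by passing a list of unknowns plus bounds
#   estimates, info = caretaker.estimate(
#       unknowns=['mu_max', 'K_S'],
#       bounds=[(0.0, 1.0), (0.0, 0.2)],
#       measurements=measurements,
#   )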
def estimate_parallel(self,
unknowns:list, measurements:List[Measurement], bounds:List[Tuple],
metric:str='negLL', report_level:int=0,
optimizers:List[str]='de1220', optimizers_kwargs:List[dict]={}, log_each_nth_gen:int=None,
rel_pop_size:float=10.0, evolutions:int=5, archipelago_kwargs:dict={},
atol_islands:float=None, rtol_islands:float=1e-6,
max_runtime_min:float=None,
max_evotime_min:float=None,
max_memory_share:float=0.95,
handle_CVodeError:bool=True,
loss_calculator:LossCalculator=LossCalculator,
) -> Tuple[dict, ParallelEstimationInfo]:
"""
Estimates values for requested unknowns according to a specific metric, given some measurements,
using the generalized island model for parallelization that allows for global optimization.
This is provided by the pygmo package, which runs parallel evolutions of populations,
with migration of improved variants between the populations occuring.
For further info and use of pygmo, see https://github.com/esa/pygmo2, doi:10.5281/zenodo.3603747.
Arguments
---------
unknowns : list
The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
measurements : List[Measurement]
The data from which the parameter estimation is performed.
Can provide a Measurement object for any model state or observation.
bounds : list of tuples
Bounds for for each unknown to be estimated, in the following form [(lower, upper), ...]
Keyword arguments
----------------
metric : str
The metric according to which the loss to be minimized is calculated.
Can be one of, e.g. `negLL` (negative log-likelihood), `SS` (sum of squares), or `WSS` (weighted sum of squares).
Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
report_level : int
Enables informative output about the estimation process. Information will be printed after each evolution.
Default is 0, which is no output.
1 = Prints the best loss, as well as information about archipelago creation and evolution.
For each completed evolution, a dot is printed.
2 = Prints additionally the best loss after each evolution
3 = Prints additionally average loss among all islands, and the runtime of each evolution.
4 = Prints additionally the parameter values for the best loss, and the average parameter values
among the champions of all islands in the `archipelago` after the evolutions.
optimizers : List[str] or str
A list of names for the pygmo optimization algorithms of choice. For a list of such to be conveniently used,
see `PygmoOptimizers` class of this module.
In case a list with one item is used, this optimizer is used for all explicitly
or implicitly (default None of `n_islands`) defined number of islands.
In case a list with >1 optimizers is used, the corresponding number of islands will be created within the archipelago.
The currently supported list of optimizer can be found at pyfoomb.generalized_islands.PygmoOptimizers.optimizers
Default is `de1220`, which makes each island to use this algorithm.
optimizers_kwargs : List[dict] or dict
A list of optimizer_kwargs as dicts, corresponding to the list of optimizers.
In case >1 optimizers are used, the 1-item list of optimizer_kwargs will be applied to all of the optimizers.
Default is `[{}]`, i.e. no additional optimizer kwargs.
log_each_nth_gen : int
Specifies at which n-th generation the algorithm stores logs.
Can be later extracted from the returned `archipelago` instance.
Note that only the log from the last evolution is stored in the archipelago.
Default is None, which disables logging.
rel_pop_size : float
Determines the population size on each island, relative to the number of unknowns to be estimated,
i.e. pop_size = rel_pop_size * len(unknowns), rounded to the next integer.
Default is 10, which creates population sizes 10 times the number of unknowns.
evolutions : int
Defines how often the populations on the islands are evolved.
Migrations between the populations of the islands occur after each finished evolution.
Migration depends of the topology of the archipelago, as well as the defined migration polices,
which are parts of `archipelago_kwargs`.
Default is 5, which triggers five rounds of evolution.
archipelago_kwargs : dict
The keyword arguments for instantiation of the archipelago.
In case `archipelago_kwargs` has no key `t`, the `pygmo.fully_connected()` topology will be used
Default is {}, i.e. an empty dictionary, which implies the use of `pygmo.fully_connected()` topology.
atol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is None, which implies no effect for this argument.
rtol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is 1e-6.
max_runtime_min : float
The maximum time in min the estimation process may take. The current runtime is evaluated after each completion of an evolution.
Default is None, which implies there is no maximum runtime for the estimation process.
max_evotime_min : float
The maximum cumulative pure evolution time the estimation process is allowed to take.
In contrast to the `max_runtime_min` stopping criterion, only the evolution runtime is considered,
without runtime needed for checking stopping criteria, reporting printouts between evolutions, etc.
Default is None.
max_memory_share : float
Defines the allowed memory share in usage, for which no evolutions are run anymore.
Default is 0.95, meaning that further evolutions are only run if the used memory share is less than 95 %.
handle_CVodeError : bool
Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
Default is True.
loss_calculator : LossCalculator
By subclassing `LossCalculator`, user-defined constraints can be implemented. The resulting subclass needs to be provided.
Default is LossCalculator, which implements no additional constraints
Returns
-------
best_estimates : dict
The estimated parameters, according to the best champion among all populations of the archipelago, after the last evolution.
estimation_result : ParallelEstimationInfo
Stores the archipelago and the history of the previous evolutions.
Needed to continue an estimation process.
Raises
------
KeyError
Non-unique unknowns are provided.
ValueError
Invalid unknowns shall be estimated.
"""
_start = time.time()
if len(unknowns) != len(bounds):
raise ValueError('Number of unknowns does not match number of pairs of upper and lower bounds.')
if not isinstance(optimizers, list) and isinstance(optimizers, str):
optimizers = [optimizers]
if not isinstance(optimizers_kwargs, list) and isinstance(optimizers_kwargs, dict):
optimizers_kwargs = [optimizers_kwargs]
if not Helpers.has_unique_ids(unknowns):
raise KeyError(Messages.bad_unknowns)
# test if parameters are estimated that are not known to the model
_valid_unknowns = self._get_valid_parameter_names()
for _unknown in unknowns:
if _unknown not in _valid_unknowns:
raise ValueError(f'Detected invalid unknown to be estimated: {_unknown} vs. {_valid_unknowns}')
# sort unknowns and corresponding bounds
_unknowns_names_sorted = sorted(unknowns, key=str.lower)
_unknowns = [_unknown_name for _unknown_name in _unknowns_names_sorted]
_bounds = [bounds[unknowns.index(_unknown_name)] for _unknown_name in _unknowns_names_sorted]
if report_level >= 5:
_verbosity_CVodeError = True
else:
_verbosity_CVodeError = False
# get the problem
pg_problem = pygmo.problem(
loss_calculator(
unknowns=_unknowns,
bounds=_bounds,
metric=metric,
measurements=measurements,
caretaker_loss_fun=self.loss_function,
handle_CVodeError=handle_CVodeError,
verbosity_CVodeError=_verbosity_CVodeError,
),
)
# get the archipelago
archipelago = ArchipelagoHelpers.create_archipelago(
_unknowns,
optimizers,
optimizers_kwargs,
pg_problem,
rel_pop_size,
archipelago_kwargs,
log_each_nth_gen,
report_level,
)
archipelago.problem = loss_calculator
estimation_result = ParallelEstimationInfo(archipelago=archipelago)
return self.estimate_parallel_continued(
estimation_result=estimation_result,
evolutions=evolutions,
report_level=report_level,
atol_islands=atol_islands,
rtol_islands=rtol_islands,
max_runtime_min=max_runtime_min,
max_evotime_min=max_evotime_min,
max_memory_share=max_memory_share,
start_time=_start,
)
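# Hedged usage sketch (names, bounds, and evolution counts are illustrative assumptions):
# a parallel estimation returns the best estimates plus a ParallelEstimationInfo object,
# which can be fed into estimate_parallel_continued (below) to run further evolutions.
#
#   best, est_info = caretaker.estimate_parallel(
#       unknowns=['mu_max', 'K_S'],
#       bounds=[(0.0, 1.0), (0.0, 0.2)],
#       measurements=measurements,
#       evolutions=5,
#   )
#   best, est_info = caretaker.estimate_parallel_continued(est_info, evolutions=5)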
def estimate_parallel_continued(self,
estimation_result:ParallelEstimationInfo, evolutions:int=1, report_level:int=0,
atol_islands:float=None, rtol_islands:float=1e-6,
max_runtime_min:float=None,
max_evotime_min:float=None,
max_memory_share:float=0.95,
start_time:float=None,
) -> Tuple[dict, ParallelEstimationInfo]:
"""
Continues a parallel parameter estimation job by running more evolutions on a corresponding archipelago object.
Arguments
---------
estimation_result : ParallelEstimationInfo
Stores the archipelago and the history of the previous evolutions as returned by method 'estimate_parallel'.
Needed to continue an estimation process.
Keyword arguments
-----------------
evolutions : int
Defines how often the populations on the islands are evolved.
Migrations between the populations of the islands occur after an evolution.
report_level : int
Enables informative output about the estimation process. Information will be printed after each evolution.
Default is 0, which is no output.
1 = Prints the best loss, as well as information about archipelago creation and evolution.
For each completed evolution, a dot is printed.
2 = Prints additionally the best loss after each evolution.
3 = Prints additionally average loss among all islands, and the runtime of each evolution.
4 = Prints additionally the parameter values for the best loss, and the average parameter values
among the champions of all islands in the `archipelago` after the evolutions.
atol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is None, which implies no effect for this argument.
rtol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is 1e-6.
max_runtime_min : float
The maximum runtime in min the estimation process is allowed to take.
The current runtime is evaluated after each completion of an evolution.
Default is None, which implies that there is no time limit for the estimation process.
max_evotime_min : float
The maximum cumulative pure evolution time the estimation process is allowed to take.
In contrast to the `max_runtime_min` stopping criterion, only the evolution runtime is considered,
without the runtime needed for checking stopping criteria, reporting printouts between evolutions, etc.
Default is None.
max_memory_share : float
Defines the maximum memory share in use, above which no further evolutions are run.
Default is 0.95, meaning that evolutions are only run if the used memory share is less than 95 %.
start_time : float
Pass a start time in case the total runtime, as started from another method, shall be reported.
Default is None, which measures the total runtime only within this method.
Returns
-------
best_estimates : dict
The estimated parameters, according to the best champion among all populations of the archipelago, after the last evolution.
estimation_result : ParallelEstimationInfo
Stores the archipelago and the history of the previous evolutions.
Needed to continue an estimation process.
Raises
------
ValueError
Not all islands of the archipelago have the same unknowns.
"""
if start_time is None:
start_time = time.time()
archipelago = estimation_result.archipelago
evolutions_trail = estimation_result.evolutions_trail.copy()
if report_level >= 1:
if not evolutions_trail['evo_time_min']:
_filler = ''
else:
_filler = 'additional '
print(f'Running {_filler}{evolutions} evolutions for all {len(archipelago)} islands of the archipelago...\n')
_current_evotime_min = 0
for _evolution in range(1, evolutions+1):
_evo_start = time.time()
archipelago.evolve()
archipelago.wait_check()
_evo_end = time.time()
_evo_time_min = (_evo_end - _evo_start)/60
_current_evotime_min += _evo_time_min
best_estimates, best_loss, estimates_info = ArchipelagoHelpers.extract_archipelago_results(archipelago)
evolutions_trail['evo_time_min'].append(_evo_time_min)
evolutions_trail['best_losses'].append(best_loss)
evolutions_trail['best_estimates'].append(best_estimates)
evolutions_trail['estimates_info'].append(estimates_info)
if report_level == 1:
if _evolution % 120 == 0:
end = '\n'
else:
end = ''
print('.', end=end)
elif report_level >= 2:
ArchipelagoHelpers.report_evolution_result(evolutions_trail, report_level)
# evaluate stopping criteria after each evolution
_current_runtime_min = (time.time() - start_time)/60
stopping_criteria = ArchipelagoHelpers.check_evolution_stop(
current_losses=estimates_info['losses'],
atol_islands=atol_islands,
rtol_islands=rtol_islands,
current_runtime_min=_current_runtime_min,
max_runtime_min=max_runtime_min,
current_evotime_min=_current_evotime_min,
max_evotime_min=max_evotime_min,
max_memory_share=max_memory_share,
)
if any(stopping_criteria.values()):
if report_level >= 1:
print(f'\nReached a stopping criterion after evolution {len(evolutions_trail["evo_time_min"])}:')
for _st in stopping_criteria:
print(f'{_st}: {stopping_criteria[_st]}')
early_stop = True
break
else:
early_stop = False
if report_level >= 1:
if not early_stop:
print(f'\nCompleted {_evolution} {_filler}evolution runs.')
print('\nEstimated parameters:')
for p in best_estimates:
print(f'{p}: {best_estimates[p]}')
print('')
ArchipelagoHelpers.report_evolution_result(evolutions_trail, report_level=3)
_runtime_min = (time.time() - start_time)/60
evolutions_trail['cum_runtime_min'].append(_runtime_min)
if report_level >= 1:
if _runtime_min/60 > 1:
print(f'\nTotal runtime was {_runtime_min/60:.2f} h\n')
else:
print(f'\nTotal runtime was {_runtime_min:.2f} min\n')
estimation_result = ParallelEstimationInfo(archipelago=archipelago, evolutions_trail=evolutions_trail)
return best_estimates, estimation_result
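# Illustrative sketch (assumption, not taken from the library docs): continuing a previous parallel
# estimation with further evolutions. `caretaker` and the earlier `estimation_result` are hypothetical.
#
#     best_estimates, estimation_result = caretaker.estimate_parallel_continued(
#         estimation_result=estimation_result,
#         evolutions=10,
#         report_level=1,
#         rtol_islands=1e-6,
#         max_runtime_min=60,
#     )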
def estimate_parallel_MC_sampling(self,
unknowns:list,
measurements:List[Measurement],
bounds:List[Tuple],
mc_samples:int=25,
reuse_errors_as_weights:bool=True,
metric:str='negLL',
report_level:int=0,
optimizers:List[str]='de1220',
optimizers_kwargs:List[dict]={},
rel_pop_size:float=10.0,
evolutions:int=25,
archipelago_kwargs:dict={},
atol_islands:float=None,
rtol_islands:float=1e-6,
n_islands:int=4,
handle_CVodeError:bool=True,
loss_calculator:LossCalculator=LossCalculator,
jobs_to_save:int=None,
max_memory_share:float=0.95,
) -> pandas.DataFrame:
"""
Performs Monte-Carlo sampling from measurements to create new measurements, according to the statistical distribution of the respective Measurement objects.
For each newly created measurement, the requested unknowns (parameters) are estimated, resulting in an empirical distribution of parameter values.
These empirical distributions for the parameters can be assessed for uncertainties and correlations.
For each MC sample, a parallel estimation procedure is carried out; for details see methods `estimate_parallel` and `estimate_parallel_continued`.
Depending on the available number of CPUs on your machine, these estimation procedures are run in parallel.
For the selection of suitable hyperparameters, e.g. which optimizers to use, see method `estimate_parallel` and refer to the corresponding Jupyter notebooks.
NOTE: To increase the number of MC samples to an arbitrarily high number, set the `jobs_to_save` argument.
Afterwards, the results saved to disk can be read and merged.
NOTE: This method puts considerable computational load on your machine.
Arguments
---------
unknowns : dict or list
The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
measurements : List[Measurement]
The measurements from which the parameters are to be estimated.
bounds : List[tuple]
List of tuples (lower, upper), one tuple for each parameter. Must be provided when using the global optimizer.
Default is None.
Keyword arguments
-----------------
mc_samples : int
The number of MC samples that shall be drawn from the measurement data.
Default is 25.
reuse_errors_as_weights : bool
Uses the measurement errors as weights for each set of measurement samples drawn.
Default is True.
metric : str
The metric according to which the loss to be minimized is calculated.
Can be one of, e.g. `negLL` (negative log-likelihood), 'SS' (sum of squares), or `WSS` (weighted sum of squares).
Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
report_level : int
Enables informative output about the estimation process.
Default is 0, which is no output.
1 = Prints a dot for each processing batch, the total runtime and the ratio of samples that reached convergence.
2 = Prints likewise for 1, but with more information on each batch.
3 = Prints additionally the runtime of each batch, as well as as summary for the obtained parameter distributions.
4 = Prints additionally the reason for an MC sample to finish (i.e.,
whether convergence or the maximum number of evolutions was reached).
5 = Prints additionally information on the creation of the archipelagos for each batch.
6 = Prints additionally the current evolution for each MC sample, and reports any handled integration errors.
optimizers : List[str] or str
A list of names for the pygmo optimization algorithms of choice. For a list of such to be conveniently used,
see `PygmoOptimizers` class of this module.
In case a list with one item is used, this optimizer is used for all explicitly
or implicitly (via the `n_islands` argument) defined islands.
In case a list with >1 optimizers is used, the corresponding number of islands will be created within the archipelago.
The currently supported list of optimizers can be found at pyfoomb.generalized_islands.PygmoOptimizers.optimizers.
Default is `de1220`, which makes each island use this algorithm.
optimizers_kwargs : List[dict] or dict
A list of optimizer_kwargs as dicts, corresponding to the list of optimizers.
In case >1 optimizers are used, a 1-item list of optimizers_kwargs will be applied to all of the optimizers.
Default is `{}`, i.e. no additional optimizer kwargs.
rel_pop_size : float
Determines the population size on each island, relative to the number of unknowns to be estimated,
i.e. pop_size = rel_pop_size * len(unknowns), rounded to the next integer.
Default is 10, which creates population sizes 10 times the number of unknowns.
evolutions : int
Defines how often the populations on the islands are evolved.
Migrations between the populations of the islands occur after each finished evolution.
Migration depends on the topology of the archipelago, as well as the defined migration policies,
which are parts of `archipelago_kwargs`.
Default is 25, which triggers 25 rounds of evolution.
archipelago_kwargs : dict
The keyword arguments for instantiation of the archipelago.
In case `archipelago_kwargs` has no key `t`, the `pygmo.fully_connected()` topology will be used
Default is {}, i.e. an empty dictionary, which implies the use of `pygmo.fully_connected()` topology.
atol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is None, which implies no effect for this argument.
rtol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is 1e-6.
n_islands : int
Specifies the number of parallel estimations per MC sample for all archipelagos in an estimation batch.
In case a list of optimizers is provided, the number of islands is implicitly defined by its length.
Must use values > 1.
Default is 4.
handle_CVodeError : bool
Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
Default is True.
loss_calculator : LossCalculator
By subclassing `LossCalculator`, user-defined constraints can be implemented. The resulting subclass needs to be provided.
Default is LossCalculator, which implements no additional constraints.
jobs_to_save : int
Set to repeatedly run the specified number of MC samples and to save the results from each repeat to file.
Default is None, which causes no result storage to file.
max_memory_share : float
Defines the maximum memory share in use, above which no further repeats are run. Has only an effect if `jobs_to_save` is not None.
Default is 0.95, meaning that repeats are only run if the used memory share is less than 95 %.
Returns
-------
estimates : pandas.DataFrame
The values from repeated estimation for the requested unknowns.
Only converged estimations are included.
Raises
------
AttributeError
Measurements have no errors.
ValueError
Degree of archipelago parallelization is < 2.
TypeError
A list containing not only Measurement objects is provided.
KeyError
Non-unique unknowns detected.
ValueError
Invalid parameters shall be estimated.
"""
if jobs_to_save is None:
_estimate = self._estimate_parallel_MC_sampling(
unknowns=unknowns,
measurements=measurements,
bounds=bounds,
mc_samples=mc_samples,
reuse_errors_as_weights=reuse_errors_as_weights,
metric=metric,
report_level=report_level,
optimizers=optimizers,
optimizers_kwargs=optimizers_kwargs,
rel_pop_size=rel_pop_size,
evolutions=evolutions,
archipelago_kwargs=archipelago_kwargs,
atol_islands=atol_islands,
rtol_islands=rtol_islands,
n_islands=n_islands,
handle_CVodeError=handle_CVodeError,
loss_calculator=loss_calculator,
)
return pandas.DataFrame.from_dict(_estimate)
_estimate_batches = []
session_id = int(time.monotonic())
for i in range(1, jobs_to_save+1):
curr_memory_share = psutil.virtual_memory().percent/100
if curr_memory_share > max_memory_share:
print(f'Cannot run MC estimation job due to low memory: {(1-curr_memory_share)*100:.2f} % free memory left')
else:
_estimate_batch = self._estimate_parallel_MC_sampling(
unknowns=unknowns,
measurements=measurements,
bounds=bounds,
mc_samples=mc_samples,
reuse_errors_as_weights=reuse_errors_as_weights,
metric=metric,
report_level=report_level,
optimizers=optimizers,
optimizers_kwargs=optimizers_kwargs,
rel_pop_size=rel_pop_size,
evolutions=evolutions,
archipelago_kwargs=archipelago_kwargs,
atol_islands=atol_islands,
rtol_islands=rtol_islands,
n_islands=n_islands,
handle_CVodeError=handle_CVodeError,
loss_calculator=loss_calculator,
)
_filename = f'{self.model_name}_MC-sample-estimates_session-id-{session_id}_job-{i}.xlsx'
_df = pandas.DataFrame.from_dict(_estimate_batch)
_estimate_batches.append(_df)
_df.to_excel(_filename)
if report_level > 0:
print(f'Current memory usage is {psutil.virtual_memory().percent:.2f} %.\nSaved results of job #{i} to file: {_filename}\n')
return pandas.concat(_estimate_batches, ignore_index=True)
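# Illustrative sketch (hypothetical names): drawing MC samples and inspecting the resulting empirical
# parameter distributions with pandas. Assumes a Caretaker instance `caretaker` and a list `measurements`.
#
#     estimates_df = caretaker.estimate_parallel_MC_sampling(
#         unknowns=['k_1', 'k_2'],
#         bounds=[(0.0, 10.0), (0.0, 5.0)],
#         measurements=measurements,
#         mc_samples=50,
#         report_level=1,
#     )
#     print(estimates_df.describe().T)   # dispersion of each parameter
#     print(estimates_df.corr())         # correlations between parameters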
def _estimate_parallel_MC_sampling(self,
unknowns:list,
measurements:List[Measurement],
bounds:List[Tuple],
mc_samples:int=25,
reuse_errors_as_weights:bool=True,
metric:str='negLL',
report_level:int=0,
optimizers:List[str]='de1220',
optimizers_kwargs:List[dict]={},
rel_pop_size:float=10.0,
evolutions:int=25,
archipelago_kwargs:dict={},
atol_islands:float=None,
rtol_islands:float=1e-6,
n_islands:int=4,
handle_CVodeError:bool=True,
loss_calculator:LossCalculator=LossCalculator,
) -> dict:
"""
Performs Monte-Carlo sampling from measurements to create new measurements, according to the statistical distribution of the respective Measurement objects.
For each newly created measurement, the requested unknowns (parameters) are estimated, resulting in an empirical distribution of parameter values.
These empirical distributions for the parameters can be assessed for uncertainties and correlations.
For each MC sample, a parallel estimation procedure is carried out; for details see methods `estimate_parallel` and `estimate_parallel_continued`.
Depending on the available number of CPUs on your machine, these estimation procedures are run in parallel.
For the selection of suitable hyperparameters, e.g. which optimizers to use, see method `estimate_parallel` and refer to the corresponding Jupyter notebooks.
NOTE: To increase the number of MC samples to an arbitrarily high number, run this method several times and store intermediate results.
Afterwards, these can be merged.
NOTE: This method puts considerable computational load on your machine.
Arguments
---------
unknowns : dict or list
The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
measurements : List[Measurement]
The measurements from which the parameters are to be estimated.
bounds : List[tuple]
List of tuples (lower, upper), one tuple for each parameter. Must be provided when using the global optimizer.
Default is None.
Keyword arguments
-----------------
mc_samples : int
The number of MC samples that shall be drawn from the measurement data.
Default is 25.
reuse_errors_as_weights : bool
Uses the measurement errors as weights for each set of measurement samples drawn.
Default is True.
metric : str
The metric according to which the loss to be minimized is calculated.
Can be one of, e.g. `negLL` (negative log-likelihood), 'SS' (sum of squares), or `WSS` (weighted sum of squares).
Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
report_level : int
Enables informative output about the estimation process.
Default is 0, which is no output.
1 = Prints a dot for each processing batch, the total runtime and the ratio of samples that reached convergence.
2 = Prints likewise for 1, but with more information on each batch.
3 = Prints additionally the runtime of each batch, as well as as summary for the obtained parameter distributions.
4 = Prints additionally the reason for an MC sample to finish (i.e.,
whether convergence or the maximum number of evolutions was reached).
5 = Prints additionally information on the creation of the archipelagos for each batch.
6 = Prints additionally the current evolution for each MC sample, and reports any handled integration errors.
optimizers : List[str] or str
A list of names for the pygmo optimization algorithms of choice. For a list of such to be conveniently used,
see `PygmoOptimizers` class of this module.
In case a list with one item is used, this optimizer is used for all explicitly
or implicitly (via the `n_islands` argument) defined islands.
In case a list with >1 optimizers is used, the corresponding number of islands will be created within the archipelago.
The currently supported list of optimizers can be found at pyfoomb.generalized_islands.PygmoOptimizers.optimizers.
Default is `de1220`, which makes each island use this algorithm.
optimizers_kwargs : List[dict] or dict
A list of optimizer_kwargs as dicts, corresponding to the list of optimizers.
In case >1 optimizers are used, a 1-item list of optimizers_kwargs will be applied to all of the optimizers.
Default is `{}`, i.e. no additional optimizer kwargs.
rel_pop_size : float
Determines the population size on each island, relative to the number of unknowns to be estimated,
i.e. pop_size = rel_pop_size * len(unknowns), rounded to the next integer.
Default is 10, which creates population sizes 10 times the number of unknowns.
evolutions : int
Defines how often the populations on the islands are evolved.
Migrations between the populations of the islands occur after each finished evolution.
Migration depends on the topology of the archipelago, as well as the defined migration policies,
which are parts of `archipelago_kwargs`.
Default is 25, which triggers 25 rounds of evolution.
archipelago_kwargs : dict
The keyword arguments for instantiation of the archipelago.
In case `archipelago_kwargs` has no key `t`, the `pygmo.fully_connected()` topology will be used
Default is {}, i.e. an empty dictionary, which implies the use of `pygmo.fully_connected()` topology.
atol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is None, which implies no effect for this argument.
rtol_islands : float
Defines a stopping criterion that is checked after each evolution.
If the std of the islands' losses < atol_islands + rtol_islands * abs(mean(islands' losses)), then the optimization is stopped.
Default is 1e-6.
n_islands : int
Specifies the number of parallel estimations per MC sample for all archipelagos in an estimation batch.
In case a list of optimizers is provided, the number of islands is implicitly defined by its length.
Must use values > 1.
Default is 4.
handle_CVodeError : bool
Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
Default is True.
loss_calculator : LossCalculator
By subclassing `LossCalculator`, user-defined constraints can be implemented. The resulting subclass needs to be provided.
Default is LossCalculator, which implements no additional constraints.
Returns
-------
estimates : dict
The values from repeated estimation for the requested unknowns.
Only converged estimations are included.
estimates_info : dict
Contains additional information for all finished jobs.
Raises
------
AttributeError
Measurements have no errors.
ValueError
Degree of archipelago parallelization is < 2.
TypeError
A list containing not only Measurement objects is provided.
KeyError
Non-unique unknowns detected.
ValueError
Invalid parameters shall be estimated.
"""
_start = time.time()
# Some input error checkings
if len(unknowns) != len(bounds):
raise ValueError('Number of unknowns does not match number of pairs of upper and lower bounds.')
for _item in measurements:
if not isinstance(_item, Measurement):
raise TypeError('Must provide a list of Measurement objects')
if not Helpers.all_measurements_have_errors(measurements):
raise AttributeError('Measurements must have errors')
if not Helpers.has_unique_ids(unknowns):
raise KeyError(Messages.bad_unknowns)
_valid_unknowns = self._get_valid_parameter_names()
for _unknown in unknowns:
if _unknown not in _valid_unknowns:
raise ValueError(f'Detected invalid unknown to be estimated: {_unknown} vs. {_valid_unknowns}')
# Sort unknowns and corresponding bounds
_unknowns_names_sorted = sorted(unknowns, key=str.lower)
_unknowns = [_unknown_name for _unknown_name in _unknowns_names_sorted]
_bounds = [bounds[unknowns.index(_unknown_name)] for _unknown_name in _unknowns_names_sorted]
if report_level >= 6:
_verbosity_CVodeError = True
else:
_verbosity_CVodeError = False
# Check whether to use the same optimizer for all islands or a list of optimizers
if isinstance(optimizers, str):
optimizers = [optimizers]*n_islands
elif isinstance(optimizers, list):
n_islands = len(optimizers)
if isinstance(optimizers_kwargs, dict):
optimizers_kwargs = [optimizers_kwargs]*n_islands
if n_islands < 2:
raise ValueError('Must use at least 2 islands per archipelago, either by specifying `n_islands` or by using a list with more than 1 optimizer for the kwarg `optimizers`.')
if atol_islands is None:
atol_islands = 0.0
if rtol_islands is None:
rtol_islands = 0.0
# Calculate the number of archipelagos that can be run in parallel
n_archis = max([int(numpy.floor(joblib.cpu_count()/n_islands)), 1])
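# Worked example of the batch-size arithmetic above (CPU counts are assumptions for illustration):
#     int(numpy.floor(16 / 4))  # -> 4 archipelagos per batch on a 16-CPU machine with n_islands=4
#     int(numpy.floor(6 / 4))   # -> 1 archipelago per batch on a 6-CPU machine (the max(..., 1) guard)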
# Collects all finished estimation jobs
_mc_estimates = []
# Run parallel estimation jobs batch-wise
for i in range(1, mc_samples+1, n_archis):
_batch_no = int((i-1+n_archis)/n_archis)
if report_level == 1:
if _batch_no % 120 == 0:
end = '\n'
else:
end = ''
print('.', end=end)
elif report_level >= 2:
if report_level >= 3:
_insert = '\n'
else:
_insert = ''
_current_samples = [_sample for _sample in range(i, _batch_no*n_archis+1) if _sample <= mc_samples]
_first = _current_samples[0]
_last = f' to {_current_samples[-1]}' if _current_samples[-1] != _first else ''
print(f'{_insert}------- Starting batch #{_batch_no} for MC sample {_first}{_last}.')
# Initialize the current batch
_batch_start = time.time()
mc_count = i
active_archis = []
# Create a batch of archipelagos
for j in range(n_archis):
if mc_count+j > mc_samples:
break
# Create the problem for a MC sample
_pg_problem = pygmo.problem(
loss_calculator(
unknowns=_unknowns,
bounds=_bounds,
metric=metric,
measurements=self._draw_measurement_samples(measurements, reuse_errors_as_weights),
caretaker_loss_fun=self.loss_function,
handle_CVodeError=handle_CVodeError,
verbosity_CVodeError=_verbosity_CVodeError,
)
)
# Create the archipelago for the current problem
_archi = ArchipelagoHelpers.create_archipelago(
unknowns=_unknowns,
optimizers=optimizers,
optimizers_kwargs=optimizers_kwargs,
pg_problem=_pg_problem,
rel_pop_size=rel_pop_size,
archipelago_kwargs=archipelago_kwargs,
log_each_nth_gen=None,
report_level=0,
)
_archi.mc_info = f'MC sample #{mc_count+j}'
_archi.finished = False
_archi.problem = loss_calculator
_archi.wait_check()
if report_level >= 5:
print(f'{_archi.mc_info}: created archipelago with {len(_archi)} islands')
active_archis.append(_archi)
# Evolve the archipelagos in the current batch
for j in range(len(active_archis)):
for evo in range(1, evolutions+1):
# Start an async evolution for all non-converged archis
for _archi in active_archis:
if not _archi.finished:
if report_level >= 6:
print(f'\t{_archi.mc_info}: running evolution {evo}')
_archi.evolve()
# Wait for all archis to finish
for _archi in active_archis:
if not _archi.finished:
_archi.wait_check()
# Check the archis for results
for _archi in active_archis:
# Calculate convergence criterion
_losses = numpy.array(_archi.get_champions_f()).flatten()
_stop_criterion = atol_islands + rtol_islands * numpy.abs(numpy.mean(_losses))
_abs_std = numpy.std(_losses, ddof=1)
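# Numeric illustration of the convergence check below (loss values are assumptions):
# for champion losses [10.20, 10.30, 10.25, 10.22], mean ~= 10.24 and std(ddof=1) ~= 0.044;
# with atol_islands=0 and rtol_islands=1e-6 the threshold is ~1.0e-5, so this archipelago
# would not yet count as converged.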
# Check for convergence for the non-finished archi
if _abs_std < _stop_criterion and not _archi.finished:
_best_estimates = ArchipelagoHelpers.estimates_from_archipelago(_archi)
_best_estimates['convergence'] = True
_best_estimates['max_evos'] = False
_best_estimates['archi'] = f'{_archi.mc_info}'
_best_estimates['losses'] = _losses
_archi.finished = True
_mc_estimates.append(_best_estimates)
if report_level >= 4:
print(f'{_archi.mc_info}: convergence')
# Check for max evolutions for the non-finished archi
elif evo == evolutions and not _archi.finished:
_best_estimates = ArchipelagoHelpers.estimates_from_archipelago(_archi)
_best_estimates['convergence'] = False
_best_estimates['max_evos'] = True
_best_estimates['archi'] = f'{_archi.mc_info}'
_best_estimates['losses'] = _losses
_archi.finished = True
_mc_estimates.append(_best_estimates)
if report_level >= 4:
print(f'{_archi.mc_info}: no convergence after max. evolutions ({evo}).')
if all([_archi.finished for _archi in active_archis]):
del active_archis
gc.collect()
active_archis = []
break
if all([_archi.finished for _archi in active_archis]):
del active_archis
gc.collect()
active_archis = []
break
if report_level >= 3:
print(f'Runtime for batch {_batch_no} was {(time.time() - _batch_start)/60:.2f} min.')
# All requested MC samples were run
if mc_count > mc_samples:
break
# Comprehend results
aug_unknowns = [*_unknowns, 'convergence', 'max_evos', 'archi', 'losses']
estimates_info = {str(_p) : [] for _p in aug_unknowns}
for _mc_estimate in _mc_estimates:
for _p in _mc_estimate:
estimates_info[_p].append(_mc_estimate[_p])
estimates = {
str(_p) : numpy.array(numpy.array(estimates_info[_p])[estimates_info['convergence']]) # include only samples that converged
for _p in _unknowns
}
if report_level >= 1:
_end = time.time()
# Runtime was > 1 h
if (_end-_start)/3600 > 1:
print(f'\n-----------------------------------------------\nTotal runtime was {(_end-_start)/3600:.2f} h.')
else:
print(f'\n-----------------------------------------------\nTotal runtime was {(_end-_start)/60:.2f} min.')
print(f'Convergence ratio was {numpy.sum(estimates_info["convergence"])/len(estimates_info["convergence"])*100:.1f} %.')
if report_level >= 3:
print('\nSummaries for empirical parameter distributions\n-----------------------------------------------')
print(pandas.DataFrame(estimates).describe().T)
return estimates
def estimate_repeatedly(self,
unknowns:list, measurements:List[Measurement], bounds:List[tuple], metric:str='negLL',
jobs:int=10, rel_jobs:float=None, report_level:int=0, reset_afterwards:bool=False, handle_CVodeError:bool=True,
) -> Tuple[dict, list]:
"""
Runs several global estimations for the requested unknowns. Resulting distributions for the estimated parameters
can be inspected for measures of dispersion or correlations among parameters. In case a rather high number of estimation jobs is run,
the resulting distributions can be nicely investigated using the Visualization.show_parameter_distributions method.
NOTE: This method puts considerable computational load on your machine.
Arguments
---------
unknowns : list or dict
The parameters to be estimated. Can be any of the model parameters, initial values or observation parameters.
In case a dict is provided, the corresponding values are ignored.
measurements : List[Measurement]
The data, from which the repeated estimation is performed.
Can provide a Measurement object for any model state or observation.
bounds : List[Tuple]
Bounds for each unknown to be estimated.
Keyword arguments
-----------------
metric : str
The metric according to which the loss to be minimized is calculated.
Can be one of, e.g. `negLL` (negative log-likelihood), 'SS' (sum of squares), or `WSS` (weighted sum of squares).
Default is `negLL`, which implies that the corresponding Measurement objects are accordingly specified.
jobs : int
The number of estimation jobs that are requested.
Default is 10.
rel_jobs : float
Number of estimation jobs, relative to the number of unknowns: rel_jobs * number of unknowns.
Overrides jobs argument. Default is None, which implies use of `jobs`.
report_level : int
Enables informative output about the estimation process.
Default is 0, which is no output.
1 = prints a summary of the empirical parameter distributions and basic information about the parallelization.
2 = reports additionally about each parallel estimation job.
reset_afterwards : bool
To reset the Caretaker object after the estimation has finished.
Default is False.
handle_CVodeError : bool
Catches CVodeError raised by the solver, in order to not interrupt the estimations for toxic parameter values.
Default is True.
Returns
-------
repeated_estimates : dict
The values from repeated estimation for the requested unknowns.
results : list
Contains estimation_info dicts for each estimation job.
Raises
------
TypeError
A list containing not only Measurement objects is provided.
Warns
-----
UserWarning
Property `optimizer_kwargs` of this Caretaker instance has key `disp`.
"""
warnings.warn(
'This method will be deprecated in future releases of pyFOOMB.',
PendingDeprecationWarning
)
for _item in measurements:
if not isinstance(_item, Measurement):
raise TypeError('Must provide a list of Measurement objects')
if isinstance(unknowns, list):
unknowns = {unknown : None for unknown in unknowns}
# rel_jobs overrides jobs
if rel_jobs is not None:
jobs = int(numpy.ceil(rel_jobs * len(bounds)))
if self.optimizer_kwargs is not None:
if 'disp' in self.optimizer_kwargs.keys():
warnings.warn(
'Reporting progress for each single optimization job is deactivated for parallel multi-estimation methods',
UserWarning,
)
# collect arg instances for each parallel job
arg_instances = [
{
'unknowns' : unknowns,
'measurements' : measurements,
'bounds' : bounds,
'metric' : metric,
'use_global_optimizer' : True,
'handle_CVodeError' : handle_CVodeError,
'optimizer_kwargs' : {'disp' : False},
}
for job in range(jobs)
]
parallel_verbosity = 0
if report_level >= 1:
parallel_verbosity = 1
if report_level >= 2:
parallel_verbosity = 11
# do the jobs
repeated_estimates, results = self._estimate_parallelized_helper(arg_instances, unknowns, parallel_verbosity)
if report_level >= 1:
print('\nSummaries for empirical parameter distributions\n-----------------------------------------------')
print( | pandas.DataFrame(repeated_estimates) | pandas.DataFrame |
import json
from datetime import datetime
import numpy as np
import pandas as pd
from joblib import dump, load
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.preprocessing import OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from os import mkdir
class kickstarter_predictor():
def __init__(self) -> None:
self._RSEED=42
self._json_cols=['category', 'location']
self._cat_features_impute = ['country', 'currency', 'category_name', 'location_type']
self._cat_features_onehot = ['country', 'currency', 'category_name', 'location_type']
self.preprocessor = ColumnTransformer(
transformers=[
#('cat_impute', SimpleImputer(strategy='constant', fill_value='missing'), self._cat_features_impute),
('cat_onehot', OneHotEncoder(handle_unknown='ignore'), self._cat_features_onehot),
('untouched', 'passthrough', ['duration','goal_usd', 'launched_at_month', 'created_at_month'])
#('untouched', 'passthrough', ['deadline','static_usd_rate', 'goal', 'launched_at', 'created_at'])
],
sparse_threshold=0
)
self.model = RandomForestClassifier(n_estimators=120, random_state=self._RSEED, max_features = 'sqrt', n_jobs=-1, verbose = 1)
try:
mkdir('./output')
except OSError:
print ("Creation of the directory output failed.")
def expand_json_cols(self, df):
"""
Expand columns that contain json objects
Parameters
----------
df: Pandas DataFrame
Returns
-------
df: Pandas DataFrame
"""
df_dicts = pd.DataFrame()
print('---------- Parsing json ------------')
for col in self._json_cols:
print('Parsing json: '+col)
c = []
for i, val in df[col].items():
try:
c.append(json.loads(val))
except:
c.append(dict())
df_dicts[col] = pd.Series(np.array(c))
print('---------- Expanding dictionaries --------')
df_expanded = []
for col in df_dicts.columns:
print('Expanding: '+col)
df_expanded.append( | pd.json_normalize(df_dicts[col]) | pandas.json_normalize |
import os
import param
import pandas as pd
from .tasks import add_async
from .projects import _get_project_dir
from .collections import get_collections
from .metadata import get_metadata, update_metadata
from .. import util
from .. import static
from ..plugins import load_providers, load_plugins, list_plugins
from ..database.database import get_db, db_session, select_datasets
@add_async
def download(catalog_entry, file_path, dataset=None, **kwargs):
"""Download dataset and save it locally.
Args:
catalog_entry (string, Required):
uri of catalog_entry within a service or collection
file_path (string, Required):
path location to save downloaded data
dataset (string, Optional, Default=None):
maybe only be used by some providers
async: (bool, Optional, Default=False)
if True, download in background
kwargs:
optional download kwargs
Returns:
data (dict):
details of downloaded data
"""
service_uri = catalog_entry
if file_path is None:
pass
provider, service, catalog_id = util.parse_service_uri(service_uri)
provider_plugin = load_providers()[provider]
data = provider_plugin.download(service=service, catalog_id=catalog_id,
file_path=file_path, dataset=dataset, **kwargs)
return data
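# Illustrative sketch (all values are placeholders, not real catalog entries): downloading data for a
# single catalog entry. The URI is parsed into provider, service and catalog_id by util.parse_service_uri;
# extra keyword arguments are forwarded as provider-specific download options.
#
#     data = download(
#         catalog_entry='svc://some-provider:some-service/some-catalog-id',
#         file_path='/tmp/quest_downloads/',
#     )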
@add_async
def publish(publisher_uri, options=None, **kwargs):
if isinstance(options, param.Parameterized):
options = dict(options.get_param_values())
options = options or dict()
options.update(kwargs)
provider, publisher, _ = util.parse_service_uri(publisher_uri)
provider_plugin = load_providers()[provider]
data = provider_plugin.publish(publisher=publisher, **options)
return data
@add_async
def download_datasets(datasets, options=None, raise_on_error=False):
"""Download datasets and save them in the Quest project.
Args:
datasets (string or list, Required):
datasets to download
options (dict, Optional, Default=None):
Dictionary of download options to stage the datasets with before downloading (see :func:`quest.api.stage_for_download`)
raise_on_error (bool, Optional, Default=False):
if True, if an error occurs raise an exception
async (bool, Optional, Default=False):
if True, download in background
Note:
If `options` are not provided then the `datasets` should already have download options set by calling :func:`quest.api.stage_for_download`.
Returns:
status (dict):
download status of datasets
"""
if options is not None:
datasets = stage_for_download(uris=datasets, options=options)
datasets = get_metadata(datasets, as_dataframe=True)
if datasets.empty:
return
# filter out non download datasets
datasets = datasets[datasets['source'] == static.DatasetSource.WEB_SERVICE]
db = get_db()
project_path = _get_project_dir()
status = {}
for idx, dataset in datasets.iterrows():
collection_path = os.path.join(project_path, dataset['collection'])
catalog_entry = dataset["catalog_entry"]
try:
update_metadata(idx, quest_metadata={'status': static.DatasetStatus.PENDING})
kwargs = dataset['options'] or dict()
all_metadata = download(catalog_entry,
file_path=collection_path,
dataset=idx, **kwargs)
metadata = all_metadata.pop('metadata', None)
quest_metadata = all_metadata
quest_metadata.update({
'status': static.DatasetStatus.DOWNLOADED,
'message': 'success',
})
except Exception as e:
if raise_on_error:
raise
quest_metadata = {
'status': static.DatasetStatus.FAILED_DOWNLOAD,
'message': str(e),
}
metadata = None
status[idx] = quest_metadata['status']
quest_metadata.update({'metadata': metadata})
with db_session:
dataset = db.Dataset[idx]
dataset.set(**quest_metadata)
return status
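# Illustrative sketch (hypothetical dataset ids and options): staging datasets and downloading them in
# one call. `stage_for_download` is referenced in the docstring above but defined elsewhere in this API.
#
#     status = download_datasets(
#         datasets=['d1234567890abcdef', 'dfedcba0987654321'],
#         options={'parameter': 'streamflow'},   # provider-specific, assumed for illustration
#         raise_on_error=False,
#     )
#     # `status` maps each dataset id to its DatasetStatus after the download attempt.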
def get_download_options(uris, fmt='json'):
"""List optional kwargs that can be specified when downloading a dataset.
Args:
uris (string or list, Required):
uris of catalog_entries or datasets
fmt (string, Required, Default='json'):
format in which to return download_options. One of ['json', 'param']
Returns:
download_options (dict):
download options that can be specified when calling
quest.api.stage_for_download or quest.api.download
"""
uris = util.listify(uris)
grouped_uris = util.classify_uris(uris, as_dataframe=False, exclude=['collections'])
services = grouped_uris.get(static.UriType.SERVICE) or []
datasets = grouped_uris.get(static.UriType.DATASET) or []
service_uris = {s: s for s in services}
service_uris.update({dataset: get_metadata(dataset)[dataset]['catalog_entry'] for dataset in datasets})
options = {}
for uri, service_uri in service_uris.items():
provider, service, _ = util.parse_service_uri(service_uri)
provider_plugin = load_providers()[provider]
options[uri] = provider_plugin.get_download_options(service, fmt)
return options
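# Illustrative sketch (placeholder URI): inspecting which download kwargs a service accepts before
# staging or downloading datasets.
#
#     opts = get_download_options(['svc://some-provider:some-service'], fmt='json')
#     print(opts)   # maps each uri to the provider-specific download options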
def get_publish_options(publish_uri, fmt='json'):
uris = util.listify(publish_uri)
options = {}
for uri in uris:
publish_uri = uri
provider, publisher, _ = util.parse_service_uri(publish_uri)
provider_plugin = load_providers()[provider]
options[uri] = provider_plugin.publish_options(publisher, fmt)
return options
@add_async
def get_datasets(expand=None, filters=None, queries=None, as_dataframe=None):
"""Return all available datasets in active project.
Args:
expand (bool, Optional, Default=None):
include dataset details and format as dict
filters(dict, Optional, Default=None):
filter dataset by any metadata field
queries(list, Optional, Default=None):
list of string arguments to pass to pandas.DataFrame.query to filter the datasets
as_dataframe (bool or None, Optional, Default=None):
include dataset details and format as pandas dataframe
Returns:
uris (list, dict, pandas Dataframe, Default=list):
staged dataset uids
"""
datasets = select_datasets()
datasets = | pd.DataFrame(datasets) | pandas.DataFrame |
from __future__ import division
from datetime import datetime
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
import pandas as pd
import numpy as np
import random
from nose.tools import assert_almost_equal as aae
import bt
import bt.algos as algos
def test_algo_name():
class TestAlgo(algos.Algo):
pass
actual = TestAlgo()
assert actual.name == 'TestAlgo'
class DummyAlgo(algos.Algo):
def __init__(self, return_value=True):
self.return_value = return_value
self.called = False
def __call__(self, target):
self.called = True
return self.return_value
def test_algo_stack():
algo1 = DummyAlgo(return_value=True)
algo2 = DummyAlgo(return_value=False)
algo3 = DummyAlgo(return_value=True)
target = mock.MagicMock()
stack = bt.AlgoStack(algo1, algo2, algo3)
actual = stack(target)
assert not actual
assert algo1.called
assert algo2.called
assert not algo3.called
def test_print_temp_data():
target = mock.MagicMock()
target.temp={}
target.temp['selected'] = ['c1','c2']
target.temp['weights'] = [0.5,0.5]
algo = algos.PrintTempData()
assert algo( target )
algo = algos.PrintTempData( 'Selected: {selected}')
assert algo( target )
def test_print_info():
target = bt.Strategy('s', [])
target.temp={}
algo = algos.PrintInfo()
assert algo( target )
algo = algos.PrintInfo( '{now}: {name}')
assert algo( target )
def test_run_once():
algo = algos.RunOnce()
assert algo(None)
assert not algo(None)
assert not algo(None)
def test_run_period():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunPeriod()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
target.now = None
assert not algo(target)
# run on first date
target.now = dts[0]
assert not algo(target)
# run on first supplied date
target.now = dts[1]
assert algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert not algo(target)
algo = algos.RunPeriod(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
dts = target.data.index
# run on first date
target.now = dts[0]
assert not algo(target)
# first supplied date
target.now = dts[1]
assert not algo(target)
# run on last date
target.now = dts[len(dts) - 1]
assert algo(target)
# date not in index
target.now = datetime(2009, 2, 15)
assert not algo(target)
def test_run_daily():
target = mock.MagicMock()
dts = pd.date_range('2010-01-01', periods=35)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
algo = algos.RunDaily()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('',[algo]),
data
)
target.data = backtest.data
target.now = dts[1]
assert algo(target)
def test_run_weekly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunWeekly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert not algo(target)
# new week
target.now = dts[3]
assert algo(target)
algo = algos.RunWeekly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of week
target.now = dts[2]
assert algo(target)
# new week
target.now = dts[3]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8),datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_monthly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunMonthly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert not algo(target)
# new month
target.now = dts[31]
assert algo(target)
algo = algos.RunMonthly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of month
target.now = dts[30]
assert algo(target)
# new month
target.now = dts[31]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_quarterly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunQuarterly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert not algo(target)
# new quarter
target.now = dts[90]
assert algo(target)
algo = algos.RunQuarterly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of quarter
target.now = dts[89]
assert algo(target)
# new quarter
target.now = dts[90]
assert not algo(target)
dts = pd.DatetimeIndex([datetime(2016, 1, 3), datetime(2017, 1, 8), datetime(2018, 1, 7)])
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# check next year
target.now = dts[1]
assert algo(target)
def test_run_yearly():
dts = pd.date_range('2010-01-01', periods=367)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
target = mock.MagicMock()
target.data = data
algo = algos.RunYearly()
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert not algo(target)
# new year
target.now = dts[365]
assert algo(target)
algo = algos.RunYearly(
run_on_first_date=False,
run_on_end_of_period=True,
run_on_last_date=True
)
# adds the initial day
backtest = bt.Backtest(
bt.Strategy('', [algo]),
data
)
target.data = backtest.data
# end of year
target.now = dts[364]
assert algo(target)
# new year
target.now = dts[365]
assert not algo(target)
def test_run_on_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunOnDate('2010-01-01', '2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
def test_run_if_out_of_bounds():
algo = algos.RunIfOutOfBounds(0.5)
dts = pd.date_range('2010-01-01', periods=3)
s = bt.Strategy('s')
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.temp['selected'] = ['c1', 'c2']
s.temp['weights'] = {'c1': .5, 'c2':.5}
s.update(dts[0])
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c1']._weight = 0.5
s.children['c2']._weight = 0.5
assert not algo(s)
s.children['c1']._weight = 0.25
s.children['c2']._weight = 0.75
assert not algo(s)
s.children['c1']._weight = 0.24
s.children['c2']._weight = 0.76
assert algo(s)
s.children['c1']._weight = 0.75
s.children['c2']._weight = 0.25
assert not algo(s)
s.children['c1']._weight = 0.76
s.children['c2']._weight = 0.24
assert algo(s)
def test_run_after_date():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDate('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert algo(target)
def test_run_after_days():
target = mock.MagicMock()
target.now = pd.to_datetime('2010-01-01')
algo = algos.RunAfterDays(3)
assert not algo(target)
assert not algo(target)
assert not algo(target)
assert algo(target)
def test_set_notional():
algo = algos.SetNotional('notional')
s = bt.FixedIncomeStrategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
notional = pd.Series(index=dts[:2], data=[1e6, 5e6])
s.setup( data, notional = notional )
s.update(dts[0])
assert algo(s)
assert s.temp['notional_value'] == 1e6
s.update(dts[1])
assert algo(s)
assert s.temp['notional_value'] == 5e6
s.update(dts[2])
assert not algo(s)
def test_rebalance():
algo = algos.Rebalance()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c1 = s['c1']
assert c1.value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 1000
assert s.capital == 0
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000
assert c2.position == 10
assert c2.weight == 1.
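# Illustrative sketch (not one of the test cases): Rebalance is normally the last algo in a stack inside
# a full backtest, together with selection and weighting algos, e.g.:
#
#     s = bt.Strategy('equal_weight', [algos.RunMonthly(), algos.SelectAll(),
#                                      algos.WeighEqually(), algos.Rebalance()])
#     res = bt.run(bt.Backtest(s, data))   # `data` is a price DataFrame as in the tests above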
def test_rebalance_with_commissions():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 999
assert s.capital == 99
c1 = s['c1']
assert c1.value == 900
assert c1.position == 9
assert c1.weight == 900 / 999.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 997
assert s.capital == 97
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 900
assert c2.position == 9
assert c2.weight == 900. / 997
def test_rebalance_with_cash():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 1}
# set cash amount
s.temp['cash'] = 0.5
assert algo(s)
assert s.value == 999
assert s.capital == 599
c1 = s['c1']
assert c1.value == 400
assert c1.position == 4
assert c1.weight == 400.0 / 999
s.temp['weights'] = {'c2': 1}
# change cash amount
s.temp['cash'] = 0.25
assert algo(s)
assert s.value == 997
assert s.capital == 297
c2 = s['c2']
assert c1.value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 700
assert c2.position == 7
assert c2.weight == 700.0 / 997
def test_rebalance_updatecount():
algo = algos.Rebalance()
s = bt.Strategy('s')
s.use_integer_positions(False)
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4','c5'], data=100)
s.setup(data)
s.adjust(1000)
s.update(dts[0])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
assert s.value == 1000
assert s.capital == 0
# Update is called once when each weighted security is created (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[1])
s.temp['weights'] = {'c1': 0.5, 'c2':0.5}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (4)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 8
s.update(dts[2])
s.temp['weights'] = {'c1': 0.25, 'c2':0.25, 'c3':0.25, 'c4':0.25}
update = bt.core.SecurityBase.update
bt.core.SecurityBase._update_call_count = 0
def side_effect(self, *args, **kwargs):
bt.core.SecurityBase._update_call_count += 1
return update(self, *args, **kwargs)
with mock.patch.object(bt.core.SecurityBase, 'update', side_effect) as mock_update:
assert algo(s)
# Update is called once for each weighted security before allocation (2)
# and once for each security after all allocations are made (4)
assert bt.core.SecurityBase._update_call_count == 6
def test_rebalance_fixedincome():
algo = algos.Rebalance()
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
s = bt.FixedIncomeStrategy('s', children = [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
coupons = pd.DataFrame(index=dts, columns=['c2'], data=0)
s.setup(data, coupons=coupons)
s.update(dts[0])
s.temp['notional_value'] = 1000
s.temp['weights'] = {'c1': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000
c1 = s['c1']
assert c1.value == 1000
assert c1.notional_value == 1000
assert c1.position == 10
assert c1.weight == 1.
s.temp['weights'] = {'c2': 1}
assert algo(s)
assert s.value == 0.
assert s.notional_value == 1000
assert s.capital == -1000*100
c2 = s['c2']
assert c1.value == 0
assert c1.notional_value == 0
assert c1.position == 0
assert c1.weight == 0
assert c2.value == 1000*100
assert c2.notional_value == 1000
assert c2.position == 1000
assert c2.weight == 1.
def test_select_all():
algo = algos.SelectAll()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectAll(include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectAll(include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly_n_none():
algo = algos.SelectRandomly(n=None) # Behaves like SelectAll
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure don't keep nan
s.update(dts[1])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
# if specify include_no_data then 2
algo2 = algos.SelectRandomly(n=None, include_no_data=True)
assert algo2(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp.pop('selected')
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectRandomly(n=None, include_negative=True)
assert algo3(s)
selected = s.temp.pop('selected')
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_randomly():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[0]] = np.nan
data['c2'][dts[0]] = 95
data['c3'][dts[0]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectRandomly(n=1)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
random.seed(1000)
algo = algos.SelectRandomly(n=1, include_negative=True)
assert algo(s)
assert s.temp.pop('selected') == ['c3']
random.seed(1009)
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c1']
random.seed(1009)
# If selected already set, it will further filter it
s.temp['selected'] = ['c2']
algo = algos.SelectRandomly(n=1, include_no_data=True)
assert algo(s)
assert s.temp.pop('selected') == ['c2']
def test_select_these():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
s.setup(data)
s.update(dts[0])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
algo = algos.SelectThese( ['c1'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
# make sure we don't keep NaN prices
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if include_no_data is specified, 2 are selected
algo2 = algos.SelectThese( ['c1', 'c2'], include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectThese(['c1', 'c2'], include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where_all():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c1'][dts[2]] = -5
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
s.setup(data, where = where)
s.update(dts[0])
algo = algos.SelectWhere('where')
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# make sure we don't keep NaN prices
s.update(dts[1])
algo = algos.SelectThese( ['c1', 'c2'])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# if include_no_data is specified, 2 are selected
algo2 = algos.SelectWhere('where', include_no_data=True)
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# behavior on negative prices
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
algo3 = algos.SelectWhere('where', include_negative=True)
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
def test_select_where():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere('where')
s.setup(data, where=where)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_where_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
where = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=True)
where.loc[ dts[1] ] = False
where['c1'].loc[ dts[2] ] = False
algo = algos.SelectWhere(where)
s.setup(data)
s.update(dts[0])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
s.update(dts[1])
assert algo(s)
assert s.temp['selected'] == []
s.update(dts[2])
assert algo(s)
assert s.temp['selected'] == ['c2']
def test_select_regex():
s = bt.Strategy('s')
algo = algos.SelectRegex( 'c1' )
s.temp['selected'] = ['a1', 'c1', 'c2', 'c11', 'cc1']
assert algo( s )
assert s.temp['selected'] == ['c1', 'c11', 'cc1']
algo = algos.SelectRegex( '^c1$' )
assert algo( s )
assert s.temp['selected'] == ['c1']
def test_resolve_on_the_run():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'b1'], data=100.)
data['c1'][dts[1]] = np.nan
data['c2'][dts[1]] = 95
data['c2'][dts[2]] = -5
on_the_run = pd.DataFrame(index=dts, columns=['c'], data='c1')
on_the_run.loc[dts[2], 'c'] = 'c2'
s.setup(data, on_the_run = on_the_run)
s.update(dts[0])
s.temp['selected'] = ['c', 'b1']
algo = algos.ResolveOnTheRun( 'on_the_run' )
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# make sure we don't keep NaN prices
s.update(dts[1])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
# if include_no_data is specified, 2 are selected
algo2 = algos.ResolveOnTheRun('on_the_run', include_no_data=True)
s.temp['selected'] = ['c', 'b1']
assert algo2(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'b1' in selected
# behavior on negative prices
s.update(dts[2])
s.temp['selected'] = ['c', 'b1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'b1' in selected
algo3 = algos.ResolveOnTheRun('on_the_run', include_negative=True)
s.temp['selected'] = ['c', 'b1']
assert algo3(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c2' in selected
assert 'b1' in selected
def test_select_types():
c1 = bt.Security('c1')
c2 = bt.CouponPayingSecurity('c2')
c3 = bt.HedgeSecurity('c3')
c4 = bt.CouponPayingHedgeSecurity('c4')
c5 = bt.FixedIncomeSecurity('c5')
s = bt.Strategy('p', children = [c1, c2, c3, c4, c5])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3', 'c4', 'c5'], data=100.)
coupons = pd.DataFrame(index=dts, columns=['c2', 'c4'], data=0.)
s.setup(data, coupons = coupons)
i = 0
s.update(dts[i])
algo = algos.SelectTypes(include_types=(bt.Security, bt.HedgeSecurity), exclude_types=())
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3'])
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,), exclude_types=(bt.CouponPayingSecurity,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c3', 'c5'])
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.SelectTypes(include_types=(bt.core.SecurityBase,))
assert algo(s)
assert set(s.temp.pop('selected')) == set(['c1', 'c2', 'c3'])
def test_weight_equally():
algo = algos.WeighEqually()
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
s.update(dts[0])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.5
assert 'c2' in weights
assert weights['c2'] == 0.5
def test_weight_specified():
algo = algos.WeighSpecified(c1=0.6, c2=0.4)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert 'c1' in weights
assert weights['c1'] == 0.6
assert 'c2' in weights
assert weights['c2'] == 0.4
def test_scale_weights():
s = bt.Strategy('s')
algo = algos.ScaleWeights( -0.5 )
s.temp['weights'] = {'c1': 0.5, 'c2': -0.4, 'c3':0 }
assert algo( s )
assert s.temp['weights'] == {'c1':-0.25, 'c2':0.2, 'c3':0}
def test_select_has_data():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=10)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
def test_select_has_data_preselected():
algo = algos.SelectHasData(min_count=3, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[0]] = np.nan
data['c1'].loc[dts[1]] = np.nan
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1']
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
@mock.patch('ffn.calc_erc_weights')
def test_weigh_erc(mock_erc):
algo = algos.WeighERC(lookback=pd.DateOffset(days=5))
mock_erc.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_erc.called
rets = mock_erc.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_target():
algo = algos.WeighTarget('target')
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
target = pd.DataFrame(index=dts[:2], columns=['c1', 'c2'], data=0.5)
target['c1'].loc[dts[1]] = 1.0
target['c2'].loc[dts[1]] = 0.0
s.setup( data, target = target )
s.update(dts[0])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.5
assert weights['c2'] == 0.5
s.update(dts[1])
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 1.0
assert weights['c2'] == 0.0
s.update(dts[2])
assert not algo(s)
def test_weigh_inv_vol():
algo = algos.WeighInvVol(lookback=pd.DateOffset(days=5))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
# high vol c1
data['c1'].loc[dts[1]] = 105
data['c1'].loc[dts[2]] = 95
data['c1'].loc[dts[3]] = 105
data['c1'].loc[dts[4]] = 95
# low vol c2
data['c2'].loc[dts[1]] = 100.1
data['c2'].loc[dts[2]] = 99.9
data['c2'].loc[dts[3]] = 100.1
data['c2'].loc[dts[4]] = 99.9
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c2'] > weights['c1']
aae(weights['c1'], 0.020, 3)
aae(weights['c2'], 0.980, 3)
@mock.patch('ffn.calc_mean_var_weights')
def test_weigh_mean_var(mock_mv):
algo = algos.WeighMeanVar(lookback=pd.DateOffset(days=5))
mock_mv.return_value = pd.Series({'c1': 0.3, 'c2': 0.7})
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.update(dts[4])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
assert mock_mv.called
rets = mock_mv.call_args[0][0]
assert len(rets) == 4
assert 'c1' in rets
assert 'c2' in rets
weights = s.temp['weights']
assert len(weights) == 2
assert weights['c1'] == 0.3
assert weights['c2'] == 0.7
def test_weigh_randomly():
s = bt.Strategy('s')
s.temp['selected'] = ['c1', 'c2', 'c3']
algo = algos.WeighRandomly()
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
assert sum( weights.values() ) == 1.
algo = algos.WeighRandomly( (0.3,0.5), 0.95)
assert algo(s)
weights = s.temp['weights']
assert len( weights ) == 3
aae( sum( weights.values() ), 0.95 )
for c in s.temp['selected']:
assert weights[c] <= 0.5
assert weights[c] >= 0.3
def test_set_stat():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( 'test_stat' )
s.setup(data, test_stat = stat)
s.update(dts[0])
print()
print(s.get_data('test_stat'))
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_set_stat_legacy():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[1]] = 105
data['c2'].loc[dts[1]] = 95
stat = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=4.)
stat['c1'].loc[dts[1]] = 5.
stat['c2'].loc[dts[1]] = 6.
algo = algos.SetStat( stat )
s.setup(data)
s.update(dts[0])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 4.
assert stat['c2'] == 4.
s.update(dts[1])
assert algo(s)
stat = s.temp['stat']
assert stat['c1'] == 5.
assert stat['c2'] == 6.
def test_stat_total_return():
algo = algos.StatTotalReturn(lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
stat = s.temp['stat']
assert len(stat) == 2
assert stat['c1'] == 105.0 / 100 - 1
assert stat['c2'] == 95.0 / 100 - 1
def test_select_n():
algo = algos.SelectN(n=1, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
algo = algos.SelectN(n=1, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c2' in selected
# returns the 2 we have if all_or_none is False
algo = algos.SelectN(n=3, sort_descending=False)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 2
assert 'c1' in selected
assert 'c2' in selected
# returns 0 of them if all_or_none is True
algo = algos.SelectN(n=3, sort_descending=False, all_or_none=True)
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 0
def test_select_n_perc():
algo = algos.SelectN(n=0.5, sort_descending=True)
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['stat'] = data.calc_total_return()
assert algo(s)
selected = s.temp['selected']
assert len(selected) == 1
assert 'c1' in selected
def test_select_momentum():
algo = algos.SelectMomentum(n=1, lookback=pd.DateOffset(days=3))
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
data['c1'].loc[dts[2]] = 105
data['c2'].loc[dts[2]] = 95
s.setup(data)
s.update(dts[2])
s.temp['selected'] = ['c1', 'c2']
assert algo(s)
actual = s.temp['selected']
assert len(actual) == 1
assert 'c1' in actual
def test_limit_weights():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 0.6, 'c2':0.2, 'c3':0.2}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.5
assert w['c2'] == 0.25
assert w['c3'] == 0.25
algo = algos.LimitWeights(0.3)
assert algo(s)
w = s.temp['weights']
assert w == {}
s.temp['weights'] = {'c1': 0.4, 'c2':0.3, 'c3':0.3}
algo = algos.LimitWeights(0.5)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.4
assert w['c2'] == 0.3
assert w['c3'] == 0.3
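# LimitDeltas caps how far each weight may move; when the strategy already holds positions,
# the cap is applied relative to the existing child weights (see the last block of asserts below).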
def test_limit_deltas():
s = bt.Strategy('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100.)
s.setup(data)
s.temp['weights'] = {'c1': 1}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.1
s.temp['weights'] = {'c1': 0.05}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert w['c1'] == 0.05
s.temp['weights'] = {'c1': 0.5, 'c2': 0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == 0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.1
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.5
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas({'c1': 0.1, 'c2': 0.3})
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.1
assert w['c2'] == -0.3
# set existing weights
s.children['c1'] = bt.core.SecurityBase('c1')
s.children['c1']._weight = 0.3
s.children['c2'] = bt.core.SecurityBase('c2')
s.children['c2']._weight = -0.7
s.temp['weights'] = {'c1': 0.5, 'c2': -0.5}
algo = algos.LimitDeltas(0.1)
assert algo(s)
w = s.temp['weights']
assert len(w) == 2
assert w['c1'] == 0.4
assert w['c2'] == -0.6
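# RebalanceOverTime(n=2) should spread the move toward the target weights over two calls,
# shifting half of the remaining gap each time and delegating the trade to the rebalance step (mocked here as rb).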
def test_rebalance_over_time():
target = mock.MagicMock()
rb = mock.MagicMock()
algo = algos.RebalanceOverTime(n=2)
# patch in rb function
algo._rb = rb
target.temp = {}
target.temp['weights'] = {'a': 1, 'b': 0}
a = mock.MagicMock()
a.weight = 0.
b = mock.MagicMock()
b.weight = 1.
target.children = {'a': a, 'b': b}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 0.5
assert w['b'] == 0.5
assert rb.called
called_tgt = rb.call_args[0][0]
called_tgt_w = called_tgt.temp['weights']
assert len(called_tgt_w) == 2
assert called_tgt_w['a'] == 0.5
assert called_tgt_w['b'] == 0.5
# update weights for next call
a.weight = 0.5
b.weight = 0.5
# clear out temp - same as Strategy would do
target.temp = {}
assert algo(target)
w = target.temp['weights']
assert len(w) == 2
assert w['a'] == 1.
assert w['b'] == 0.
assert rb.call_count == 2
# update weights for next call
# should do nothing now
a.weight = 1
b.weight = 0
# clear out temp - same as Strategy would do
target.temp = {}
assert algo(target)
# no diff in call_count since last time
assert rb.call_count == 2
def test_require():
target = mock.MagicMock()
target.temp = {}
algo = algos.Require(lambda x: len(x) > 0, 'selected')
assert not algo(target)
target.temp['selected'] = []
assert not algo(target)
target.temp['selected'] = ['a', 'b']
assert algo(target)
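# RunEveryNPeriods(n, offset) should fire on every n-th new date, shifted by the offset,
# and must not fire twice for the same date.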
def test_run_every_n_periods():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=0)
target.now = pd.to_datetime('2010-01-01')
assert algo(target)
# running again with no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert not algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert algo(target)
target.now = pd.to_datetime('2010-01-05')
assert not algo(target)
def test_run_every_n_periods_offset():
target = mock.MagicMock()
target.temp = {}
algo = algos.RunEveryNPeriods(n=3, offset=1)
target.now = pd.to_datetime('2010-01-01')
assert not algo(target)
# running again with no date change should not trigger
assert not algo(target)
target.now = pd.to_datetime('2010-01-02')
assert algo(target)
target.now = pd.to_datetime('2010-01-03')
assert not algo(target)
target.now = pd.to_datetime('2010-01-04')
assert not algo(target)
target.now = pd.to_datetime('2010-01-05')
assert algo(target)
def test_not():
target = mock.MagicMock()
target.temp = {}
# runs on every date except 2018-01-02
runOnDateAlgo = algos.RunOnDate(pd.to_datetime('2018-01-02'))
notAlgo = algos.Not(runOnDateAlgo)
target.now = pd.to_datetime('2018-01-01')
assert notAlgo(target)
target.now = | pd.to_datetime('2018-01-02') | pandas.to_datetime |
""" test the scalar Timestamp """
import pytz
import pytest
import dateutil
import calendar
import locale
import numpy as np
from dateutil.tz import tzutc
from pytz import timezone, utc
from datetime import datetime, timedelta
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tseries import offsets
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz
from pandas.errors import OutOfBoundsDatetime
from pandas.compat import long, PY3
from pandas.compat.numpy import np_datetime64_compat
from pandas import Timestamp, Period, Timedelta, NaT
class TestTimestampProperties(object):
def test_properties_business(self):
ts = Timestamp('2017-10-01', freq='B')
control = Timestamp('2017-10-01')
assert ts.dayofweek == 6
assert not ts.is_month_start # not a weekday
assert not ts.is_quarter_start # not a weekday
# Control case: non-business is month/qtr start
assert control.is_month_start
assert control.is_quarter_start
ts = Timestamp('2017-09-30', freq='B')
control = Timestamp('2017-09-30')
assert ts.dayofweek == 5
assert not ts.is_month_end # not a weekday
assert not ts.is_quarter_end # not a weekday
# Control case: non-business is month/qtr end
assert control.is_month_end
assert control.is_quarter_end
def test_fields(self):
def check(value, equal):
# check that we are int/long like
assert isinstance(value, (int, long))
assert value == equal
# GH 10050
ts = Timestamp('2015-05-10 09:06:03.000100001')
check(ts.year, 2015)
check(ts.month, 5)
check(ts.day, 10)
check(ts.hour, 9)
check(ts.minute, 6)
check(ts.second, 3)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 100)
check(ts.nanosecond, 1)
check(ts.dayofweek, 6)
check(ts.quarter, 2)
check(ts.dayofyear, 130)
check(ts.week, 19)
check(ts.daysinmonth, 31)
check(ts.daysinmonth, 31)
# GH 13303
ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern')
check(ts.year, 2014)
check(ts.month, 12)
check(ts.day, 31)
check(ts.hour, 23)
check(ts.minute, 59)
check(ts.second, 0)
pytest.raises(AttributeError, lambda: ts.millisecond)
check(ts.microsecond, 0)
check(ts.nanosecond, 0)
check(ts.dayofweek, 2)
check(ts.quarter, 4)
check(ts.dayofyear, 365)
check(ts.week, 1)
check(ts.daysinmonth, 31)
ts = Timestamp('2014-01-01 00:00:00+01:00')
starts = ['is_month_start', 'is_quarter_start', 'is_year_start']
for start in starts:
assert getattr(ts, start)
ts = Timestamp('2014-12-31 23:59:59+01:00')
ends = ['is_month_end', 'is_year_end', 'is_quarter_end']
for end in ends:
assert getattr(ts, end)
# GH 12806
@pytest.mark.parametrize('data',
[Timestamp('2017-08-28 23:00:00'),
Timestamp('2017-08-28 23:00:00', tz='EST')])
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_names(self, data, time_locale):
# GH 17354
# Test .weekday_name, .day_name(), .month_name
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
assert data.weekday_name == 'Monday'
if time_locale is None:
expected_day = 'Monday'
expected_month = 'August'
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_day = calendar.day_name[0].capitalize()
expected_month = calendar.month_name[8].capitalize()
assert data.day_name(time_locale) == expected_day
assert data.month_name(time_locale) == expected_month
# Test NaT
nan_ts = Timestamp(NaT)
assert np.isnan(nan_ts.day_name(time_locale))
assert np.isnan(nan_ts.month_name(time_locale))
@pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo'])
def test_is_leap_year(self, tz):
# GH 13727
dt = Timestamp('2000-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
assert isinstance(dt.is_leap_year, bool)
dt = Timestamp('1999-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
dt = Timestamp('2004-01-01 00:00:00', tz=tz)
assert dt.is_leap_year
dt = Timestamp('2100-01-01 00:00:00', tz=tz)
assert not dt.is_leap_year
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013, 12, 31)
result = Timestamp(d).week
expected = 1 # ISO standard
assert result == expected
d = datetime(2008, 12, 28)
result = Timestamp(d).week
expected = 52 # ISO standard
assert result == expected
d = datetime(2009, 12, 31)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 1)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
d = datetime(2010, 1, 3)
result = Timestamp(d).week
expected = 53 # ISO standard
assert result == expected
result = np.array([Timestamp(datetime(*args)).week
for args in [(2000, 1, 1), (2000, 1, 2), (
2005, 1, 1), (2005, 1, 2)]])
assert (result == [52, 52, 53, 53]).all()
class TestTimestampConstructors(object):
def test_constructor(self):
base_str = '2014-07-01 09:00'
base_dt = datetime(2014, 7, 1, 9)
base_expected = 1404205200000000000
# confirm base representation is correct
import calendar
assert (calendar.timegm(base_dt.timetuple()) * 1000000000 ==
base_expected)
tests = [(base_str, base_dt, base_expected),
('2014-07-01 10:00', datetime(2014, 7, 1, 10),
base_expected + 3600 * 1000000000),
('2014-07-01 09:00:00.000008000',
datetime(2014, 7, 1, 9, 0, 0, 8),
base_expected + 8000),
('2014-07-01 09:00:00.000000005',
Timestamp('2014-07-01 09:00:00.000000005'),
base_expected + 5)]
timezones = [(None, 0), ('UTC', 0), (pytz.utc, 0), ('Asia/Tokyo', 9),
('US/Eastern', -4), ('dateutil/US/Pacific', -7),
(pytz.FixedOffset(-180), -3),
(dateutil.tz.tzoffset(None, 18000), 5)]
for date_str, date, expected in tests:
for result in [Timestamp(date_str), Timestamp(date)]:
# only with timestring
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# re-creation shouldn't affect the internal value
result = Timestamp(result)
assert result.value == expected
assert conversion.pydt_to_i8(result) == expected
# with timezone
for tz, offset in timezones:
for result in [Timestamp(date_str, tz=tz), Timestamp(date,
tz=tz)]:
expected_tz = expected - offset * 3600 * 1000000000
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should preserve tz
result = Timestamp(result)
assert result.value == expected_tz
assert conversion.pydt_to_i8(result) == expected_tz
# should convert to UTC
result = Timestamp(result, tz='UTC')
expected_utc = expected - offset * 3600 * 1000000000
assert result.value == expected_utc
assert | conversion.pydt_to_i8(result) | pandas._libs.tslibs.conversion.pydt_to_i8 |
# encoding: utf-8
import tkinter.messagebox
import webbrowser
from tkinter import *
import re  # needed for the re.sub / special-character stripping in FuLiCiYun; missing from the original imports
import jieba
import pandas as pd
import pymongo
from pyecharts import options as opts
from pyecharts.charts import Bar, Page, Pie, WordCloud, Line, Map
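# Tkinter front-end: the user enters a job keyword and a city, then the app generates and opens
# pyecharts HTML reports (salary by district, by education, by experience, and a benefits word cloud).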
class App:
def __init__(self, master):
self.master = master
self.initWidgets()
def initWidgets(self):
def DiQuXinZi():
webbrowser.open_new('地区薪资分布图.html')
def XueLiXinZi():
webbrowser.open_new('学历薪资图.html')
def JingYanXinZi():
webbrowser.open_new('经验-薪资对比图.html')
def GongSiFuLi():
webbrowser.open_new('公司福利词云图.html')
def ChaXun():
Gangwei = self.txt1.get()
City = self.txt2.get()
df.drop_duplicates(['职位信息'], inplace=True)
get_data = df[df['工作岗位'].str.contains('{}'.format(Gangwei), case=False)]
# self.txt3.set("数据表:" + collection.name + "--包含数据%s条" % len(get_data))
self.txt3.set('正在生成!')
City_Salary(get_data, City)
One_avgs = [One_City(get_data, i, City) for i in edul]
bar_same_series_gap(edul, City, One_avgs).render('学历薪资图.html')
Get_Photo(get_data, City)
FuLiCiYun(get_data, City)
self.txt3.set('生成完成,请查看!')
self.txt1 = StringVar()
self.txt2 = StringVar()
self.txt3 = StringVar()
self.txt1.set('java')
self.txt2.set('重庆')
# Team-name logo goes here
global photo
photo = PhotoImage(file='redafine.png')
fm = Frame(self.master)
fm.pack(side=LEFT, fill=BOTH, expand=NO, )
Button(fm, text='Redafine', image=photo, relief=RIDGE).pack(side=TOP, fill=X, expand=NO, padx=5, pady=5)
# Top area
fmTop = Frame(self.master)
fmTop.pack(side=TOP, fill=BOTH, expand=NO, padx=150)
Entry(fmTop, textvariable=self.txt1, width=14, font=('楷体', 16),
relief=RIDGE).pack(side=LEFT, expand=NO, fill=X, padx=20, pady=10)
Label(fmTop, text="岗位、关键字", font=('思源黑体')).pack(side=LEFT, expand=NO, fill=Y, padx=0, pady=10)
Entry(fmTop, textvariable=self.txt2, width=14, font=('楷体', 16),
relief=RIDGE).pack(side=LEFT, expand=NO, fill=X, padx=20, pady=10)
Label(fmTop, text="城市选择", font=('思源黑体')).pack(side=LEFT, expand=NO, fill=Y, pady=10)
Button(fmTop, text='点击查询', command=ChaXun, font=('思源黑体'), height=2, width=10, relief=RIDGE).pack(side=RIGHT,
fill=X,
expand=NO,
padx=20,
pady=10)
# Left-hand navigation bar
fm1 = Frame(fm)
fm1.pack(side=LEFT, fill=Y, expand=NO, pady=60)
Button(fm1, text='地区薪资', height=3, command=DiQuXinZi, width=20, font=('思源黑体'), relief=RIDGE).pack(side=TOP,
fill=X,
padx=10)
Button(fm1, text='学历薪资', height=3, command=XueLiXinZi, font=('思源黑体'), relief=RIDGE).pack(side=TOP, fill=X,
pady=10,
padx=10)
Button(fm1, text='经验薪资', height=3, command=JingYanXinZi, font=('思源黑体'), relief=RIDGE).pack(side=TOP, fill=X,
padx=10)
Button(fm1, text='公司福利', height=3, command=GongSiFuLi, font=('思源黑体'), relief=RIDGE).pack(side=TOP, fill=X,
pady=10, padx=10)
# Main body area
monty = LabelFrame(root, text="数据系统", font=('思源黑体', 16), width=300, padx=30, )
monty.pack(side=LEFT, fill=Y, expand=NO, padx=30) # expand: whether the widget repositions as the window is resized
can = tkinter.Canvas(monty, bg='#afdfe4')
can.pack(expand=YES, fill=BOTH)
Label(can, textvariable=self.txt3, relief='ridge', font=('思源黑体', 12), width=100).grid(row=0, column=1, pady=10,
padx=50)
def bar_same_series_gap(edul, City, One_avgs) -> Bar:
c = (
Bar()
.add_xaxis(edul)
.add_yaxis("{}".format(City), One_avgs, category_gap="50%")
.set_global_opts(title_opts=opts.TitleOpts(title="{}地区-学历薪资".format(City)))
)
return c
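# City_Salary builds the per-district views for one city: a map of posting counts and a bar chart of
# average salary per district, rendered together into '地区薪资分布图.html'.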
def City_Salary(get_data, City_name):
work_salas = get_data[get_data['城市'].str.contains('{}'.format(City_name))]
sums = [sala for sala in work_salas['平均薪资'] if type(sala) == int] # list of salaries
avg = int(sum(sums) / int(len(sums))) # average salary
def Area_Salary():
Data = work_salas['地区'].value_counts()
attr = list(Data.index)
value = [int(i) for i in list(Data.values)]
print(attr) # districts found
print(value) # number of postings per district
return attr, value # return the districts and their counts
attr, value = Area_Salary()
sala = []
for i in attr:
work_sa = work_salas[work_salas['地区'].str.contains('{}'.format(i))]
sums = [sala for sala in work_sa['平均薪资'] if type(sala) == int]
avg = int(sum(sums) / int(len(sums)))
sala.append(avg)
print(sala)
def map_guangdong() -> Map:
c = (
Map()
.add("{}".format(City_name), [list(z) for z in zip(attr, value)], "{}".format(City_name))
.set_global_opts(
title_opts=opts.TitleOpts(title="{}地区-岗位分布".format(City_name)),
visualmap_opts=opts.VisualMapOpts(),
)
)
return c
def bar_same_series_gap() -> Bar:
c = (
Bar()
.add_xaxis(attr)
.add_yaxis("{}".format(City_name), sala, category_gap="50%")
.set_global_opts(title_opts=opts.TitleOpts(title="{}地区-薪资分布".format(City_name)))
.set_series_opts(
label_opts=opts.LabelOpts(is_show=False),
markpoint_opts=opts.MarkPointOpts(
data=[
opts.MarkPointItem(type_="max", name="最大值"),
opts.MarkPointItem(type_="min", name="最小值"),
opts.MarkPointItem(type_="average", name="平均值"),
])
)
)
return c
page = Page()
page.add(map_guangdong(), bar_same_series_gap())
page.render('地区薪资分布图.html')
return sala, attr, value
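# One_City returns the average salary for one education level in the chosen city; levels with
# fewer than three postings (or no parsable salaries) are reported as 0.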
def One_City(get_data, Edu_name, City):
One_City = get_data[get_data['城市'].str.contains('{}'.format(City))]
work_salas = One_City[One_City['学历要求'].str.contains('{}'.format(Edu_name))]
CQ_sum = [sala for sala in work_salas['平均薪资'] if type(sala) == int] # list of salaries
try:
if len(CQ_sum) < 3: # skip education levels with fewer than three postings
avg = 0
else:
avg = int(sum(CQ_sum) / int(len(CQ_sum)))
except:
avg = 0
return avg
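# Get_Photo builds the experience-level charts for the chosen city: a pie of posting counts per
# experience level and a bar/line overlay of posting counts vs. average salary, rendered to '经验-薪资对比图.html'.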
def Get_Photo(get_data, City_name):
work = ['3-4年经验', '无工作经验', '5-7年经验', '2年经验', '1年经验', '8-9年经验', '10年以上经验']
workyear = ['无工作经验', '1年经验', '2年经验', '3-4年经验', '5-7年经验', '8-9年经验', '10年以上经验']
Data = get_data[get_data['城市'].str.contains('{}'.format(City_name))]
print("得到有效数据%s条" % len(Data))
"""计算不同工作经验对应的平均薪资"""
def Work_Salary(Work_name):
work_salas = Data[Data['工作经验'].str.contains('{}'.format(Work_name))]
sums = [sala for sala in work_salas['平均薪资'] if type(sala) == int] # list of salaries
try:
avg = int(sum(sums) / int(len(sums)))
except:
avg = 0
return avg
All_avgs = [Work_Salary(i) for i in workyear]
print('平均薪资', All_avgs)
# Work-experience section: put the counts into the fixed workyear order
def workyear_ChuLi(Data):
value_workyear = []
for i in workyear:
try:
value_workyear.append(int(Data[i]))
except:
value_workyear.append(0)
return value_workyear
# Count the number of postings for each experience level
Data1 = Data['工作经验'].value_counts()
value_workyear1 = workyear_ChuLi(Data1)
print(workyear)
print(value_workyear1)
"""工作经验文本饼图"""
def pie_rich_label() -> Pie:
c = (
Pie()
.add(
"",
[list(z) for z in zip(workyear, value_workyear1)],
radius=["40%", "55%"],
label_opts=opts.LabelOpts(
position="outside",
formatter="{a|{a}}{abg|}\n{hr|}\n {b|{b}: }{c} {per|{d}%} ",
background_color="#eee",
border_color="#aaa",
border_width=1,
border_radius=4,
rich={
"a": {"color": "#999", "lineHeight": 22, "align": "center"},
"abg": {
"backgroundColor": "#e3e3e3",
"width": "100%",
"align": "right",
"height": 22,
"borderRadius": [4, 4, 0, 0],
},
"hr": {
"borderColor": "#aaa",
"width": "100%",
"borderWidth": 0.5,
"height": 0,
},
"b": {"fontSize": 16, "lineHeight": 33},
"per": {
"color": "#eee",
"backgroundColor": "#334455",
"padding": [2, 4],
"borderRadius": 2,
},
},
),
)
.set_global_opts(
title_opts=opts.TitleOpts(title="{}Java岗位-工作经验需求占比".format(City_name), pos_left='center'),
legend_opts=opts.LegendOpts(
orient="vertical", pos_top="5%", pos_left="1%")
)
)
return c
def overlap_bar_line() -> Bar:
bar = (
Bar()
.add_xaxis(workyear)
.add_yaxis("岗位数量", value_workyear1, category_gap="50%")
.extend_axis(
yaxis=opts.AxisOpts(
axislabel_opts=opts.LabelOpts(formatter="{value} /月"), interval=5000
)
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
title_opts=opts.TitleOpts(title="{}岗位数量-平均薪资对比图".format(City_name)),
yaxis_opts=opts.AxisOpts(
axislabel_opts=opts.LabelOpts(formatter="{value} 个")
),
)
)
line = Line().add_xaxis(workyear).add_yaxis("平均薪资", All_avgs, yaxis_index=1, is_smooth=True) \
.set_global_opts(title_opts=opts.TitleOpts(title="Line-smooth"))
bar.overlap(line)
return bar
page = Page()
page.add(pie_rich_label(), overlap_bar_line())
page.render('经验-薪资对比图.html'.format(City_name))
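# FuLiCiYun builds a word cloud of company benefits for the chosen city: it tokenizes the benefit
# text with jieba, keeps multi-character terms, counts them and renders '公司福利词云图.html'.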
def FuLiCiYun(get_data, City_name):
Data = get_data[get_data['城市'].str.contains('{}'.format(City_name))]
max = len(Data)
information = Data['公司福利']
txtl = jieba.cut(' '.join(information), cut_all=False) # word segmentation with jieba
txt = ' '.join(txtl).replace('\n', ' ').lower()
china = re.sub('[0-9a-zA-Z_]', '', txt) # keep the Chinese text (strip digits, letters and underscores)
for i in '!"#$%&()*+, ()【】,;0.!、:。-./:;<=>?@[\\]^_‘{|}~':
china = china.replace(i, " ") # replace special characters in the text with spaces
print("请稍等,正在生成")
Idioms = china.split()
Icounts = {}
for Idiom in Idioms:
if len(Idiom) < 2: # filter out single characters
continue
else:
Icounts[Idiom] = Icounts.get(Idiom, 0) + 1
chinas = list(Icounts.items())
chinas.sort(key=lambda x: x[1], reverse=True)
def wordcloud_base() -> WordCloud:
c = (
WordCloud()
.add("", chinas, word_size_range=[20, 100])
.set_global_opts(title_opts=opts.TitleOpts(title="岗位要求"))
)
return c
wordcloud_base().render(('公司福利词云图.html'))
def main():
global root, df, edul, collection
root = Toplevel()
root.title("单个城市岗位数据分析")
root.geometry("1200x800")
display = App(root)
client = pymongo.MongoClient()
collection = client['51Job']['Scrapy']
data = collection.find()
df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import mplfinance as mpl
plt.rcParams['legend.facecolor'] = 'darkgray'
############################## RETAIL SALES ##############################
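# Load the weekly retail sales CSV, drop the redundant 'Date' column and coerce IsHoliday
# to 0/1 while preserving missing values.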
def process_retailsales(path):
data = pd.read_csv(path, index_col=0, parse_dates=True)
data = data.drop('Date', axis=1)
data.IsHoliday = data.IsHoliday.apply(lambda x : int(x) if not pd.isna(x) else np.nan)
return data
def plot_retialsales(data, style='ggplot'):
plt.rcParams['figure.dpi'] = 100
plt.rcParams['figure.figsize'] = (15,7)
plt.style.use(style)
_=data.groupby('Store').Weekly_Sales.plot(title='Weekly Sales all stores')
_=plt.xlabel('Dates')
_=plt.ylabel('Qty')
############################## SUNSPOTS ##############################
def process_sunspots(path):
data=pd.read_csv(path, index_col=[0], usecols=[1,2], parse_dates=True)
data.index.name = 'Month'
data.columns = ['MMTS']
data = data.sort_index()
return data
def plot_sunspots(data, style='ggplot'):
plt.rcParams['figure.dpi'] = 100
plt.rcParams['figure.figsize'] = (15,7)
plt.style.use(style)
_=data.plot(title='Monthly Mean of Sunspots observed')
_=plt.title('Sunspots')
_=plt.ylabel('Mean Sunspots Numbers')
############################## USA ECONOMIC ##############################
def process_usaeconomic(path):
data = pd.read_csv(path, index_col=0, parse_dates=True)
data.index.name = 'Date'
return data
def plot_usaeconomic(data, style='ggplot'):
plt.rcParams['figure.dpi'] = 100
plt.rcParams['figure.figsize'] = (15,7)
plt.style.use(style)
_=data.plot(title='USA-Consumption, Income, Production, Savings & Unemployment Pct Changes')
############################## VISITORS TO 20 REGIONS ##############################
def process_20rvisitors(path):
data = | pd.read_csv(path, index_col=0, parse_dates=True) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 18:24:25 2021
@author: HASANUL
"""
import pandas as pd
from scipy import sparse
ratings = pd.read_csv('ratings.csv')
movies = | pd.read_csv('movies.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pandas as pd
from pandapower.plotting.generic_geodata import create_generic_coordinates
from pandapower.plotting.plotly.traces import create_bus_trace, create_line_trace, \
create_trafo_trace, draw_traces, version_check, _create_node_trace, _create_branch_trace
from pandapower.plotting.plotly.mapbox_plot import *
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
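# Build the hover text shown on each plotly trace; the format depends on the element type
# (bus, line, trafo, trafo3w, ext_grid), and anything else returns None.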
def get_hoverinfo(net, element, precision=3, sub_index=None):
hover_index = net[element].index
if element == "bus":
load_str, sgen_str = [], []
for ln in [net.load.loc[net.load.bus == b, "p_mw"].sum() for b in net.bus.index]:
load_str.append("Load: {:.3f} MW<br />".format(ln) if ln != 0. else "")
for s in [net.sgen.loc[net.sgen.bus == b, "p_mw"].sum() for b in net.bus.index]:
sgen_str.append("Static generation: {:.3f} MW<br />".format(s) if s != 0. else "")
hoverinfo = (
"Index: " + net.bus.index.astype(str) + '<br />' +
"Name: " + net.bus['name'].astype(str) + '<br />' +
'V_n: ' + net.bus['vn_kv'].round(precision).astype(str) + ' kV' + '<br />' + load_str + sgen_str)\
.tolist()
elif element == "line":
hoverinfo = (
"Index: " + net.line.index.astype(str) + '<br />' +
"Name: " + net.line['name'].astype(str) + '<br />' +
'Length: ' + net.line['length_km'].round(precision).astype(str) + ' km' + '<br />' +
'R: ' + (net.line['length_km'] * net.line['r_ohm_per_km']).round(precision).astype(str)
+ ' Ohm' + '<br />'
+ 'X: ' + (net.line['length_km'] * net.line['x_ohm_per_km']).round(precision).astype(str)
+ ' Ohm' + '<br />').tolist()
elif element == "trafo":
hoverinfo = (
"Index: " + net.trafo.index.astype(str) + '<br />' +
"Name: " + net.trafo['name'].astype(str) + '<br />' +
'V_n HV: ' + net.trafo['vn_hv_kv'].round(precision).astype(str) + ' kV' + '<br />' +
'V_n LV: ' + net.trafo['vn_lv_kv'].round(precision).astype(str) + ' kV' + '<br />' +
'Tap pos.: ' + net.trafo['tap_pos'].astype(str) + '<br />').tolist()
elif element == "trafo3w":
hoverinfo = (
"Index: " + net.trafo3w.index.astype(str) + '<br />' +
"Name: " + net.trafo3w['name'].astype(str) + '<br />' +
'V_n HV: ' + net.trafo3w['vn_hv_kv'].round(precision).astype(str) + ' kV' + '<br />' +
'V_n MV: ' + net.trafo3w['vn_mv_kv'].round(precision).astype(str) + ' kV' + '<br />' +
'V_n LV: ' + net.trafo3w['vn_lv_kv'].round(precision).astype(str) + ' kV' + '<br />' +
'Tap pos.: ' + net.trafo3w['tap_pos'].astype(str) + '<br />').tolist()
elif element == "ext_grid":
hoverinfo = (
"Index: " + net.ext_grid.index.astype(str) + '<br />' +
"Name: " + net.ext_grid['name'].astype(str) + '<br />' +
'V_m: ' + net.ext_grid['vm_pu'].round(precision).astype(str) + ' p.u.' + '<br />' +
'V_a: ' + net.ext_grid['va_degree'].round(precision).astype(str) + ' °' + '<br />').tolist()
hover_index = net.ext_grid.bus.tolist()
else:
return None
hoverinfo = | pd.Series(index=hover_index, data=hoverinfo) | pandas.Series |
from kmeaningful import __version__
from kmeaningful.preprocess import preprocess
from sklearn.datasets import make_blobs
import pandas as pd
import numpy as np
import pytest
def test_version():
assert __version__ == '0.1.0'
def test_preprocess():
""" Performs tests for preprocess function """
# empty dataframe or array should throw exception
assert pytest.raises(Exception, preprocess, | pd.DataFrame({}) | pandas.DataFrame |