| prompt | completion | api |
| --- | --- | --- |
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
#! /usr/bin/env python3
import pandas as pd
import pathlib
import fire
import numpy as np
BEDHEADER = [
'chrom',
'start',
'end',
'tr_id',
'score',
'strand',
'thickStart',
'thickEnd',
'itemRgb',
'blockCount',
'blockSizes',
'blockStarts'
]
class Bed(object):
def __init__(self, bedfile):
self.bed = pathlib.PurePath(bedfile)
self.bed_df = pd.read_table(bedfile,
header=None,
names=BEDHEADER)
def nearby_tr(self, bed_df,
tr_idx, num=1, distance=None,
gene_type=None):
tr_inf = bed_df.loc[tr_idx]
same_chr_tr = bed_df[bed_df.chrom == tr_inf.chrom]
same_chr_tr = same_chr_tr[same_chr_tr.gene_id != tr_inf.gene_id]
if gene_type:
same_chr_tr = same_chr_tr[
same_chr_tr.gene_biotype == gene_type]
up_tr_inf = same_chr_tr[same_chr_tr.index < tr_idx]
up_tr_inf = up_tr_inf.loc[up_tr_inf.index[::-1][:num]]
down_tr_inf = same_chr_tr[same_chr_tr.index > tr_idx]
down_tr_inf = down_tr_inf.loc[down_tr_inf.index[:num]]
return up_tr_inf, down_tr_inf
def get_distance(self, tr_a, tr_b):
try:
distance = max(tr_a.start - tr_b.end,
tr_b.start - tr_a.end,
0)
except ValueError:
print('tra:{a}\ntrb:{b}'.format(a=tr_a.start, b=tr_b.start))
return None
else:
return distance
def nearest_tr(self, tr_type_file, nearby_type, outfile=None):
tr_type_df = pd.read_table(tr_type_file)
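# A minimal standalone sketch (added for illustration, not part of the original
# script) of the interval-gap formula used in Bed.get_distance above: the
# distance between two features is max(a.start - b.end, b.start - a.end, 0),
# i.e. the size of the gap between them, and 0 when they overlap.
import pandas as pd

tr_a = pd.Series({"start": 100, "end": 200})
tr_b = pd.Series({"start": 350, "end": 400})
print(max(tr_a.start - tr_b.end, tr_b.start - tr_a.end, 0))  # 150: gap between the two features

tr_c = pd.Series({"start": 150, "end": 250})
print(max(tr_a.start - tr_c.end, tr_c.start - tr_a.end, 0))  # 0: the features overlap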
#!/usr/bin/env python
"""Units and constants for transforming into and out of SI units.
All data is sourced from :py:mod:`scipy.constants` and :py:attr:`scipy.constants.physical_constants`. Every quantity stored in :py:class:`~solarwindpy.core.plasma.Plasma` and contained objects should have an entry in :py:class:`Constants`.
"""
import pdb # noqa: F401
import pandas as pd
from scipy import constants
from scipy.constants import physical_constants
# We rely on views via DataFrame.xs to reduce memory size and do not
# `.copy(deep=True)`, so we want to make sure that this doesn't
# accidentally cause a problem.
pd.set_option("mode.chained_assignment", "raise")
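# For illustration (hypothetical example, not in the original module): with
# "mode.chained_assignment" set to "raise", a chained assignment such as
#     df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#     df[df["a"] > 1]["b"] = 0
# raises SettingWithCopyError instead of silently writing to a temporary copy,
# so accidental writes through shared views fail loudly.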
_misc_constants = {
"e0": constants.epsilon_0,
"mu0": constants.mu_0,
"c": constants.c,
# "gamma": 5.0 / 3.0,
"hbar": physical_constants["Planck constant over 2 pi"][0],
"1AU [m]": constants.au,
"Re [m]": 6378.1e3, # Earth Radius in meters
"Rs [m]": 695.508e6, # Sun Radius in meters
"gas constant": constants.R,
}
_kBoltzmann = {
"J": constants.k,
"eV": physical_constants["Boltzmann constant in eV/K"][0],
}
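# Note on the values below: "par" (3) and "per" (2) are the effective adiabatic
# indices of the CGL double-adiabatic closure, while "scalar" is the usual
# monatomic ideal-gas value of 5/3.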
_polytropic_index = dict(par=3.0, per=2.0, scalar=5.0 / 3.0)
_m_in_mp = {
"p": 1.0,
"p1": 1.0,
"p2": 1.0,
"pm": 1.0,
"p_bimax": 1.0,
"a": physical_constants["alpha particle-proton mass ratio"][0],
"a1": physical_constants["alpha particle-proton mass ratio"][0],
"a2": physical_constants["alpha particle-proton mass ratio"][0],
"a_bimax": physical_constants["alpha particle-proton mass ratio"][0],
"e": physical_constants["electron-proton mass ratio"][0],
}
_charges = {
"e": -constants.e,
"p": constants.e,
"p1": constants.e,
"p2": constants.e,
"pm": constants.e,
"p_bimax": constants.e,
"a": 2.0 * constants.e,
"a1": 2.0 * constants.e,
"a2": 2.0 * constants.e,
"a_bimax": 2.0 * constants.e,
}
_charge_states = {
"e": -1.0,
"p": 1.0,
"p1": 1.0,
"p2": 1.0,
"pm": 1.0,
"p_bimax": 1.0,
"a": 2.0,
"a1": 2.0,
"a2": 2.0,
"a_bimax": 2.0,
}
_masses = {
"e": constants.m_e,
"p": constants.m_p,
"p1": constants.m_p,
"p2": constants.m_p,
"pm": constants.m_p, # proton moment
"p_bimax": constants.m_p,
"a_bimax": physical_constants["alpha particle mass"][0],
"a": physical_constants["alpha particle mass"][0],
"a1": physical_constants["alpha particle mass"][0],
"a2": physical_constants["alpha particle mass"][0],
}
_m_amu = {
"a": physical_constants["alpha particle mass in u"][0],
"a1": physical_constants["alpha particle mass in u"][0],
"a2": physical_constants["alpha particle mass in u"][0],
"a_bimax": physical_constants["alpha particle mass in u"][0],
"p": physical_constants["proton mass in u"][0],
"p1": physical_constants["proton mass in u"][0],
"p2": physical_constants["proton mass in u"][0],
"pm": physical_constants["proton mass in u"][0],
"p_bimax": physical_constants["proton mass in u"][0],
"e": physical_constants["electron mass in u"][0],
}
class Constants(object):
def __init__(self):
pass
@property
def misc(self):
return pd.Series(_misc_constants)
@property
def kb(self):
return pd.Series(_kBoltzmann)
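# A hypothetical usage sketch (the property below is an assumption, not part of
# the class above, which is truncated): each constant group is exposed as a
# pandas Series keyed by species label, so values can be looked up or broadcast
# across species.
import pandas as pd
from scipy import constants

class ConstantsSketch(object):
    _masses = {"e": constants.m_e, "p": constants.m_p}

    @property
    def m(self):
        return pd.Series(self._masses)

c = ConstantsSketch()
print(c.m["p"])        # proton mass in kg
print(c.m / c.m["p"])  # mass ratios relative to the proton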
from __future__ import print_function
import logging
import pandas as pd
import numpy as np
import scipy.stats as stats
from matplotlib.backends.backend_pdf import PdfPages
import os.path
from .storemanager import StoreManager
from .condition import Condition
from .constants import WILD_TYPE_VARIANT
from .sfmap import sfmap_plot
from .dataframe import singleton_dataframe
from .random_effects import rml_estimator
class Experiment(StoreManager):
"""
Class for coordinating multiple :py:class:`~.selection.Selection`
objects. Creating an
:py:class:`~experiment.Experiment` requires a valid *config* object,
usually from a ``.json`` configuration file.
"""
store_suffix = "exp"
treeview_class_name = "Experiment"
def __init__(self):
StoreManager.__init__(self)
self.conditions = list()
self._wt = None
self.logger = logging.getLogger("{}.{}".format(__name__, self.__class__.__name__))
@property
def wt(self):
if self.has_wt_sequence():
if self._wt is None:
self._wt = self.selection_list()[0].wt.duplicate(self.name)
return self._wt
else:
if self._wt is not None:
raise ValueError(
"Experiment should not contain wild type "
"sequence [{}]".format(self.name)
)
else:
return None
def configure(self, cfg, configure_children=True):
"""
Set up the :py:class:`~experiment.Experiment` using the *cfg* object,
usually from a ``.json`` configuration file.
"""
StoreManager.configure(self, cfg)
self.logger = logging.getLogger(
"{}.{} - {}".format(__name__, self.__class__.__name__, self.name)
)
if configure_children:
if "conditions" not in cfg:
raise KeyError(
"Missing required config value {} [{}]"
"".format("conditions", self.name)
)
for cnd_cfg in cfg["conditions"]:
cnd = Condition()
cnd.configure(cnd_cfg)
self.add_child(cnd)
selection_names = [x.name for x in self.selection_list()]
if len(set(selection_names)) != len(selection_names):
raise ValueError("Non-unique selection names [{}]" "".format(self.name))
def serialize(self):
"""
Format this object (and its children) as a config object suitable for
dumping to a config file.
"""
cfg = StoreManager.serialize(self)
cfg["conditions"] = [child.serialize() for child in self.children]
return cfg
def _children(self):
"""
Method bound to the ``children`` property. Returns a list of all
:py:class:`~condition.Condition` objects belonging to this object,
sorted by name.
"""
return sorted(self.conditions, key=lambda x: x.name)
def add_child(self, child):
"""
Add a selection.
"""
if child.name in self.child_names():
raise ValueError(
"Non-unique condition name '{}' [{}]" "".format(child.name, self.name)
)
child.parent = self
self.conditions.append(child)
def remove_child_id(self, tree_id):
"""
Remove the reference to a :py:class:`~condition.Condition` with
Treeview id *tree_id*.
"""
self.conditions = [x for x in self.conditions if x.treeview_id != tree_id]
def selection_list(self):
"""
Return the :py:class:`~selection.Selection` objects as a list.
"""
selections = list()
for cnd in self.children:
selections.extend(cnd.children)
return selections
def validate(self):
"""
Calls validate on all child Conditions. Also checks the wild type
sequence status.
"""
# check the wild type sequences
if self.has_wt_sequence():
for child in self.selection_list()[1:]:
if self.selection_list()[0].wt != child.wt:
self.logger.warning("Inconsistent wild type sequences")
break
for child in self.children:
child.validate()
def is_coding(self):
"""
Return ``True`` if all :py:class:`~selection.Selection` objects in the
:py:class:`~experiment.Experiment` count protein-coding variants, else
``False``.
"""
return all(x.is_coding() for x in self.selection_list())
def has_wt_sequence(self):
"""
Return ``True`` if all :py:class:`~selection.Selection` objects in the
:py:class:`~experiment.Experiment` have a wild type sequence, else
``False``.
"""
return all(x.has_wt_sequence() for x in self.selection_list())
def calculate(self):
"""
Calculate scores for all :py:class:`~selection.Selection` objects.
"""
if len(self.labels) == 0:
raise ValueError(
"No data present across all conditions [{}]" "".format(self.name)
)
for s in self.selection_list():
s.calculate()
self.combine_barcode_maps()
for label in self.labels:
self.calc_counts(label)
if self.scoring_method != "counts":
self.calc_shared_full(label)
self.calc_shared(label)
self.calc_scores(label)
if label != "barcodes":
self.calc_pvalues_wt(label)
def combine_barcode_maps(self):
"""
Combine all barcode maps for :py:class:`~selection.Selection` objects
into a single data frame and store it in ``'/main/barcodemap'``.
If multiple variants or IDs map to the same barcode, only the first one
will be present in the barcode map table.
The ``'/main/barcodemap'`` table is not created if no
:py:class:`~selection.Selection` has barcode map information.
"""
if self.check_store("/main/barcodemap"):
return
bcm = None
for sel in self.selection_list():
if "/main/barcodemap" in sel.store.keys():
if bcm is None:
bcm = sel.store["/main/barcodemap"]
else:
bcm = bcm.join(
sel.store["/main/barcodemap"], rsuffix=".drop", how="outer"
)
new = bcm.loc[pd.isnull(bcm)
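# A minimal standalone sketch (an assumption, not the original implementation,
# which is truncated above) of the "first mapping wins" behaviour described in
# the combine_barcode_maps docstring: when several selections map the same
# barcode, only the first non-null value is kept.
import pandas as pd

bcm_a = pd.DataFrame({"value": ["varA", None]}, index=["AAAA", "CCCC"])
bcm_b = pd.DataFrame({"value": ["varB", "varC"]}, index=["AAAA", "CCCC"])

combined = bcm_a.join(bcm_b, rsuffix=".drop", how="outer")
combined["value"] = combined["value"].fillna(combined["value.drop"])
combined = combined.drop(columns="value.drop")
print(combined)  # AAAA keeps "varA" from the first map; CCCC falls back to "varC"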
from django.shortcuts import render
from django.views.generic import TemplateView
import pandas as pd
from .utils import clean_html
from form_submissions.models import FormResponse
from typeforms.models import Typeform
class DashboardView(TemplateView):
template_name = 'dashboard.html'
def get(self, request, typeform_uid):
typeform = Typeform.objects.get(uid=typeform_uid)
questions = typeform.payload['questions']
df_questions = pd.DataFrame(questions)
form_responses = FormResponse.objects.filter(typeform=typeform)
answers = [each.answers for each in form_responses if each.answers]
df_answers = pd.DataFrame(answers)
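# A hypothetical follow-on sketch (the column names below are assumptions; the
# real Typeform answer payload is not shown above): once the responses are in a
# DataFrame, simple per-question summaries for the dashboard can be computed
# with ordinary pandas aggregation.
import pandas as pd

df_answers = pd.DataFrame([
    {"q1": "yes", "q2": 4},
    {"q1": "no", "q2": None},
])
response_counts = df_answers.notnull().sum()  # answered responses per question
print(response_counts.to_dict())              # {'q1': 2, 'q2': 1}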
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster.bicluster import SpectralCoclustering
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool, ColumnDataSource
from itertools import product
######## practice pt1
x = pd.Series([6, 3, 8, 6], index=["q", "w", "e", "r"])
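# A small continuation sketch (not part of the original practice script) showing
# label-based alignment for the Series built above: arithmetic between two
# Series aligns on index labels and inserts NaN where a label is missing.
import pandas as pd

x = pd.Series([6, 3, 8, 6], index=["q", "w", "e", "r"])
y = pd.Series([7, 3, 5, 2], index=["e", "q", "r", "t"])
print(x + y)  # e=15.0, q=9.0, r=11.0, t=NaN, w=NaN (index is the union of labels)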
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
# the index is different.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
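        # approx=True is expected to rely on Spark's approximate distinct count
        # (HyperLogLog++), so the result can deviate from the exact count; a smaller
        # rsd tightens the relative error, as the second assertion shows.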
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
        # Assert that axis=1 is not supported yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
psdf = self.psdf
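        # _MissingPandasLikeDataFrame enumerates pandas APIs that are unsupported or
        # deprecated here; each one should raise PandasNotImplementedError when accessed.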
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
        # pandas seems to have a bug when an `np.array` is passed as the parameter
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
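        # pandas changed how isin treats None/NaN in the values list in 1.2, so older
        # pandas versions are compared against a hand-built expected frame instead.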
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
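        # Row order is not guaranteed for pandas-on-Spark results, so the helper below
        # sorts both sides and resets the index before comparing.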
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
        multi_index_pdf = pd.DataFrame(
            [[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]]
        )
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
# Assert behavior on string values
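        # clip on string columns is expected to be a no-op here: the result is compared
        # against the unmodified frame rather than against pandas.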
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Make sure the tests run, but we can't check the result because they are non-deterministic.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic function
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with duplicated columns in Series
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with duplicated columns in DataFrame
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
# check base function
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
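        # update() mutates the left frame in place; with overwrite=False, only entries
        # that are NA in the left frame should be filled from the right frame.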
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
        # Compare only the dtype values, not the column labels, by resetting the index
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
        # The results do not have the same column names
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Checking if both DataFrames have the same results
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
pdf_multi_level_cols2 = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
)
psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2)
self.assert_eq(
psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
)
pdf = pd.DataFrame(
{
("y", "c"): [True, True],
("x", "b"): [False, False],
("x", "c"): [True, False],
("y", "a"): [False, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index())
self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)
def test_unstack(self):
pdf = pd.DataFrame(
np.random.randn(3, 3),
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)
self.assert_eq(
psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True
)
def test_pivot_errors(self):
psdf = ps.range(10)
with self.assertRaisesRegex(ValueError, "columns should be set"):
psdf.pivot(index="id")
with self.assertRaisesRegex(ValueError, "values should be set"):
psdf.pivot(index="id", columns="id")
def test_pivot_table_errors(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5))
msg = "index should be a None or a list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index="c", columns="a", values="b")
msg = "pivot_table doesn't support aggfunc as dict and without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})
msg = "columns should be one column name."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})
msg = "Columns in aggfunc must be the same as values."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
)
msg = "values can't be a list without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"])
msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
)
msg = "values should be one column or list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values=(["b"], ["c"]))
msg = "aggfunc must be a dict mapping from column name to aggregate functions"
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)})
psdf = ps.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(
index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"}
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"})
def test_transpose(self):
        # TODO: what about a DataFrame with a random index?
pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"])
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]},
columns=["score", "kids", "age"],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
pdf3 = pd.DataFrame(
{
("cg1", "a"): [1, 2, 3],
("cg1", "b"): [4, 5, 6],
("cg2", "c"): [7, 8, 9],
("cg3", "d"): [9, 9, 9],
},
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
def _test_cummin(self, pdf, psdf):
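        # Shared checks for cummin, used by the plain and MultiIndex-columns tests below:
        # compare with pandas with NaNs skipped (default) and propagated (skipna=False).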
self.assert_eq(pdf.cummin(), psdf.cummin())
self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False))
self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum())
def test_cummin(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def test_cummin_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def _test_cummax(self, pdf, psdf):
self.assert_eq(pdf.cummax(), psdf.cummax())
self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False))
self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum())
def test_cummax(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def test_cummax_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def _test_cumsum(self, pdf, psdf):
self.assert_eq(pdf.cumsum(), psdf.cumsum())
self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False))
self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum())
def test_cumsum(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def test_cumsum_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def _test_cumprod(self, pdf, psdf):
self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True)
self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True)
self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True)
def test_cumprod(self):
pdf = pd.DataFrame(
[[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]],
columns=list("ABC"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_cumprod_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_drop_duplicates(self):
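        # drop_duplicates for every `keep` option, with plain and MultiIndex columns,
        # non-string column names, and inplace=True updating an anchored Series.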
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
# inplace is False
for keep in ["first", "last", False]:
with self.subTest(keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates("a", keep=keep).sort_index(),
psdf.drop_duplicates("a", keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
# inplace is False
for keep in ["first", "last", False]:
with self.subTest("multi-index columns", keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
psdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
psdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
)
# inplace is True
subset_list = [None, "a", ["a", "b"]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
pser = pdf.a
psser = psdf.a
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# multi-index columns, inplace is True
subset_list = [None, ("x", "a"), [("x", "a"), ("y", "b")]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
pser = pdf[("x", "a")]
psser = psdf[("x", "a")]
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# non-string names
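        # Note: `keep` still holds the last value (False) from the loop above, so only
        # keep=False is exercised here.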
pdf = pd.DataFrame(
{10: [1, 2, 2, 2, 3], 20: ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.drop_duplicates(10, keep=keep).sort_index(),
psdf.drop_duplicates(10, keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([10, 20], keep=keep).sort_index(),
psdf.drop_duplicates([10, 20], keep=keep).sort_index(),
)
def test_reindex(self):
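        # reindex against new labels, pandas Index/MultiIndex objects and their
        # pandas-on-Spark counterparts, with and without fill_value.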
index = pd.Index(["A", "B", "C", "D", "E"])
columns = pd.Index(["numbers"])
pdf = pd.DataFrame([1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns = pd.Index(["numbers"], name="cols")
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "B"]).sort_index(), psdf.reindex(index=["A", "B"]).sort_index()
)
self.assert_eq(
pdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
psdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
psdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"]).sort_index(),
psdf.reindex(columns=["numbers"]).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"], copy=True).sort_index(),
psdf.reindex(columns=["numbers"], copy=True).sort_index(),
)
# Using float as fill_value to avoid int64/32 clash
self.assert_eq(
pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
psdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"])
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
# Reindexing single Index on single Index
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = pd.DataFrame({"index2": ["A", "C", "D", "E", "0"]}).set_index("index2").index
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
# Reindexing MultiIndex on single Index
pindex = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("F", "G")], names=["name1", "name2"]
)
kindex = ps.from_pandas(pindex)
self.assert_eq(
pdf.reindex(index=pindex, fill_value=0.0).sort_index(),
psdf.reindex(index=kindex, fill_value=0.0).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=2))
self.assertRaises(TypeError, lambda: psdf.reindex(columns="numbers"))
self.assertRaises(TypeError, lambda: psdf.reindex(index=["A", "B", "C"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(index=123))
# Reindexing MultiIndex on MultiIndex
pdf = pd.DataFrame({"numbers": [1.0, 2.0, None]}, index=pindex)
psdf = ps.from_pandas(pdf)
pindex2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name1", "name2"]
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = (
pd.DataFrame({"index_level_1": ["A", "C", "I"], "index_level_2": ["G", "D", "J"]})
.set_index(["index_level_1", "index_level_2"])
.index
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", "numbers")], names=["cols1", "cols2"])
pdf.columns = columns
psdf.columns = columns
# Reindexing MultiIndex index on MultiIndex columns and MultiIndex index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
index = pd.Index(["A", "B", "C", "D", "E"])
pdf = pd.DataFrame(data=[1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
# Reindexing single Index on MultiIndex columns and single Index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
psdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
)
columns2 = pd.MultiIndex.from_tuples(
[("X", "numbers"), ("Y", "2"), ("Y", "3")], names=["cols3", "cols4"]
)
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["X"]))
self.assertRaises(ValueError, lambda: psdf.reindex(columns=[("X",)]))
def test_reindex_like(self):
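        # reindex_like should adopt the row and column labels of another DataFrame,
        # for single Index and MultiIndex on both axes.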
data = [[1.0, 2.0], [3.0, None], [None, 4.0]]
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
# Reindexing single Index on single Index
data2 = [[5.0, None], [6.0, 7.0], [8.0, None]]
index2 = pd.Index(["A", "C", "D"], name="index2")
columns2 = pd.Index(["numbers", "F"], name="cols2")
pdf2 = pd.DataFrame(data=data2, index=index2, columns=columns2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
pdf2 = pd.DataFrame({"index_level_1": ["A", "C", "I"]})
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2.set_index(["index_level_1"])).sort_index(),
psdf.reindex_like(psdf2.set_index(["index_level_1"])).sort_index(),
)
# Reindexing MultiIndex on single Index
index2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name3", "name4"]
)
pdf2 = pd.DataFrame(data=data2, index=index2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex_like(index2))
self.assertRaises(AssertionError, lambda: psdf2.reindex_like(psdf))
# Reindexing MultiIndex on MultiIndex
columns2 = pd.MultiIndex.from_tuples(
[("numbers", "third"), ("values", "second")], names=["cols3", "cols4"]
)
pdf2.columns = columns2
psdf2.columns = columns2
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["name1", "name2"]
)
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
def test_melt(self):
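        # melt with various id_vars/value_vars combinations; output row order is not
        # guaranteed, so results are sorted (and the default index reset) before comparing.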
pdf = pd.DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6], "C": [7, 8, 9]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars="A").sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars="A").sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["C"])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=["A"], value_vars=["C"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname")
.sort_values(["myVarname", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname"
).sort_values(["myVarname", "myValname"]),
)
self.assert_eq(
psdf.melt(value_vars=("A", "B"))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assertRaises(KeyError, lambda: psdf.melt(id_vars="Z"))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars="Z"))
# multi-index columns
TEN = 10.0
        TWENTY = 20.0
        columns = pd.MultiIndex.from_tuples([(TEN, "A"), (TEN, "B"), (TWENTY, "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["variable_0", "variable_1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable_0", "variable_1", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
            psdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWENTY, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
            pdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWENTY, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.rename(columns=name_like_string),
)
columns.names = ["v0", "v1"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["v0", "v1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["v0", "v1", "value"]),
)
self.assertRaises(ValueError, lambda: psdf.melt(id_vars=(TEN, "A")))
self.assertRaises(ValueError, lambda: psdf.melt(value_vars=(TEN, "A")))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[TEN]))
        self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[(TWENTY, "A")]))
        self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[TWENTY]))
        self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[(TWENTY, "A")]))
# non-string names
pdf.columns = [10.0, 20.0, 30.0]
psdf.columns = [10.0, 20.0, 30.0]
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=10.0).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=10.0).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0, 20.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0, 20.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0], value_vars=[30.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0], value_vars=[30.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(value_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
def test_all(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.all(), pdf.all())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.all(axis=1)
def test_any(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.any(), pdf.any())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.any(axis=1)
def test_rank(self):
pdf = pd.DataFrame(
data={"col1": [1, 2, 3, 1], "col2": [3, 4, 3, 1]},
columns=["col1", "col2"],
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
self.assert_eq(pdf.rank().sum(), psdf.rank().sum())
self.assert_eq(
pdf.rank(ascending=False).sort_index(), psdf.rank(ascending=False).sort_index()
)
self.assert_eq(pdf.rank(method="min").sort_index(), psdf.rank(method="min").sort_index())
self.assert_eq(pdf.rank(method="max").sort_index(), psdf.rank(method="max").sort_index())
self.assert_eq(
pdf.rank(method="first").sort_index(), psdf.rank(method="first").sort_index()
)
self.assert_eq(
pdf.rank(method="dense").sort_index(), psdf.rank(method="dense").sort_index()
)
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
psdf.rank(method="nothing")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "col1"), ("y", "col2")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
def test_round(self):
pdf = pd.DataFrame(
{
"A": [0.028208, 0.038683, 0.877076],
"B": [0.992815, 0.645646, 0.149370],
"C": [0.173891, 0.577595, 0.491027],
},
columns=["A", "B", "C"],
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
pser = pd.Series([1, 0, 2], index=["A", "B", "C"])
psser = ps.Series([1, 0, 2], index=["A", "B", "C"])
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(pdf.round({"A": 1, "C": 2}), psdf.round({"A": 1, "C": 2}))
self.assert_eq(pdf.round({"A": 1, "D": 2}), psdf.round({"A": 1, "D": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
msg = "decimals must be an integer, a dict-like or a Series"
with self.assertRaisesRegex(TypeError, msg):
psdf.round(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
pser = pd.Series([1, 0, 2], index=columns)
psser = ps.Series([1, 0, 2], index=columns)
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(
pdf.round({("X", "A"): 1, ("Y", "C"): 2}), psdf.round({("X", "A"): 1, ("Y", "C"): 2})
)
self.assert_eq(pdf.round({("X", "A"): 1, "Y": 2}), psdf.round({("X", "A"): 1, "Y": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
# non-string names
pdf = pd.DataFrame(
{
10: [0.028208, 0.038683, 0.877076],
20: [0.992815, 0.645646, 0.149370],
30: [0.173891, 0.577595, 0.491027],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.round({10: 1, 30: 2}), psdf.round({10: 1, 30: 2}))
def test_shift(self):
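        # shift by positive and negative periods and with fill_value; the expected frame
        # for fill_value is built by hand for compatibility with older pandas.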
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
self.assert_eq(pdf.shift().sum().astype(int), psdf.shift().sum())
        # Build the expected result by hand since pandas 0.23 does not support
        # the `fill_value` argument.
pdf1 = pd.DataFrame(
{"Col1": [0, 0, 0, 10, 20], "Col2": [0, 0, 0, 13, 23], "Col3": [0, 0, 0, 17, 27]},
index=pdf.index,
)
self.assert_eq(pdf1, psdf.shift(periods=3, fill_value=0))
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.shift(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
def test_diff(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.diff(), psdf.diff())
self.assert_eq(pdf.diff().diff(-1), psdf.diff().diff(-1))
self.assert_eq(pdf.diff().sum().astype(int), psdf.diff().sum())
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.diff(1.5)
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.diff(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.diff(), psdf.diff())
def test_duplicated(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 3], "b": [1, 1, 1, 4], "c": [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(keep="last").sort_index(),
psdf.duplicated(keep="last").sort_index(),
)
self.assert_eq(
pdf.duplicated(keep=False).sort_index(),
psdf.duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset="b").sort_index(),
psdf.duplicated(subset="b").sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=["b"]).sort_index(),
psdf.duplicated(subset=["b"]).sort_index(),
)
with self.assertRaisesRegex(ValueError, "'keep' only supports 'first', 'last' and False"):
psdf.duplicated(keep="false")
with self.assertRaisesRegex(KeyError, "'d'"):
psdf.duplicated(subset=["d"])
pdf.index.name = "x"
psdf.index.name = "x"
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
# multi-index
self.assert_eq(
pdf.set_index("a", append=True).duplicated().sort_index(),
psdf.set_index("a", append=True).duplicated().sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
psdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
psdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
)
        # multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=("x", "b")).sort_index(),
psdf.duplicated(subset=("x", "b")).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=[("x", "b")]).sort_index(),
psdf.duplicated(subset=[("x", "b")]).sort_index(),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 1, 2, 3], 20: [1, 1, 1, 4], 30: [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=10).sort_index(),
psdf.duplicated(subset=10).sort_index(),
)
def test_ffill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.ffill(), pdf.ffill())
self.assert_eq(psdf.ffill(limit=1), pdf.ffill(limit=1))
pser = pdf.y
psser = psdf.y
psdf.ffill(inplace=True)
pdf.ffill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[2]], pser[idx[2]])
def test_bfill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.bfill(), pdf.bfill())
self.assert_eq(psdf.bfill(limit=1), pdf.bfill(limit=1))
pser = pdf.x
psser = psdf.x
psdf.bfill(inplace=True)
pdf.bfill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[0]], pser[idx[0]])
def test_filter(self):
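        # filter by items/like/regex on both axes, including MultiIndex rows and columns,
        # plus the error cases for invalid arguments.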
pdf = pd.DataFrame(
{
"aa": ["aa", "bd", "bc", "ab", "ce"],
"ba": [1, 2, 3, 4, 5],
"cb": [1.0, 2.0, 3.0, 4.0, 5.0],
"db": [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index("aa")
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
with option_context("compute.isin_limit", 0):
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=["ba", "db"], axis=1).sort_index(),
pdf.filter(items=["ba", "db"], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
pdf = pdf.set_index("ba", append=True)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
pdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
)
with self.assertRaisesRegex(TypeError, "Unsupported type list"):
psdf.filter(items=[["aa", 1], ("bd", 2)], axis=0)
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
psdf.filter(items=[(), ("bd", 2)], axis=0)
self.assert_eq(psdf.filter(like="b", axis=0), pdf.filter(like="b", axis=0))
self.assert_eq(psdf.filter(regex="b.*", axis=0), pdf.filter(regex="b.*", axis=0))
with self.assertRaisesRegex(ValueError, "items should be a list-like object"):
psdf.filter(items="b")
with self.assertRaisesRegex(ValueError, "No axis named"):
psdf.filter(regex="b.*", axis=123)
with self.assertRaisesRegex(TypeError, "Must pass either `items`, `like`"):
psdf.filter()
with self.assertRaisesRegex(TypeError, "mutually exclusive"):
psdf.filter(regex="b.*", like="aaa")
# multi-index columns
pdf = pd.DataFrame(
{
("x", "aa"): ["aa", "ab", "bc", "bd", "ce"],
("x", "ba"): [1, 2, 3, 4, 5],
("y", "cb"): [1.0, 2.0, 3.0, 4.0, 5.0],
("z", "db"): [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index(("x", "aa"))
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
pdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
def test_pipe(self):
psdf = ps.DataFrame(
{"category": ["A", "A", "B"], "col1": [1, 2, 3], "col2": [4, 5, 6]},
columns=["category", "col1", "col2"],
)
self.assertRaisesRegex(
ValueError,
"arg is both the pipe target and a keyword argument",
lambda: psdf.pipe((lambda x: x, "arg"), arg="1"),
)
def test_transform(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=2).sort_index(),
pdf.transform(lambda x, y: x + y, y=2).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=1).sort_index(),
pdf.transform(lambda x, y: x + y, y=1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.transform(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
def test_apply(self):
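        # apply along both axes with positional and keyword extra arguments; lowering
        # compute.shortcut_limit re-runs the checks without the small-data shortcut.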
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.apply(1)
with self.assertRaisesRegex(TypeError, "The given function.*1 or 'column'; however"):
def f1(_) -> ps.DataFrame[int]:
pass
psdf.apply(f1, axis=0)
with self.assertRaisesRegex(TypeError, "The given function.*0 or 'index'; however"):
def f2(_) -> ps.Series[int]:
pass
psdf.apply(f2, axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
def test_apply_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
        # Type hints set the default column names, and pandas API on Spark uses the
        # default index, so both of those differences are ignored here.
actual = psdf.apply(identify1, axis=1)
expected = pdf.apply(identify1, axis=1)
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.apply(identify2, axis=1)
expected = pdf.apply(identify2, axis=1)
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_apply_batch(self):
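        # pandas_on_spark.apply_batch applies the function to each internal pandas
        # DataFrame batch; extra args/kwargs are forwarded, and non-frame results raise.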
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, a: pdf + a, args=(1,)).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, b: pdf + b, b=1).sort_index(),
(pdf + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.apply_batch(1)
with self.assertRaisesRegex(TypeError, "The given function.*frame as its type hints"):
def f2(_) -> ps.Series[int]:
pass
psdf.pandas_on_spark.apply_batch(f2)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.apply_batch(lambda pdf: 1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_apply_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
        # Type hints set the default column names, and pandas API on Spark uses the
        # default index, so both of those differences are ignored here.
actual = psdf.pandas_on_spark.apply_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.apply_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
def identify3(x) -> ps.DataFrame[float, [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify3)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
# For NumPy typing, NumPy version should be 1.21+ and Python version should be 3.8+
if sys.version_info >= (3, 8) and LooseVersion(np.__version__) >= LooseVersion("1.21"):
import numpy.typing as ntp
psdf = ps.from_pandas(pdf)
def identify4(
x,
) -> ps.DataFrame[float, [int, ntp.NDArray[int]]]: # type: ignore[name-defined]
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
arrays = [[1, 2, 3, 4, 5, 6, 7, 8, 9], ["a", "b", "c", "d", "e", "f", "g", "h", "i"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=idx,
)
psdf = ps.from_pandas(pdf)
def identify4(x) -> ps.DataFrame[[int, str], [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.index.names = ["number", "color"]
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
def identify5(
x,
) -> ps.DataFrame[
[("number", int), ("color", str)], [("a", int), ("b", List[int])] # noqa: F405
]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify5)
self.assert_eq(actual, pdf)
def test_transform_batch(self):
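        # transform_batch is like apply_batch but must preserve the input length, so
        # returning an aggregated result (e.g. pd.Series(1)) raises ValueError.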
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.c + 1).sort_index(),
(pdf.c + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b + 1).sort_index(),
(pdf.b + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.transform_batch(1)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.transform_batch(lambda pdf: 1)
with self.assertRaisesRegex(
ValueError, "transform_batch cannot produce aggregated results"
):
psdf.pandas_on_spark.transform_batch(lambda pdf: pd.Series(1))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_transform_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
        # Type hints set the default column names, and pandas API on Spark uses the
        # default index, so both of those differences are ignored here.
actual = psdf.pandas_on_spark.transform_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.transform_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_transform_batch_same_anchor(self):
psdf = ps.range(10)
psdf["d"] = psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.id + 1)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(pdf) -> ps.Series[np.int64]:
return pdf.id + 1
psdf["d"] = psdf.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(ser) -> ps.Series[np.int64]:
return ser + 1
psdf["d"] = psdf.id.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
def test_empty_timestamp(self):
pdf = pd.DataFrame(
{
"t": [
datetime(2019, 1, 1, 0, 0, 0),
datetime(2019, 1, 2, 0, 0, 0),
datetime(2019, 1, 3, 0, 0, 0),
]
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf[psdf["t"] != psdf["t"]], pdf[pdf["t"] != pdf["t"]])
self.assert_eq(psdf[psdf["t"] != psdf["t"]].dtypes, pdf[pdf["t"] != pdf["t"]].dtypes)
def test_to_spark(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(ValueError, "'index_col' cannot be overlapped"):
psdf.to_spark(index_col="a")
with self.assertRaisesRegex(ValueError, "length of index columns.*1.*3"):
psdf.to_spark(index_col=["x", "y", "z"])
def test_keys(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.keys(), pdf.keys())
def test_quantile(self):
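        # Approximate quantiles for scalar and list q, on empty frames and MultiIndex
        # columns, plus error cases for invalid q, accuracy and axis arguments.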
pdf, psdf = self.df_pair
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
self.assert_eq(psdf.loc[[]].quantile(0.5), pdf.loc[[]].quantile(0.5))
self.assert_eq(
psdf.loc[[]].quantile([0.25, 0.5, 0.75]), pdf.loc[[]].quantile([0.25, 0.5, 0.75])
)
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.quantile(0.5, axis=1)
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
psdf.quantile(accuracy="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q=["a"])
with self.assertRaisesRegex(
ValueError, r"percentiles should all be in the interval \[0, 1\]"
):
psdf.quantile(q=[1.1])
self.assert_eq(
psdf.quantile(0.5, numeric_only=False), pdf.quantile(0.5, numeric_only=False)
)
self.assert_eq(
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
pdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
)
# multi-index column
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
pdf = pd.DataFrame({"x": ["a", "b", "c"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile(0.5, numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False)
def test_pct_change(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [300, 200, 400, 200]},
index=np.random.rand(4),
)
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.pct_change(2), pdf.pct_change(2), check_exact=False)
self.assert_eq(psdf.pct_change().sum(), pdf.pct_change().sum(), check_exact=False)
def test_where(self):
pdf, psdf = self.df_pair
        # pandas requires the `axis` argument when `other` is a Series;
        # `axis` is not yet fully supported in pandas-on-Spark.
self.assert_eq(
psdf.where(psdf > 2, psdf.a + 10, axis=0), pdf.where(pdf > 2, pdf.a + 10, axis=0)
)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.where(1)
def test_mask(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.mask(1)
def test_query(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2), "C": range(10, 5, -1)})
psdf = ps.from_pandas(pdf)
exprs = ("A > B", "A < C", "C == B")
for expr in exprs:
self.assert_eq(psdf.query(expr), pdf.query(expr))
# test `inplace=True`
for expr in exprs:
dummy_psdf = psdf.copy()
dummy_pdf = pdf.copy()
pser = dummy_pdf.A
psser = dummy_psdf.A
dummy_pdf.query(expr, inplace=True)
dummy_psdf.query(expr, inplace=True)
self.assert_eq(dummy_psdf, dummy_pdf)
self.assert_eq(psser, pser)
# invalid values for `expr`
invalid_exprs = (1, 1.0, (exprs[0],), [exprs[0]])
for expr in invalid_exprs:
with self.assertRaisesRegex(
TypeError,
"expr must be a string to be evaluated, {} given".format(type(expr).__name__),
):
psdf.query(expr)
# invalid values for `inplace`
invalid_inplaces = (1, 0, "True", "False")
for inplace in invalid_inplaces:
with self.assertRaisesRegex(
TypeError,
'For argument "inplace" expected type bool, received type {}.'.format(
type(inplace).__name__
),
):
psdf.query("a < b", inplace=inplace)
        # MultiIndex columns are not supported
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
with self.assertRaisesRegex(TypeError, "Doesn't support for MultiIndex columns"):
psdf.query("('A', 'Z') > ('B', 'X')")
def test_take(self):
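        # take by positional indices (lists and ranges, positive and negative) on both
        # axes, with plain and MultiIndex columns; non-list-like indices raise TypeError.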
pdf = pd.DataFrame(
{"A": range(0, 50000), "B": range(100000, 0, -2), "C": range(100000, 50000, -1)}
)
psdf = ps.from_pandas(pdf)
# axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
pdf.columns = columns
# MultiIndex columns with axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# Checking the type of indices.
self.assertRaises(TypeError, lambda: psdf.take(1))
self.assertRaises(TypeError, lambda: psdf.take("1"))
self.assertRaises(TypeError, lambda: psdf.take({1, 2}))
self.assertRaises(TypeError, lambda: psdf.take({1: None, 2: None}))
def test_axes(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.axes, psdf.axes)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.axes, psdf.axes)
def test_udt(self):
sparse_values = {0: 0.1, 1: 1.1}
sparse_vector = SparseVector(len(sparse_values), sparse_values)
pdf = pd.DataFrame({"a": [sparse_vector], "b": [10]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_eval(self):
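        # eval for column expressions, assignment, scalar expressions and inplace=True;
        # MultiIndex columns are rejected with TypeError.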
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)})
psdf = ps.from_pandas(pdf)
# operation between columns (returns Series)
self.assert_eq(pdf.eval("A + B"), psdf.eval("A + B"))
self.assert_eq(pdf.eval("A + A"), psdf.eval("A + A"))
# assignment (returns DataFrame)
self.assert_eq(pdf.eval("C = A + B"), psdf.eval("C = A + B"))
self.assert_eq(pdf.eval("A = A + A"), psdf.eval("A = A + A"))
# operation between scalars (returns scalar)
self.assert_eq(pdf.eval("1 + 1"), psdf.eval("1 + 1"))
# complicated operations with assignment
self.assert_eq(
pdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
psdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
)
        # inplace=True (only supported for assignment)
pdf.eval("C = A + B", inplace=True)
psdf.eval("C = A + B", inplace=True)
self.assert_eq(pdf, psdf)
pser = pdf.A
psser = psdf.A
pdf.eval("A = B + C", inplace=True)
psdf.eval("A = B + C", inplace=True)
self.assert_eq(pdf, psdf)
self.assert_eq(pser, psser)
        # multi-index columns are not supported
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b"), ("z", "c")])
psdf.columns = columns
self.assertRaises(TypeError, lambda: psdf.eval("x.a + y.b"))
@unittest.skipIf(not have_tabulate, tabulate_requirement_message)
def test_to_markdown(self):
pdf = pd.DataFrame(data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.to_markdown(), psdf.to_markdown())
def test_cache(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
with psdf.spark.cache() as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(
repr(cached_df.spark.storage_level), repr(StorageLevel(True, True, False, True))
)
def test_persist(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
storage_levels = [
StorageLevel.DISK_ONLY,
StorageLevel.MEMORY_AND_DISK,
StorageLevel.MEMORY_ONLY,
StorageLevel.OFF_HEAP,
]
for storage_level in storage_levels:
with psdf.spark.persist(storage_level) as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(repr(cached_df.spark.storage_level), repr(storage_level))
self.assertRaises(TypeError, lambda: psdf.spark.persist("DISK_ONLY"))
def test_squeeze(self):
        axes = [None, 0, 1, "rows", "index", "columns"]
# Multiple columns
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"], index=["x", "y"])
psdf = ps.from_pandas(pdf)
        for axis in axes:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Multiple columns with MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")])
pdf.columns = columns
psdf.columns = columns
        for axis in axes:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value
pdf = pd.DataFrame([[1]], columns=["a"], index=["x"])
psdf = ps.from_pandas(pdf)
        for axis in axes:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value with MultiIndex column
columns = pd.MultiIndex.from_tuples([("A", "Z")])
pdf.columns = columns
psdf.columns = columns
        for axis in axes:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values
pdf = pd.DataFrame([1, 2, 3, 4], columns=["a"])
psdf = ps.from_pandas(pdf)
        for axis in axes:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values with MultiIndex column
pdf.columns = columns
psdf.columns = columns
        for axis in axes:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
def test_rfloordiv(self):
pdf = pd.DataFrame(
{"angles": [0, 3, 4], "degrees": [360, 180, 360]},
index=["circle", "triangle", "rectangle"],
columns=["angles", "degrees"],
)
psdf = ps.from_pandas(pdf)
expected_result = pdf.rfloordiv(10)
self.assert_eq(psdf.rfloordiv(10), expected_result)
def test_truncate(self):
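        # truncate before/after on ascending and descending indexes and on columns
        # (axis=1), plus errors for an unsorted index and reversed bounds.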
pdf1 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[1000, 550, 400, 0, -1, -20, -500],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf = ps.DataFrame(
{"A": ["b", "c", "d"], "B": ["i", "j", "k"], "C": ["p", "q", "r"]},
index=[550, 400, 0],
)
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "Z")])
pdf1.columns = columns
psdf1.columns = columns
pdf2.columns = columns
psdf2.columns = columns
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf.columns = columns
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# Exceptions
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, 100, 400, 0, -1, 550, -20],
)
msg = "truncate requires a sorted index"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate()
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
msg = "Truncate: -20 must be after 400"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate(400, -20)
msg = "Truncate: B must be after C"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate("C", "B", axis=1)
def test_explode(self):
pdf = pd.DataFrame({"A": [[-1.0, np.nan], [0.0, np.inf], [1.0, -np.inf]], "B": 1})
pdf.index.name = "index"
pdf.columns.name = "columns"
psdf = ps.from_pandas(pdf)
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.name, expected_result1.index.name)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex
midx = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf.index = midx
psdf = ps.from_pandas(pdf)
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.names, expected_result1.index.names)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")], names=["column1", "column2"])
pdf.columns = columns
psdf.columns = columns
expected_result1 = pdf.explode(("A", "Z"))
expected_result2 = pdf.explode(("B", "X"))
expected_result3 = pdf.A.explode("Z")
self.assert_eq(psdf.explode(("A", "Z")), expected_result1, almost=True)
self.assert_eq(psdf.explode(("B", "X")), expected_result2)
self.assert_eq(psdf.explode(("A", "Z")).index.names, expected_result1.index.names)
self.assert_eq(psdf.explode(("A", "Z")).columns.names, expected_result1.columns.names)
self.assert_eq(psdf.A.explode("Z"), expected_result3, almost=True)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
self.assertRaises(ValueError, lambda: psdf.explode("A"))
def test_spark_schema(self):
psdf = ps.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
columns=["a", "b", "c", "d", "e", "f"],
)
actual = psdf.spark.schema()
expected = (
StructType()
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
actual = psdf.spark.schema("index")
expected = (
StructType()
.add("index", "long", False)
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
def test_print_schema(self):
psdf = ps.DataFrame(
{"a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1")},
columns=["a", "b", "c"],
)
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
psdf.spark.print_schema()
actual = out.getvalue().strip()
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
out = StringIO()
sys.stdout = out
psdf.spark.print_schema(index_col="index")
actual = out.getvalue().strip()
self.assertTrue("index: long" in actual, actual)
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
finally:
sys.stdout = prev
def test_explain_hint(self):
psdf1 = ps.DataFrame(
{"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 5]},
columns=["lkey", "value"],
)
psdf2 = ps.DataFrame(
{"rkey": ["foo", "bar", "baz", "foo"], "value": [5, 6, 7, 8]},
columns=["rkey", "value"],
)
merged = psdf1.merge(psdf2.spark.hint("broadcast"), left_on="lkey", right_on="rkey")
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
merged.spark.explain()
actual = out.getvalue().strip()
self.assertTrue("Broadcast" in actual, actual)
finally:
sys.stdout = prev
def test_mad(self):
pdf = pd.DataFrame(
{
"A": [1, 2, None, 4, np.nan],
"B": [-0.1, 0.2, -0.3, np.nan, 0.5],
"C": ["a", "b", "c", "d", "e"],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
with self.assertRaises(ValueError):
psdf.mad(axis=2)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("A", "Z")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
pdf = pd.DataFrame({"A": [True, True, False, False], "B": [True, False, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
def test_abs(self):
pdf = pd.DataFrame({"a": [-2, -1, 0, 1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(abs(psdf), abs(pdf))
self.assert_eq(np.abs(psdf), np.abs(pdf))
def test_iteritems(self):
pdf = pd.DataFrame(
{"species": ["bear", "bear", "marsupial"], "population": [1864, 22000, 80000]},
index=["panda", "polar", "koala"],
columns=["species", "population"],
)
psdf = ps.from_pandas(pdf)
for (p_name, p_items), (k_name, k_items) in zip(pdf.iteritems(), psdf.iteritems()):
self.assert_eq(p_name, k_name)
self.assert_eq(p_items, k_items)
def test_tail(self):
pdf = pd.DataFrame({"x": range(1000)})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.tail(), psdf.tail())
self.assert_eq(pdf.tail(10), psdf.tail(10))
self.assert_eq(pdf.tail(-990), psdf.tail(-990))
self.assert_eq(pdf.tail(0), psdf.tail(0))
self.assert_eq(pdf.tail(-1001), psdf.tail(-1001))
self.assert_eq(pdf.tail(1001), psdf.tail(1001))
self.assert_eq((pdf + 1).tail(), (psdf + 1).tail())
self.assert_eq((pdf + 1).tail(10), (psdf + 1).tail(10))
self.assert_eq((pdf + 1).tail(-990), (psdf + 1).tail(-990))
self.assert_eq((pdf + 1).tail(0), (psdf + 1).tail(0))
self.assert_eq((pdf + 1).tail(-1001), (psdf + 1).tail(-1001))
self.assert_eq((pdf + 1).tail(1001), (psdf + 1).tail(1001))
with self.assertRaisesRegex(TypeError, "bad operand type for unary -: 'str'"):
psdf.tail("10")
def test_last_valid_index(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, None], "b": [1.0, 2.0, 3.0, None], "c": [100, 200, 400, None]},
index=["Q", "W", "E", "R"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
self.assert_eq(pdf[[]].last_valid_index(), psdf[[]].last_valid_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
# Empty DataFrame
pdf = pd.Series([]).to_frame()
psdf = ps.Series([]).to_frame()
self.assert_eq(pdf.last_valid_index(), psdf.last_valid_index())
def test_last(self):
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pdf = pd.DataFrame([1, 2, 3, 4], index=index)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.last("1D"), psdf.last("1D"))
self.assert_eq(pdf.last(DateOffset(days=1)), psdf.last(DateOffset(days=1)))
with self.assertRaisesRegex(TypeError, "'last' only supports a DatetimeIndex"):
ps.DataFrame([1, 2, 3, 4]).last("1D")
def test_first(self):
index = pd.date_range("2018-04-09", periods=4, freq="2D")
pdf = pd.DataFrame([1, 2, 3, 4], index=index)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first("1D"), psdf.first("1D"))
self.assert_eq(pdf.first(DateOffset(days=1)), psdf.first(DateOffset(days=1)))
with self.assertRaisesRegex(TypeError, "'first' only supports a DatetimeIndex"):
ps.DataFrame([1, 2, 3, 4]).first("1D")
def test_first_valid_index(self):
pdf = pd.DataFrame(
{"a": [None, 2, 3, 2], "b": [None, 2.0, 3.0, 1.0], "c": [None, 200, 400, 200]},
index=["Q", "W", "E", "R"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
self.assert_eq(pdf[[]].first_valid_index(), psdf[[]].first_valid_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
# Empty DataFrame
pdf = pd.Series([]).to_frame()
psdf = ps.Series([]).to_frame()
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
pdf = pd.DataFrame(
{"a": [None, 2, 3, 2], "b": [None, 2.0, 3.0, 1.0], "c": [None, 200, 400, 200]},
index=[
datetime(2021, 1, 1),
datetime(2021, 2, 1),
datetime(2021, 3, 1),
datetime(2021, 4, 1),
],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.first_valid_index(), psdf.first_valid_index())
def test_product(self):
pdf = pd.DataFrame(
{"A": [1, 2, 3, 4, 5], "B": [10, 20, 30, 40, 50], "C": ["a", "b", "c", "d", "e"]}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# Named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# Named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# No numeric columns
pdf = pd.DataFrame({"key": ["a", "b", "c"], "val": ["x", "y", "z"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index())
# No numeric named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# No numeric MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# No numeric named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), almost=True)
# All NaN columns
pdf = pd.DataFrame(
{
"A": [np.nan, np.nan, np.nan, np.nan, np.nan],
"B": [10, 20, 30, 40, 50],
"C": ["a", "b", "c", "d", "e"],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN named columns
pdf.columns.name = "Koalas"
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN MultiIndex columns
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
# All NaN named MultiIndex columns
pdf.columns.names = ["Hello", "Koalas"]
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.prod(), psdf.prod().sort_index(), check_exact=False)
def test_from_dict(self):
data = {"row_1": [3, 2, 1, 0], "row_2": [10, 20, 30, 40]}
pdf = pd.DataFrame.from_dict(data)
psdf = ps.DataFrame.from_dict(data)
self.assert_eq(pdf, psdf)
pdf = pd.DataFrame.from_dict(data, dtype="int8")
psdf = ps.DataFrame.from_dict(data, dtype="int8")
self.assert_eq(pdf, psdf)
pdf = pd.DataFrame.from_dict(data, orient="index", columns=["A", "B", "C", "D"])
psdf = ps.DataFrame.from_dict(data, orient="index", columns=["A", "B", "C", "D"])
self.assert_eq(pdf, psdf)
def test_pad(self):
pdf = pd.DataFrame(
{
"A": [None, 3, None, None],
"B": [2, 4, None, 3],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pdf.pad(), psdf.pad())
# Test `inplace=True`
pdf.pad(inplace=True)
psdf.pad(inplace=True)
self.assert_eq(pdf, psdf)
else:
expected = ps.DataFrame(
{
"A": [None, 3, 3, 3],
"B": [2.0, 4.0, 4.0, 3.0],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
self.assert_eq(expected, psdf.pad())
# Test `inplace=True`
psdf.pad(inplace=True)
self.assert_eq(expected, psdf)
def test_backfill(self):
pdf = pd.DataFrame(
{
"A": [None, 3, None, None],
"B": [2, 4, None, 3],
"C": [None, None, None, 1],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
psdf = ps.from_pandas(pdf)
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
self.assert_eq(pdf.backfill(), psdf.backfill())
# Test `inplace=True`
pdf.backfill(inplace=True)
psdf.backfill(inplace=True)
self.assert_eq(pdf, psdf)
else:
expected = ps.DataFrame(
{
"A": [3.0, 3.0, None, None],
"B": [2.0, 4.0, 3.0, 3.0],
"C": [1.0, 1.0, 1.0, 1.0],
"D": [0, 1, 5, 4],
},
columns=["A", "B", "C", "D"],
)
self.assert_eq(expected, psdf.backfill())
# Test `inplace=True`
psdf.backfill(inplace=True)
self.assert_eq(expected, psdf)
def test_align(self):
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
psdf1 = ps.from_pandas(pdf1)
for join in ["outer", "inner", "left", "right"]:
for axis in [None, 0, 1]:
psdf_l, psdf_r = psdf1.align(psdf1[["b"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1.align(pdf1[["b"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1[["a"]].align(psdf1[["b", "a"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1[["a"]].align(pdf1[["b", "a"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1[["b", "a"]].align(psdf1[["a"]], join=join, axis=axis)
pdf_l, pdf_r = pdf1[["b", "a"]].align(pdf1[["a"]], join=join, axis=axis)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psdf_r = psdf1.align(psdf1["b"], axis=0)
pdf_l, pdf_r = pdf1.align(pdf1["b"], axis=0)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psdf_r, pdf_r)
psdf_l, psser_b = psdf1[["a"]].align(psdf1["b"], axis=0)
pdf_l, pser_b = pdf1[["a"]].align(pdf1["b"], axis=0)
self.assert_eq(psdf_l, pdf_l)
self.assert_eq(psser_b, pser_b)
self.assertRaises(ValueError, lambda: psdf1.align(psdf1, join="unknown"))
self.assertRaises(ValueError, lambda: psdf1.align(psdf1["b"]))
self.assertRaises(TypeError, lambda: psdf1.align(["b"]))
self.assertRaises(NotImplementedError, lambda: psdf1.align(psdf1["b"], axis=1))
pdf2 = pd.DataFrame({"a": [4, 5, 6], "d": ["d", "e", "f"]}, index=[10, 11, 12])
psdf2 = ps.from_pandas(pdf2)
for join in ["outer", "inner", "left", "right"]:
psdf_l, psdf_r = psdf1.align(psdf2, join=join, axis=1)
pdf_l, pdf_r = pdf1.align(pdf2, join=join, axis=1)
self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
def test_between_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pdf = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
pdf.index.name = "ts"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Column label is 'index'
pdf.columns = pd.Index(["index"])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Both index name and column label are 'index'
pdf.index.name = "index"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
# Index name is 'index', column label is ('X', 'A')
pdf.columns = pd.MultiIndex.from_arrays([["X"], ["A"]])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.between_time("0:15", "0:45").sort_index(),
psdf.between_time("0:15", "0:45").sort_index(),
)
with self.assertRaisesRegex(
NotImplementedError, "between_time currently only works for axis=0"
):
psdf.between_time("0:15", "0:45", axis=1)
psdf = ps.DataFrame({"A": [1, 2, 3, 4]})
with self.assertRaisesRegex(TypeError, "Index must be DatetimeIndex"):
psdf.between_time("0:15", "0:45")
def test_at_time(self):
idx = pd.date_range("2018-04-09", periods=4, freq="1D20min")
pdf = pd.DataFrame({"A": [1, 2, 3, 4]}, index=idx)
psdf = ps.from_pandas(pdf)
psdf.at_time("0:20")
self.assert_eq(
pdf.at_time("0:20").sort_index(),
psdf.at_time("0:20").sort_index(),
)
# Index name is 'ts'
pdf.index.name = "ts"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:20").sort_index(),
psdf.at_time("0:20").sort_index(),
)
# Index name is 'ts', column label is 'index'
pdf.columns = pd.Index(["index"])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
# Both index name and column label are 'index'
pdf.index.name = "index"
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
# Index name is 'index', column label is ('X', 'A')
pdf.columns = pd.MultiIndex.from_arrays([["X"], ["A"]])
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.at_time("0:40").sort_index(),
psdf.at_time("0:40").sort_index(),
)
with self.assertRaisesRegex(NotImplementedError, "'asof' argument is not supported"):
psdf.at_time("0:15", asof=True)
with self.assertRaisesRegex(NotImplementedError, "at_time currently only works for axis=0"):
psdf.at_time("0:15", axis=1)
psdf = ps.DataFrame({"A": [1, 2, 3, 4]})
with self.assertRaisesRegex(TypeError, "Index must be DatetimeIndex"):
psdf.at_time("0:15")
def test_astype(self):
psdf = self.psdf
msg = "Only a column name can be used for the key in a dtype mappings argument."
with self.assertRaisesRegex(KeyError, msg):
psdf.astype({"c": float})
def test_describe(self):
pdf, psdf = self.df_pair
# numeric columns
self.assert_eq(psdf.describe(), pdf.describe())
psdf.a += psdf.a
pdf.a += pdf.a
self.assert_eq(psdf.describe(), pdf.describe())
# string columns
psdf = ps.DataFrame({"A": ["a", "b", "b", "c"], "B": ["d", "e", "f", "f"]})
pdf = psdf.to_pandas()
self.assert_eq(psdf.describe(), pdf.describe().astype(str))
psdf.A += psdf.A
pdf.A += pdf.A
self.assert_eq(psdf.describe(), pdf.describe().astype(str))
# timestamp columns
psdf = ps.DataFrame(
{
"A": [
pd.Timestamp("2020-10-20"),
pd.Timestamp("2021-06-02"),
pd.Timestamp("2021-06-02"),
pd.Timestamp("2022-07-11"),
],
"B": [
pd.Timestamp("2021-11-20"),
pd.Timestamp("2023-06-02"),
pd.Timestamp("2026-07-11"),
pd.Timestamp("2026-07-11"),
],
}
)
pdf = psdf.to_pandas()
# NOTE: Set `datetime_is_numeric=True` for pandas:
# FutureWarning: Treating datetime data as categorical rather than numeric in `.describe` is deprecated
# and will be removed in a future version of pandas. Specify `datetime_is_numeric=True` to silence this
# warning and adopt the future behavior now.
# NOTE: Compare the result except percentiles, since we use approximate percentile
# so the result is different from pandas.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(
psdf.describe().loc[["count", "mean", "min", "max"]],
pdf.describe(datetime_is_numeric=True)
.astype(str)
.loc[["count", "mean", "min", "max"]],
)
else:
self.assert_eq(
psdf.describe(),
ps.DataFrame(
{
"A": [
"4",
"2021-07-16 18:00:00",
"2020-10-20 00:00:00",
"2020-10-20 00:00:00",
"2021-06-02 00:00:00",
"2021-06-02 00:00:00",
"2022-07-11 00:00:00",
],
"B": [
"4",
"2024-08-02 18:00:00",
"2021-11-20 00:00:00",
"2021-11-20 00:00:00",
"2023-06-02 00:00:00",
"2026-07-11 00:00:00",
"2026-07-11 00:00:00",
],
},
index=["count", "mean", "min", "25%", "50%", "75%", "max"],
),
)
# String & timestamp columns
psdf = ps.DataFrame(
{
"A": ["a", "b", "b", "c"],
"B": [
pd.Timestamp("2021-11-20"),
pd.Timestamp("2023-06-02"),
pd.Timestamp("2026-07-11"),
pd.Timestamp("2026-07-11"),
],
}
)
pdf = psdf.to_pandas()
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(
psdf.describe().loc[["count", "mean", "min", "max"]],
pdf.describe(datetime_is_numeric=True)
.astype(str)
.loc[["count", "mean", "min", "max"]],
)
psdf.A += psdf.A
pdf.A += pdf.A
self.assert_eq(
psdf.describe().loc[["count", "mean", "min", "max"]],
pdf.describe(datetime_is_numeric=True)
.astype(str)
.loc[["count", "mean", "min", "max"]],
)
else:
expected_result = ps.DataFrame(
{
"B": [
"4",
"2024-08-02 18:00:00",
"2021-11-20 00:00:00",
"2021-11-20 00:00:00",
"2023-06-02 00:00:00",
"2026-07-11 00:00:00",
"2026-07-11 00:00:00",
]
},
index=["count", "mean", "min", "25%", "50%", "75%", "max"],
)
self.assert_eq(
psdf.describe(),
expected_result,
)
psdf.A += psdf.A
self.assert_eq(
psdf.describe(),
expected_result,
)
# Numeric & timestamp columns
psdf = ps.DataFrame(
{
"A": [1, 2, 2, 3],
"B": [
pd.Timestamp("2021-11-20"),
pd.Timestamp("2023-06-02"),
pd.Timestamp("2026-07-11"),
pd.Timestamp("2026-07-11"),
],
}
)
pdf = psdf.to_pandas()
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
pandas_result = pdf.describe(datetime_is_numeric=True)
pandas_result.B = pandas_result.B.astype(str)
self.assert_eq(
psdf.describe().loc[["count", "mean", "min", "max"]],
pandas_result.loc[["count", "mean", "min", "max"]],
)
psdf.A += psdf.A
pdf.A += pdf.A
pandas_result = pdf.describe(datetime_is_numeric=True)
pandas_result.B = pandas_result.B.astype(str)
self.assert_eq(
psdf.describe().loc[["count", "mean", "min", "max"]],
pandas_result.loc[["count", "mean", "min", "max"]],
)
else:
self.assert_eq(
psdf.describe(),
ps.DataFrame(
{
"A": [4, 2, 1, 1, 2, 2, 3, 0.816497],
"B": [
"4",
"2024-08-02 18:00:00",
"2021-11-20 00:00:00",
"2021-11-20 00:00:00",
"2023-06-02 00:00:00",
"2026-07-11 00:00:00",
"2026-07-11 00:00:00",
"None",
],
},
index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"],
),
)
psdf.A += psdf.A
self.assert_eq(
psdf.describe(),
ps.DataFrame(
{
"A": [4, 4, 2, 2, 4, 4, 6, 1.632993],
"B": [
"4",
"2024-08-02 18:00:00",
"2021-11-20 00:00:00",
"2021-11-20 00:00:00",
"2023-06-02 00:00:00",
"2026-07-11 00:00:00",
"2026-07-11 00:00:00",
"None",
],
},
index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"],
),
)
# Include None column
psdf = ps.DataFrame(
{
"a": [1, 2, 3],
"b": [pd.Timestamp(1), pd.Timestamp(1), pd.Timestamp(1)],
"c": [None, None, None],
}
)
pdf = psdf.to_pandas()
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
pandas_result = pdf.describe(datetime_is_numeric=True)
pandas_result.b = pandas_result.b.astype(str)
self.assert_eq(
psdf.describe().loc[["count", "mean", "min", "max"]],
pandas_result.loc[["count", "mean", "min", "max"]],
)
else:
self.assert_eq(
psdf.describe(),
ps.DataFrame(
{
"a": [3.0, 2.0, 1.0, 1.0, 2.0, 3.0, 3.0, 1.0],
"b": [
"3",
"1970-01-01 00:00:00.000001",
"1970-01-01 00:00:00.000001",
"1970-01-01 00:00:00.000001",
"1970-01-01 00:00:00.000001",
"1970-01-01 00:00:00.000001",
"1970-01-01 00:00:00.000001",
"None",
],
},
index=["count", "mean", "min", "25%", "50%", "75%", "max", "std"],
),
)
msg = r"Percentiles should all be in the interval \[0, 1\]"
with self.assertRaisesRegex(ValueError, msg):
psdf.describe(percentiles=[1.1])
psdf = ps.DataFrame()
msg = "Cannot describe a DataFrame without columns"
with self.assertRaisesRegex(ValueError, msg):
psdf.describe()
def test_describe_empty(self):
# Empty DataFrame
psdf = ps.DataFrame(columns=["A", "B"])
pdf = psdf.to_pandas()
self.assert_eq(
psdf.describe(),
pdf.describe().astype(float),
)
# Explicit empty DataFrame numeric only
psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
pdf = psdf.to_pandas()
self.assert_eq(
psdf[psdf.a != psdf.a].describe(),
pdf[pdf.a != pdf.a].describe(),
)
# Explicit empty DataFrame string only
psdf = ps.DataFrame({"a": ["a", "b", "c"], "b": ["q", "w", "e"]})
pdf = psdf.to_pandas()
self.assert_eq(
psdf[psdf.a != psdf.a].describe(),
pdf[pdf.a != pdf.a].describe().astype(float),
)
# Explicit empty DataFrame timestamp only
psdf = ps.DataFrame(
{
"a": [pd.Timestamp(1), pd.Timestamp(1), pd.Timestamp(1)],
"b": [pd.Timestamp(1), pd.Timestamp(1), pd.Timestamp(1)],
}
)
pdf = psdf.to_pandas()
# For timestamp type, we should convert NaT to None in pandas result
# since pandas API on Spark doesn't support the NaT for object type.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
pdf_result = pdf[pdf.a != pdf.a].describe(datetime_is_numeric=True)
self.assert_eq(
psdf[psdf.a != psdf.a].describe(),
pdf_result.where(pdf_result.notnull(), None).astype(str),
)
else:
self.assert_eq(
psdf[psdf.a != psdf.a].describe(),
ps.DataFrame(
{
"a": [
"0",
"None",
"None",
"None",
"None",
"None",
"None",
],
"b": [
"0",
"None",
"None",
"None",
"None",
"None",
"None",
],
},
index=["count", "mean", "min", "25%", "50%", "75%", "max"],
),
)
# Explicit empty DataFrame numeric & timestamp
psdf = ps.DataFrame(
{"a": [1, 2, 3], "b": [pd.Timestamp(1), | pd.Timestamp(1) | pandas.Timestamp |
import re
import os
import sys
import pandas as pd
from lxml import etree
from scipy import stats
import gzip
from sqlalchemy import create_engine
class Polite():
"""
MALLET parameters used: 'output-topic-keys', 'output-doc-topics',
'word-topic-counts-file', 'topic-word-weights-file',
'xml-topic-report', 'xml-topic-phrase-report',
'diagnostics-file', 'output-state'
"""
class TableDef():
def __init__(self, index=[], cols=[]):
self.cols = cols
self.index = index
schema = dict(
DOC = TableDef(['doc_id']),
DOCTOPIC_NARROW = TableDef(['doc_id', 'topic_id']),
DOCTOPIC = TableDef(['doc_id']),
DOCWORD = TableDef(['doc_id', 'word_id']),
PHRASE = TableDef(['phrase_str']),
TOPIC = TableDef(['topic_id']),
TOPICPHRASE = TableDef(['topic_id', 'topic_phrase']),
TOPICWORD_DIAGS = TableDef(['topic_id', 'word_id']),
TOPICWORD_NARROW = TableDef(['word_id', 'topic_id']),
TOPICWORD_WEIGHTS = TableDef(['topic_id', 'word_str']),
TOPICWORD = TableDef(['word_id']),
VOCAB = TableDef(['word_id'])
)
def __init__(self, config_file, tables_dir='./', save_mode='csv'):
"""Initialize MALLET with trial name"""
self.config_file = config_file
self.tables_dir = tables_dir
self._convert_config_file()
self.save_mode = save_mode
if self.save_mode == 'sql':
engine = create_engine(f'sqlite:///{self.tables_dir}model.db', echo=True)
self.db = engine.connect()
def __del__(self):
if self.save_mode == 'sql':
self.db.close()
def save_table(self, df, table_name):
self.schema[table_name].cols = df.columns
if self.save_mode == 'sql':
df.to_sql(table_name, self.db, if_exists='replace', index=True)
elif self.save_mode == 'csv':
df.to_csv(self.tables_dir + f'{table_name}.csv')
def get_table(self, table_name):
index_cols = self.schema[table_name].index
if self.save_mode == 'sql':
df = pd.read_sql_table(table_name, self.db,
index_col = index_cols)
elif self.save_mode == 'csv':
df = pd.read_csv(self.tables_dir + f'{table_name}.csv',
index_col=index_cols)
else:
raise ValueError("No save method!")
return df
def _convert_config_file(self):
"""Converts the MALLLET config file into a Python dictionary."""
self.config = {}
with open(self.config_file, 'r') as cfg:
for line in cfg.readlines():
if not re.match(r'^#', line):
a, b = line.split()
b = b.strip()
if re.match(r'^\d+$', b):
b = int(b)
elif re.match(r'^\d+\.\d*$', b):
b = float(b)
elif re.match(r'^TRUE$', b, flags=re.IGNORECASE):
b = True
elif re.match(r'^FALSE$', b, flags=re.IGNORECASE):
b = False
self.config[a] = b
# config = pd.DataFrame(self.config)
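    # The parser above expects whitespace-separated "key value" lines, with '#' lines skipped;
    # a hypothetical config excerpt (values are illustrative only):
    #   num-topics 20
    #   output-topic-keys output/keys.txt
    #   output-state output/topic-state.gz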
def get_source_file(self, src_file_key):
src_file = self.config[src_file_key]
if not os.path.isfile(src_file):
print(f"File {src_file} for {src_file_key} does not exist. Try running MALLET first.")
sys.exit(1)
else:
return src_file
def import_table_state(self):
"""Import the state file into docword table"""
src_file = self.get_source_file('output-state')
with gzip.open(src_file, 'rb') as f:
docword = pd.DataFrame(
[line.split() for line in f.readlines()[3:]],
columns=['doc_id', 'src', 'word_pos', 'word_id', 'word_str', 'topic_id'])
docword = docword[['doc_id', 'word_id', 'word_pos', 'topic_id']]
docword = docword.astype('int')
docword = docword.set_index(['doc_id', 'word_id'])
# SAVE
self.save_table(docword, 'DOCWORD')
def import_table_topic(self):
"""Import data into topic table"""
src_file = self.get_source_file('output-topic-keys')
topic = pd.read_csv(src_file, sep='\t', header=None, index_col='topic_id',
names=['topic_id', 'topic_alpha', 'topic_words'])
topic['topic_alpha_zscore'] = stats.zscore(topic.topic_alpha)
# SAVE
self.save_table(topic, 'TOPIC')
def import_tables_topicword_and_word(self):
"""Import data into topicword and word tables"""
src_file = self.get_source_file('word-topic-counts-file')
WORD = []
TOPICWORD = []
with open(src_file, 'r') as src:
for line in src.readlines():
row = line.strip().split()
word_id, word_str = row[0:2]
WORD.append((int(word_id), word_str))
for item in row[2:]:
topic_id, word_count = item.split(':')
TOPICWORD.append((int(word_id), int(topic_id), int(word_count)))
# May use schema for indexes
word = | pd.DataFrame(WORD, columns=['word_id', 'word_str']) | pandas.DataFrame |
#!/usr/bin/env python3
from __future__ import print_function
from collections import defaultdict as dd
from collections import Counter
import os
import pysam
import argparse
from operator import itemgetter
import pandas as pd
import numpy as np
import scipy.stats as ss
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
#Illustrator compatibility
new_rc_params = {'text.usetex': False, "svg.fonttype": 'none'}
matplotlib.rcParams.update(new_rc_params)
sns.set_palette('viridis', n_colors=2)
from matplotlib import gridspec
from matplotlib.patches import ConnectionPatch
from uuid import uuid4
import gzip
class Gene:
def __init__(self, ensg, name):
self.ensg = ensg
self.name = name
self.tx_start = None
self.tx_end = None
self.cds_start = None
self.cds_end = None
self.exons = []
def add_exon(self, block):
assert len(block) == 2
assert block[0] < block[1]
self.exons.append(block)
self.exons = sorted(self.exons, key=itemgetter(0))
def add_tx(self, block):
assert len(block) == 2
assert block[0] < block[1]
if self.tx_start is None or self.tx_start > block[0]:
self.tx_start = block[0]
if self.tx_end is None or self.tx_end < block[1]:
self.tx_end = block[1]
def add_cds(self, block):
assert len(block) == 2
assert block[0] <= block[1]
if self.cds_start is None or self.cds_start > block[0]:
self.cds_start = block[0]
if self.cds_end is None or self.cds_end < block[1]:
self.cds_end = block[1]
def has_tx(self):
return None not in (self.tx_start, self.tx_end)
def has_cds(self):
return None not in (self.cds_start, self.cds_end)
def merge_exons(self):
new_exons = []
if len(self.exons) == 0:
return
last_block = self.exons[0]
for block in self.exons[1:]:
if min(block[1], last_block[1]) - max(block[0], last_block[0]) > 0: # overlap
last_block = [min(block[0], last_block[0]), max(block[1], last_block[1])]
else:
new_exons.append(last_block)
last_block = block
new_exons.append(last_block)
self.exons = new_exons
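# Gene usage sketch: overlapping exon blocks collapse once merge_exons() is called
# (the identifiers below are placeholders, not real Ensembl IDs):
#   g = Gene('ENSG_TEST', 'TEST'); g.add_exon([100, 200]); g.add_exon([150, 300])
#   g.merge_exons()   # g.exons -> [[100, 300]]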
class Read:
def __init__(self, read_name, cpg_loc, llr, phase=None):
self.read_name = read_name
self.llrs = {}
self.meth_calls = {}
self.phase = phase
self.add_cpg(cpg_loc, llr)
def add_cpg(self, cpg_loc, llr, cutoff = 2.5):
#assert abs(llr) > cutoff
self.llrs[cpg_loc] = llr
if llr > cutoff:
self.meth_calls[cpg_loc] = 1
elif llr < -1*cutoff:
self.meth_calls[cpg_loc] = -1
else:
self.meth_calls[cpg_loc] = 0
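# Read.meth_calls stores a ternary call per CpG: 1 (llr > cutoff), -1 (llr < -cutoff), 0 otherwise;
# e.g. Read('read1', 120, 3.0).meth_calls == {120: 1} with the default cutoff of 2.5 ('read1' is a placeholder).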
def exclude_ambiguous_reads(fn, chrom, start, end, min_mapq=10, tag_untagged=False, ignore_tags=False):
reads = {}
bam = pysam.AlignmentFile(fn)
for read in bam.fetch(chrom, start, end):
p = read.get_reference_positions()
if p[0] < start or p[-1] > end:
if read.mapq >= min_mapq:
phase = None
if tag_untagged or ignore_tags:
phase = 'unphased'
HP = None
PS = None
if not ignore_tags:
for tag in read.get_tags():
if tag[0] == 'HP':
HP = tag[1]
if tag[0] == 'PS':
PS = tag[1]
if None not in (HP, PS):
phase = str(PS) + ':' + str(HP)
reads[read.query_name] = phase
return reads
def get_ambiguous_reads(fn, chrom, start, end, min_mapq=10, w=50):
reads = []
bam = pysam.AlignmentFile(fn)
for read in bam.fetch(chrom, start, end):
p = read.get_reference_positions()
if read.mapq < min_mapq or (p[0] > start-w and p[-1] < end+w):
reads.append(read.query_name)
return reads
def get_reads(fn, chrom, start, end, min_mapq=10, tag_untagged=False, ignore_tags=False):
reads = {}
bam = pysam.AlignmentFile(fn)
for read in bam.fetch(chrom, start, end):
if read.mapq >= min_mapq:
phase = None
if tag_untagged or ignore_tags:
phase = 'unphased'
HP = None
PS = None
if not ignore_tags:
for tag in read.get_tags():
if tag[0] == 'HP':
HP = tag[1]
if tag[0] == 'PS':
PS = tag[1]
if None not in (HP, PS):
phase = str(PS) + ':' + str(HP)
reads[read.query_name] = phase
return reads
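# Both read collectors return {read_name: phase}; a hypothetical call (file and coordinates made up):
#   reads = get_reads('sample.bam', 'chr1', 1000000, 1005000, tag_untagged=True)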
def slide_window(meth_table, phase, width=20, slide=2):
midpt_min = min(meth_table['loc'])
midpt_max = max(meth_table['loc'])
phase_table = meth_table.loc[meth_table['phase'] == phase]
win_start = int(midpt_min - width/2)
win_end = win_start + width
meth_frac = {}
meth_n = {}
while int((win_start+win_end)/2) < midpt_max:
win_start += slide
win_end += slide
meth_count = len(meth_table.loc[(meth_table['phase'] == phase) & (meth_table['loc'] > win_start) & (meth_table['loc'] < win_end) & (meth_table['call'] == 1)])
unmeth_count = len(meth_table.loc[(meth_table['phase'] == phase) & (meth_table['loc'] > win_start) & (meth_table['loc'] < win_end) & (meth_table['call'] == -1)])
midpt = int((win_start+win_end)/2)
if meth_count + unmeth_count > 0:
meth_frac[midpt] = meth_count/(meth_count+unmeth_count)
meth_n[midpt] = meth_count+unmeth_count
return meth_frac, meth_n
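# Usage sketch, assuming meth_table carries the 'loc', 'phase' and 'call' columns built in main():
#   frac, n = slide_window(meth_table, phase='unphased', width=20, slide=2)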
def smooth(x, window_len=8, window='hanning'):
''' modified from scipy cookbook: https://scipy-cookbook.readthedocs.io/items/SignalSmooth.html '''
assert window_len % 2 == 0, '--smoothwindowsize must be an even number'
assert x.ndim == 1
assert x.size > window_len
if window_len<3:
return x
assert window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=eval('np.'+window+'(window_len)')
y=np.convolve(w/w.sum(),s,mode='valid')
return y[(int(window_len/2)-1):-(int(window_len/2))]
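# Illustrative smoothing of a windowed methylation-fraction trace (inputs are made up):
#   frac = np.array(list(meth_frac.values()), dtype=float)   # e.g. from slide_window()
#   smoothed = smooth(frac, window_len=8, window='hanning')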
def mask_methfrac(data, cutoff=20):
data = np.asarray(data)
data = data > int(cutoff)
segs = []
in_seg = False
seg_start = 0
for i in range(len(data)):
if data[i]:
if in_seg:
segs.append(list(range(seg_start, i)))
in_seg = False
else:
if not in_seg:
seg_start = i
in_seg = True
if in_seg:
segs.append(list(range(seg_start, len(data))))
return segs
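# mask_methfrac() returns runs of indices whose values sit at or below the cutoff, e.g.
#   mask_methfrac([10, 30, 40, 5], cutoff=20) -> [[0], [3]]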
def build_genes(gtf, chrom, start, end):
genes = {}
for line in gtf.fetch(chrom, start, end):
chrom, source, feature, start, end, score, strand, frame, attribs = line.split('\t')
block = [int(start), int(end)]
attribs = attribs.strip()
attr_dict = {}
for attrib in attribs.split(';'):
if attrib:
key, val = attrib.strip().split()[:2]
key = key.strip()
val = val.strip().strip('"')
attr_dict[key] = val
if 'gene_id' not in attr_dict:
continue
if 'gene_name' not in attr_dict:
continue
ensg = attr_dict['gene_id']
name = attr_dict['gene_name']
if ensg not in genes:
genes[ensg] = Gene(ensg, name)
if feature == 'exon':
genes[ensg].add_exon(block)
if feature == 'CDS':
genes[ensg].add_cds(block)
if feature == 'transcript':
genes[ensg].add_tx(block)
return genes
def main(args):
# set up
assert ':' in args.interval
assert '-' in args.interval
chrom, pos = args.interval.split(':')
elt_start, elt_end = map(int, pos.split('-'))
bamname = '.'.join(os.path.basename(args.bam).split('.')[:-1])
fn_prefix = bamname + '.' + '_'.join(args.interval.split(':')[:2])
# highlight
h_start = []
h_end = []
h_cpg_start = []
h_cpg_end = []
h_colors = sns.color_palette("Blues", n_colors=len(args.highlight.split(',')))
# get relevant genome chunk to tmp tsv
meth_tbx = pysam.Tabixfile(args.methdata)
tmp_methdata = fn_prefix+'.tmp.methdata.tsv'
with open(tmp_methdata, 'w') as meth_out:
# header
with gzip.open(args.methdata, 'rt') as _:
for line in _:
assert line.startswith('chromosome')
meth_out.write(line)
break
assert chrom in meth_tbx.contigs
for rec in meth_tbx.fetch(chrom, elt_start, elt_end):
meth_out.write(str(rec)+'\n')
# index by read_name
methdata = pd.read_csv(tmp_methdata, sep='\t', header=0, index_col=4)
if not args.keep_tmp_table:
os.remove(tmp_methdata)
    # get list of relevant reads (excludes reads not anchored outside interval)
reads = {}
if args.excl_ambig:
reads = exclude_ambiguous_reads(args.bam, chrom, elt_start, elt_end, tag_untagged=args.tag_untagged, ignore_tags=args.ignore_tags)
else:
reads = get_reads(args.bam, chrom, elt_start, elt_end, tag_untagged=args.tag_untagged, ignore_tags=args.ignore_tags)
readnames = []
for r in reads.keys():
if r in methdata.index:
readnames.append(r)
if args.unambig_highlight and args.highlight:
h_coords = []
for h in args.highlight.split(','):
if ':' in h:
h = h.split(':')[-1]
h_coords += map(int, h.split('-'))
h_coords.sort()
h_min, h_max = h_coords[0], h_coords[-1]
excl_reads = get_ambiguous_reads(args.bam, chrom, h_min, h_max)
print(excl_reads)
new_reads = []
for read in readnames:
if read not in excl_reads:
new_reads.append(read)
readnames = new_reads
methdata = methdata.loc[readnames]
methreads = {}
for index, row in methdata.iterrows():
r_start = row['start']
r_end = row['end']
llr = row['log_lik_ratio']
seq = row['sequence']
# get per-CG position (nanopolish/calculate_methylation_frequency.py)
cg_pos = seq.find("CG")
first_cg_pos = cg_pos
while cg_pos != -1:
cg_start = r_start + cg_pos - first_cg_pos
cg_pos = seq.find("CG", cg_pos + 1)
cg_elt_start = cg_start - elt_start
if cg_start >= elt_start and cg_start <= elt_end:
#print (cg_start, cg_elt_start, llr, index)
if index not in methreads:
methreads[index] = Read(index, cg_elt_start, llr, phase=reads[index])
else:
methreads[index].add_cpg(cg_elt_start, llr)
# table for plotting
meth_table = dd(dict)
phase_order = []
for name, read in methreads.items():
if read.phase is None:
continue
for loc in read.llrs.keys():
uuid = str(uuid4())
meth_table[uuid]['loc'] = loc
meth_table[uuid]['llr'] = read.llrs[loc]
meth_table[uuid]['read'] = name
meth_table[uuid]['phase'] = read.phase
meth_table[uuid]['call'] = read.meth_calls[loc]
if read.phase not in phase_order:
phase_order.append(read.phase)
meth_table = pd.DataFrame.from_dict(meth_table).T
meth_table['loc'] = | pd.to_numeric(meth_table['loc']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 10:46:32 2020
@author: OscarFlores-IFi
"""
#%%=========================================================================================================
# Libraries required to run the code
#===========================================================================================================
import time
import warnings
import os
import os.path
from concurrent.futures import ThreadPoolExecutor, as_completed
# import socket
# from multiprocessing.pool import Pool
# from multiprocessing import freeze_support
# from os import cpu_count
from datetime import date
from datetime import timedelta
import urllib.request
import json
import pandas as pd
import numpy as np
#%%=========================================================================================================
# Definition of the functions used
#===========================================================================================================
# Function that generates URL links for the CENACE Web Service
def ruta_descarga(zona_sel, Inicial, Mercado):
"""Esta función se encarga de generar la ruta de descarga a partir del formato del Servicio Web de CENACE. Para
hacer que dicha función trabaje, es necesario introducir una zona o selección de zonas (formato de arreglo) y la
fecha inicial a partir de la cual se quieren comenzar a recoger los datos."""
def Mes(fecha):
"""Esta función se encarga de agregar un 0 a los primeros 9 meses del año introduciendo como variable un elemento
que se encuentre en formato de fecha (perteneciente a la librería "datetime")."""
if fecha.month < 10:
return "0" + str(fecha.month)
else:
return str(fecha.month)
def Dia(fecha):
"""Esta función se encarga de agregar un 0 a los primeros 9 días del mes introduciendo como variable un elemento que
se encuentre en formato de fecha (perteneciente a la librería "datetime")."""
if fecha.day < 10:
return "0" + str(fecha.day)
else:
return str(fecha.day)
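    # e.g. Mes(date(2020, 3, 5)) -> "03" and Dia(date(2020, 3, 5)) -> "05" (zero-padding helpers)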
    # Base URL for the download
ruta = "https://ws01.cenace.gob.mx:8082/SWPEND/SIM/"
    # Interconnected system: Sistema Interconectado Nacional (SIN), Sistema Interconectado Baja California (BCA) and
    # Sistema Interconectado Baja California Sur (BCS)
sis_int = "SIN"
    # Process to be used: MDA or MTR
proc = Mercado # MDA / MTR
    # Join the previous variables into a single path
ruta = ruta + sis_int + "/" + proc + "/"
    # Add the zone (or zones) to the URL path. When there are multiple zones, commas are added to separate
    # the places to be analyzed
for zona in (zona_sel):
ruta = ruta + zona + ","
    # Using the date variable provided (the date from which to download), the date parts are appended to the URL
ruta = ruta[:-1] + "/" + str(Inicial.year) + "/" + Mes(Inicial) + "/" + Dia(Inicial) + "/"
    # Define the final date with a 6-day offset (query limit with the Web Service = 7 days)
Final = Inicial + timedelta(days = 6)
    # Append the new date parts to the URL
ruta = ruta + str(Final.year) + "/" + Mes(Final) + "/" + Dia(Final) + "/"
    # Output format: XML or JSON (lowercase)
formato = "json"
    # Build the final Web Service access URL
ruta = ruta + formato
return ruta
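# A worked example of the URL this builds (the zone name is illustrative only):
#   ruta_descarga(["APATZINGAN"], date(2020, 1, 1), "MDA")
#   -> "https://ws01.cenace.gob.mx:8082/SWPEND/SIM/SIN/MDA/APATZINGAN/2020/01/01/2020/01/07/json"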
# Function that extracts the information from the Web Service call in JSON format
def getDF(ruta):
"""Esta función se encarga de acceder a la información de Internet y extraer los datos."""
try:
        # Store the data returned by the URL call in a variable
data = json.loads(urllib.request.urlopen(ruta).read())
        # The JSON is used as a dictionary and the "key" containing the required information is retrieved.
resultados = {key: value for key, value in data.items() if key == "Resultados"}.get("Resultados")
        # Done iteratively in case there is more than one load zone, then appended to a dataframe
df = pd.concat([pd.DataFrame(pd.DataFrame(i)["Valores"].tolist()).join(pd.DataFrame(i)["zona_carga"]) for i in resultados])
print('.')
return [df]
except Exception as err:
print(err)
print('/')
return [[None]]
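# getDF returns a one-element list holding a DataFrame whose raw columns
# (fecha, hora, pz, pz_ene, pz_cng, pz_per, zona_carga) are renamed later by Renombrar(),
# or [[None]] when the request fails.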
# This function renames the original columns from the JSON call into the final columns of the
# database
def Renombrar(archivo_csv):
"""Esta función se encarga de renombrar las columnas originales de la llamada en JSON."""
archivo_csv.columns = [column.replace("pz_ene","Componente_Energia") for column in archivo_csv.columns]
archivo_csv.columns = [column.replace("pz_cng","Componente_Congestion") for column in archivo_csv.columns]
archivo_csv.columns = [column.replace("pz_per","Componente_Perdidas") for column in archivo_csv.columns]
archivo_csv.columns = [column.replace("zona_carga","Zona_de_Carga") for column in archivo_csv.columns]
archivo_csv.columns = [column.replace("pz","Precio_Zonal") for column in archivo_csv.columns]
archivo_csv.columns = [column.replace("fecha","Fecha") for column in archivo_csv.columns]
archivo_csv.columns = [column.replace("hora","Hora") for column in archivo_csv.columns]
# This function adds the day of the week corresponding to the date in the database
def Dia_Semana(archivo_csv):
"""Esta función se encarga de añadir el día de la semana correspondiente (Lunes, Martes, Miércoles, etc.)."""
days = {0:"Lunes", 1:"Martes", 2:"Miercoles", 3: "Jueves", 4:"Viernes", 5:"Sabado", 6:"Domingo"}
archivo_csv["Fecha"] = pd.to_datetime(archivo_csv["Fecha"])
archivo_csv["Dia_de_la_semana"] = archivo_csv["Fecha"].dt.dayofweek
archivo_csv["Dia_de_la_semana"] = archivo_csv["Dia_de_la_semana"].apply(lambda x: days[x])
# This function reports whether the day is a holiday and which holiday it corresponds to
def Festivos(archivo_csv):
"""Esta función se encarga de agregar el día festivo, tanto en fecha como en qué día festivo."""
ruta = r"Festivos.csv"
try:
Fest = pd.read_csv(ruta)
Fest["Fecha"]= | pd.to_datetime(Fest["Fecha"]) | pandas.to_datetime |
'''
Urban-PLUMBER processing code
Associated with the manuscript: Harmonized, gap-filled dataset from 20 urban flux tower sites
Copyright (c) 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License").
You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
'''
__title__ = "Quality control observations"
__version__ = "2021-09-20"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import numpy as np
import xarray as xr
import pandas as pd
import ephem
import matplotlib.pyplot as plt
import os
import requests
import pipeline_functions
pd.plotting.register_matplotlib_converters()
img_fmt = 'png'
################################################################################
def main(ds, sitedata, siteattrs, sitepath, plotdetail=False):
sitename = siteattrs['sitename']
window = 30
sigma = 4 # initial sigma at 4 standard deviations
offset_from_utc = ds.local_utc_offset_hours
# get all variables in dataset
all_variables = list(ds.keys())
# get qc fluxes
qc_list = [x for x in all_variables if '_qc' in x]
# remove qc variables from list
all_fluxes = [x for x in all_variables if '_qc' not in x]
# remove Rainf and Snowf from sigma filtering set
sigma_fluxes = [n for n in all_fluxes if n not in ['Rainf','Snowf']]
# clean raw obs (including filled by nearby obs)
to_clean = ds.to_dataframe()[all_fluxes]
print('removing out-of-range values')
cleaned1 = clean_out_of_range(to_clean,siteattrs)
print('removing night periods for SW')
# segmentation fault from ephem for these sites, use different method
if sitename in ['FI-Kumpula','NL-Amsterdam','AU-SurreyHills','CA-Sunset']:
cleaned2 = clean_night2(cleaned1,sitedata,siteattrs,offset_from_utc)
else:
cleaned2 = clean_night(cleaned1,sitedata,siteattrs,offset_from_utc)
print('removing values constant for 4 or more timesteps')
cleaned3 = clean_constant(cleaned2,siteattrs)
print(f'removing outliers > x sigma for each hour in {window} day window')
# pass cleaned fluxes through outlier check based on standard deviation x sigma for each hour, until clean
loop = 0
sigma_clean = cleaned3.copy()[sigma_fluxes]
while True:
# create sigma threshold for each flux being cleaned
sigma = 4 if loop == 0 else 5
sigma_vals = pd.Series(data=sigma,index=sigma_clean.columns.tolist())
if sitename in ['NL-Amsterdam']:
print('lowering sigma threshold for amsterdam Qh, Qle')
sigma_vals[['Qh','Qle']] = sigma_vals[['Qh','Qle']] - 1
print(f'pass: {loop}')
sigma_flagged, sigma_clean = calc_sigma_data(sigma_clean,sigma_vals=sigma_vals,sitepath=sitepath,window=window,plotdetail=False)
print('flagged results')
print(sigma_flagged.describe())
loop +=1
# finish when no additional outliers are found
if sigma_flagged.count().sum() == 0:
# calculate dirty from those removed in sigma cleaning
dirty = cleaned3[sigma_fluxes].where(sigma_clean.isna())
print('\nsigma outliers: \n%s\n' %dirty.count())
print(f'saving sigma stats to {sitename}_outliers_sigma.csv\n')
dirty.to_csv(f'{sitepath}/processing/{sitename}_outliers_sigma.csv')
break
    # remove outlier data for selected fluxes
clean = cleaned3.copy()
clean[sigma_fluxes] = sigma_clean[sigma_fluxes]
##########################################
# manual corrections from visual inspection (not picked up automatically)
if sitename == 'CA-Sunset':
# remove unrealistically low LW measurements
clean.loc['2014-04-30 18:00':'2014-04-30 22:00',['LWdown','LWup']] = np.nan
clean.loc['2014-05-03 17:00':'2014-05-03 19:00',['LWdown','LWup']] = np.nan
clean.loc['2014-05-05 18:00':'2014-05-05 22:00',['LWdown','LWup']] = np.nan
# remove unrealistically low SW measurements
clean.loc['2014-11-17 19:30':'2014-11-17 20:00',['SWdown','SWup']] = np.nan
# remove negative Qair values
clean['Qair'] = clean['Qair'].where(clean['Qair']>0)
# remove zero valued wind before 2012-10-28
clean.loc[:'2012-10-28 19:30:00',['Wind_N','Wind_E']] = np.nan
if sitename == 'US-Baltimore':
# remove spurious Qair outlier
clean.loc['2003-09-29 06:00:00','Qair'] = np.nan
# remove spurious nightime SWdown in September 2003
clean.loc['2003-09-06':'2003-09-08 08:00','SWdown'] = np.nan
clean.loc['2003-09-05 22:00:00','SWdown'] = np.nan
# remove spike in Tair
clean.loc['2002-07-10 10:00:00','Tair'] = np.nan
clean.loc['2002-07-10 20:00:00','Tair'] = np.nan
clean.loc['2003-10-08 17:00:00','Tair'] = np.nan
if sitename == 'PL-Lipowa':
# remove spurious wind
clean.loc['2010-01-09 18:00':'2010-01-21 06:00',['Wind_N','Wind_E']] = np.nan
if sitename == 'PL-Narutowicza':
# remove spurious wind (other periods match very well with ERA5, these diverge considerably)
clean.loc['2009-01-11',['Wind_N','Wind_E']] = np.nan
clean.loc['2009-06-29':'2009-07-04',['Wind_N','Wind_E']] = np.nan
clean.loc['2010-01-09 18:00':'2010-01-22',['Wind_N','Wind_E']] = np.nan
clean.loc['2010-12-12':'2010-12-17',['Wind_N','Wind_E']] = np.nan
if sitename == 'MX-Escandon':
# remove spurious pressure:
clean.loc['2012-06-05 02:00:00',['PSurf','Qair','Qtau']] = np.nan
clean.loc['2012-06-24 02:30:00',['PSurf','Qair','Qtau']] = np.nan
clean.loc['2012-06-24 02:00:00',['PSurf','Qair','Qtau']] = np.nan
if sitename == 'JP-Yoyogi':
# remove unrealistically low Qair values
clean['Qair'] = np.where(clean['Qair']<0.0002, np.nan, clean['Qair'])
##########################################
# calculate dirty from all missing clean
dirty = to_clean.where(clean.isna())
dirty.to_csv(f'{sitepath}/processing/{sitename}_dirty.csv')
# collect observed only to ascertain "missing"
obs_only,fill_only = pd.DataFrame(),pd.DataFrame()
for flux in all_fluxes:
obs_only[flux] = to_clean[flux].where(ds[f'{flux}_qc'].to_series() == 0)
fill_only[flux] = to_clean[flux].where(ds[f'{flux}_qc'].to_series() == 1)
# gather quality stats
stats = pd.DataFrame({
'missing' : 1. - to_clean.count()/len(to_clean),
'qc_flagged' : dirty.count()/len(to_clean),
'filled' : fill_only.count()/len(to_clean),
'available' : clean.count()/len(to_clean),
})
print(stats)
print(f'saving stats to {sitename}_cleanstats.csv\n')
stats.to_csv(f'{sitepath}/processing/{sitename}_cleanstats.csv',float_format='%.4f')
# place qc flags back in to cleaned df if still present, or fill with qc=3 if missing
for key in qc_list:
orig_qc = ds.to_dataframe()[qc_list][key]
clean[key] = np.where(clean[key[:-3]].isna(), 3, orig_qc)
if plotdetail:
# convert to dataframe in local time
local_clean = convert_utc_to_local(clean.copy(), ds.local_utc_offset_hours)
local_dirty = convert_utc_to_local(dirty.copy(), ds.local_utc_offset_hours)
# plot strings dictionary
plt_str = {}
plt_str['time_start'] = 'start date: %s' %(convert_utc_to_local_str(ds.time_coverage_start, ds.local_utc_offset_hours))
plt_str['time_end'] = 'end date: %s' %(convert_utc_to_local_str(ds.time_coverage_end, ds.local_utc_offset_hours))
plt_str['days'] = 'period: %s days' %((pd.to_datetime(ds.time_coverage_end) - pd.to_datetime(ds.time_coverage_start)).days + 1)
plt_str['interval'] = 'interval: %s s' %(ds.timestep_interval_seconds)
plt_str['timesteps'] = 'timesteps: %s' %(len(ds.time))
for flux in all_fluxes:
# plot strings dictionary for flux
plt_str['missing'] = 'missing: %.2f %%' %(100*stats.loc[flux,'missing'])
plt_str['qcflags'] = 'QC flag: %.2f %%' %(100*stats.loc[flux,'qc_flagged'])
plt_str['filled'] = 'filled: %.2f %%' %(100*stats.loc[flux,'filled'])
plt_str['available'] = 'available: %.2f %%' %(100*stats.loc[flux,'available'])
# plot_qc_timeseries(ds,local_clean,local_dirty,plt_str,flux,sitename,sitepath,saveplot=True)
plot_qc_diurnal(ds,local_clean,local_dirty,plt_str,flux,sitename,sitepath,saveplot=True)
plot_all_obs(clean,dirty,stats,sitename,sitepath,all_fluxes,qc_list,saveplot=True)
plt.close('all')
clean_ds = clean.to_xarray()
return clean_ds
################################################################################
def clean_out_of_range(df,siteattrs):
# alma expected range of values, per:
# https://www.lmd.jussieu.fr/~polcher/ALMA/qc_values_3.html#A1
alma_ranges = pd.DataFrame({
'SWnet' : (0,1200),
'LWnet' : (-500,510),
'Qle' : (-700,700),
'Qh' : (-600,600),
'SWup' : (0,1360), # new
'LWup' : (0,1000), # new
'Qg' : (-500,500),
'Qtau' : (-100,100),
'Snowf' : (0,0.0085),
'Rainf' : (0,0.02),
'Evap' : (-0.0003,0.0003),
'Qs' : (0,5),
'Qsb' : (0,5),
'Qsm' : (0,0.005),
'Qfz' : (0,0.005),
'DelSoilMoist' : (-2000,2000),
'DelSWE' : (-2000,2000),
'DelIntercept' : (-100,100),
'SnowT' : (213,280),
'VegT' : (213,333),
'BaresoilT' : (213,343),
'AvgSurfT' : (213,333),
'RadT' : (213,353),
'Albedo' : (0,1),
'SWE' : (0,2000),
'SurfStor' : (0,2000),
'SnowFrac' : (0,1),
'SAlbedo' : (0,1),
'CAlbedo' : (0,1),
'UAlbedo' : (0,1),
'SoilMoist' : (0,2000),
'SoilTemp' : (213,343), # increase max + 10 for Phoenix
'TVeg' : (-0.0003,0.0003),
'ESoil' : (-0.0003,0.0003),
'RootMoist' : (0,2000),
'SoilWet' : (-0.2,1.2),
'ACond' : (0,1),
'SWdown' : (0,1360),
'LWdown' : (0,750),
'Tair' : (213,333),
'Tair2m' : (213,333), # new
'Qair' : (0,0.03),
'PSurf' : (5000,110000),
'Wind' : (-75,75),
'Wind_N' : (-75,75), # new
'Wind_E' : (-75,75), # new
},index=('min','max'))
    # mask values outside the expected ranges
clean = df.where ( (df >= alma_ranges.loc['min',:]) & (df <= alma_ranges.loc['max',:]) )
# remove RH above 101 (requires Qair, Tair and PSurf to be valid)
RH = pipeline_functions.convert_qair_to_rh(clean.Qair, clean.Tair, clean.PSurf)
clean['Qair'] = np.where(RH>101,np.nan,clean['Qair'])
dirty = df.where(clean.isna())
print('out of range: \n%s\n' %dirty.count())
print(f'saving out-of-range stats to {siteattrs["sitename"]}_outliers_range.csv\n')
dirty.to_csv(f'{siteattrs["sitepath"]}/processing/{siteattrs["sitename"]}_outliers_range.csv')
return clean
def clean_night(df,sitedata,siteattrs,offset_from_utc):
'''
    Flags night periods for shortwave as dirty.
    Night is calculated using PyEphem sunrise/sunset plus civil twilight (-6°).
'''
local_df = convert_utc_to_local(df.copy(), offset_from_utc)
# Make ephem observer
site = ephem.Observer()
site.lon = str(sitedata['longitude'])
site.lat = str(sitedata['latitude'])
site.elev = sitedata['ground_height']
# relocate the horizon to get twilight times
site.horizon = '-6' #-6=civil twilight, -12=nautical, -18=astronomical
# timestep adjustment
ts = df.index[1] - df.index[0]
new_group_list = []
local_sw = local_df[['SWdown','SWup']]
if local_sw.count().sum()==0:
print('no valid SWdown or SWup values')
# return
for date, group in local_df.loc[:,['SWdown','SWup']].groupby(lambda x: x.date):
# utc midday
utc_midday = pd.Timestamp(date) + pd.Timedelta(hours=12) - pd.Timedelta(hours=offset_from_utc)
site.date = str(pd.Timestamp(utc_midday))
try:
utc_sunrise = site.previous_rising(ephem.Sun())
utc_solarnoon = site.next_transit(ephem.Sun(), start=utc_sunrise)
utc_sunset = site.next_setting(ephem.Sun())
local_sunrise = ephem.Date(utc_sunrise + offset_from_utc/24.)
local_solarnoon = ephem.Date(utc_solarnoon + offset_from_utc/24.)
local_sunset = ephem.Date(utc_sunset + offset_from_utc/24.)
# include timestep adjustment for observation period
local_sunrise_ts = pd.Timestamp( local_sunrise.datetime() + ts ).strftime('%H:%M')
local_solarnoon_ts = pd.Timestamp( local_solarnoon.datetime() + ts ).strftime('%H:%M')
local_sunset_ts = pd.Timestamp( local_sunset.datetime() + ts ).strftime('%H:%M')
# print('local morning twilight:', local_sunrise_ts )
# print('local evening twilight:', local_sunset_ts )
# print('local solar noon:', local_solarnoon_ts )
day = group.between_time(start_time=local_sunrise_ts,end_time=local_sunset_ts)
        except Exception as e:
            print('WARNING: Ephem calculation failed, not flagging night values for this day')
            print(e)
            day = group
night = group[~group.index.isin(day.index)]
# remove any values which are not zero during night
night = night.where(night==0)
# remove all night SWup values (not analysed)
night['SWup'] = np.nan
new_group = pd.concat([day,night])
new_group_list.append(new_group)
clean_local_sw = pd.concat(new_group_list).sort_index()
local_df[['SWdown','SWup']] = clean_local_sw
clean = convert_local_to_utc(local_df, offset_from_utc)
dirty = df.where(clean.isna())
print('night: \n%s\n' %dirty.count())
print(f'saving night stats to {siteattrs["sitename"]}_outliers_night.csv\n')
dirty.to_csv(f'{siteattrs["sitepath"]}/processing/{siteattrs["sitename"]}_outliers_night.csv')
return clean
def clean_night2(df,sitedata,siteattrs,offset_from_utc):
'''
    At some locations the ephem module throws a segmentation fault (!),
    so this alternative uses the (much slower) web service
    https://sunrise-sunset.org/api to calculate night periods.
'''
# timestep adjustment
ts = df.index[1] - df.index[0]
new_group_list = []
sw = df[['SWdown','SWup']]
if sw.count().sum()==0:
print('no valid SWdown or SWup values')
# return
for date, group in sw.groupby(lambda x: x.date):
print(date)
date_str = date.strftime('%Y-%m-%d')
r = requests.get(url=f'http://api.sunrise-sunset.org/json?lat={sitedata["latitude"]}&lng={sitedata["longitude"]}&date={date_str}&formatted=0')
data = r.json()['results']
# include timestep adjustment for observation period
start_ts = (pd.Timestamp( data['civil_twilight_begin'] ) + ts).strftime('%H:%M')
end_ts = (pd.Timestamp( data['civil_twilight_end'] ) + ts).strftime('%H:%M')
day = group.between_time(start_time=start_ts,end_time=end_ts)
night = group[~group.index.isin(day.index)]
# remove any values which are not zero during night
night = night.where(night==0)
# remove all night SWup values (not analysed)
night['SWup'] = np.nan
new_group = pd.concat([day,night])
new_group_list.append(new_group)
clean_sw = pd.concat(new_group_list).sort_index()
clean = df.copy()
clean[['SWdown','SWup']] = clean_sw
dirty = df.where(clean.isna())
print('night: \n%s\n' %dirty.count())
print(f'saving night stats to {siteattrs["sitename"]}_outliers_night.csv\n')
dirty.to_csv(f'{siteattrs["sitepath"]}/processing/{siteattrs["sitename"]}_outliers_night.csv')
return clean
def clean_constant(df, siteattrs):
'''
    flag as dirty values that remain constant over several consecutive timesteps (suspected instrument failure)
'''
# some variables have expected constant zero fluxes (e.g. rain), so allow this at all sites
zero_fluxes_ok = ['SWdown','Rainf','Snowf']
# Some variables like SoilTemp change very slowly, and at some sites measured with fewer significant figures
# so allow longer period of constant fluxes in some cases
if siteattrs['sitename'] in ['UK-Swindon','PL-Lipowa','PL-Narutowicza','US-Minneapolis','US-Minneapolis1','US-Minneapolis2']:
constant_fluxes_ok = ['SoilTemp','PSurf']
else:
constant_fluxes_ok = ['SoilTemp']
# get list of fluxes except constant_fluxes_ok
fluxes0 = [n for n in df.columns.to_list() if n not in constant_fluxes_ok]
# get list of fluxes from fluxes0, except zero_fluxes_ok (i.e. standard set of variables)
fluxes1 = [n for n in fluxes0 if n not in zero_fluxes_ok]
# QC: where values repeat for 4 steps in row (standard qc)
df1 = df[fluxes1]
constant1 = df1.where( ( df1.eq(df1.shift(1))) & (df1.eq(df1.shift(2))) & (df1.eq(df1.shift(3))) )
# QC: where values repeat for 4 steps in row, and excluding zero
df2 = df[zero_fluxes_ok]
constant2 = df2.where( (df2.eq(df2.shift(1))) & (df2.eq(df2.shift(2))) & (df2.eq(df2.shift(3))) & (df2.ne(0)) )
# QC: where values repeat for 12 steps in a row (special cases)
df3 = df[df.columns.intersection(constant_fluxes_ok)]
constant3 = df3.where( ( df3.eq(df3.shift(1))) & (df3.eq(df3.shift(2))) & (df3.eq(df3.shift(3)))
& (df3.eq(df3.shift(4))) & (df3.eq(df3.shift(5))) & (df3.eq(df3.shift(6))) & (df3.eq(df3.shift(7))) & (df3.eq(df3.shift(8)))
& (df3.eq(df3.shift(9))) & (df3.eq(df3.shift(10))) & (df3.eq(df3.shift(11))) )
# bring all flagged dirty together
dirty = pd.concat([constant1,constant2,constant3], axis=1)
clean = df.where(dirty.isna())
print('constant: \n%s\n' %dirty.count())
print(f'saving constant stats to {siteattrs["sitename"]}_outliers_constant.csv\n')
dirty.to_csv(f'{siteattrs["sitepath"]}/processing/{siteattrs["sitename"]}_outliers_constant.csv')
return clean
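# Illustrative sketch only (not used by the pipeline): the chained shift() comparisons
# above flag a value that equals its previous n-1 values; an equivalent compact form is
#   mask = np.logical_and.reduce([df.eq(df.shift(k)) for k in range(1, n)])
#   constant = df.where(pd.DataFrame(mask, index=df.index, columns=df.columns))
# with n = 4 for the standard check and n = 12 for the slowly varying variables.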
################################################################################
def plot_all_obs(clean,dirty,stats,sitename,sitepath,all_fluxes,qc_list,saveplot=True):
print('plotting all_obs')
df = clean.combine_first(dirty)
fig_hgt = len(all_fluxes)*.4
plt.close('all')
fig, axes = plt.subplots(
nrows=len(all_fluxes),
ncols=1,
sharex=True,
figsize=(10,fig_hgt))
for i,ax in enumerate(axes.flatten()):
flux = all_fluxes[i]
        # plot clean observed values (qc=0), i.e. excluding gap-filled data
clean[flux].where(clean[f'{flux}_qc']==0).plot(ax=ax, color='k', lw=0.5)
# plot obs filled
clean[flux].where(clean[f'{flux}_qc']==1).plot(ax=ax, color='tab:blue', lw=0.5)
# plot missing
missing_idx = df[flux][(df[flux].isna())].index
missing = pd.Series( np.full( len(missing_idx),df[flux].min() ),index=missing_idx )
if len(missing) > 0:
missing.plot(ax=ax,color='darkorange',lw=0,marker='.',ms=1.5)
# plot dirty
dirty[flux].plot(ax=ax, color='red', lw=0.0, marker='.', ms=1.5)
# annotations
ax.text(-0.01,0.5,flux,
fontsize=10, ha='right',va='center',transform=ax.transAxes)
if i==0:
ax.set_title('Observations at %s' %sitename)
ax.text(1.03,1.01,'missing' ,
fontsize=7, color='darkorange', ha='center',va='bottom',transform=ax.transAxes)
ax.text(1.08,1.01,'flagged' ,
fontsize=7, color='red', ha='center',va='bottom',transform=ax.transAxes)
ax.text(1.13,1.01,'filled' ,
fontsize=7, color='tab:blue', ha='center',va='bottom',transform=ax.transAxes)
ax.text(1.18,1.01,'avail.' ,
fontsize=7, color='k', ha='center',va='bottom',transform=ax.transAxes)
ax.text(1.03,0.5,'%.1f%%' %(100*stats.loc[flux,'missing']),
fontsize=7, color='darkorange', ha='center',va='center',transform=ax.transAxes)
ax.text(1.08,0.5,'%.1f%%' %(100*stats.loc[flux,'qc_flagged']),
fontsize=7, color='red', ha='center',va='center',transform=ax.transAxes)
ax.text(1.13,0.5,'%.1f%%' %(100*stats.loc[flux,'filled']),
fontsize=7, color='tab:blue', ha='center',va='center',transform=ax.transAxes)
ax.text(1.18,0.5,'%.1f%%' %(100*stats.loc[flux,'available']),
fontsize=7, color='k', ha='center',va='center',transform=ax.transAxes)
ax.axes.get_yaxis().set_ticks([])
ax.tick_params(axis='x',which='minor',bottom=False)
ax.set_xlabel(None)
if saveplot:
fig.savefig(f'{sitepath}/obs_plots/all_obs_qc.{img_fmt}', dpi=200,bbox_inches='tight')
else:
plt.show()
def plot_qc_timeseries(ds,clean,dirty,plt_str,flux,sitename,sitepath,saveplot=True):
print('plotting %s timeseries' %flux)
plt.close('all')
fig, ax = plt.subplots(figsize=(10,5))
clean[flux].plot(ax=ax, color='0.5', lw=0.5, label='clean obs')
dirty[flux].plot(ax=ax, color='r',marker='x',ms=3,lw=0,label='qc flagged')
# annotations
ax = plt_annotate(ax,plt_str,fs=7)
ax.legend(loc='upper center', fontsize=7)
ax.set_title(f"{sitename}: {ds[flux].attrs['long_name']}" )
ax.set_ylabel(f"{ds[flux].attrs['long_name']} [{ds[flux].attrs['units']}]")
ax.set_xlim((ds.time_coverage_start,ds.time_coverage_end))
if saveplot==True:
fig.savefig(f'{sitepath}/obs_plots/{flux}_obs_qc_ts.{img_fmt}', dpi=150,bbox_inches='tight')
else:
plt.show()
def plot_qc_diurnal(ds,local_clean,local_dirty,plt_str,flux,sitename,sitepath,saveplot=True):
print('plotting %s qc diurnal' %flux)
plt.close('all')
fig, ax = plt.subplots(figsize=(10,5))
clean_date = local_clean[flux].groupby(local_clean.index.date)
clean_time = local_clean[flux].groupby(local_clean.index.time)
for i,(key,item) in enumerate(clean_date):
# ax.plot(item.index.time, item, color='0.75',lw=0.3,label='all clean data' if i == 0 else None)
item.index = item.index.time
item.plot(color='0.75',lw=0.3,label='all clean data' if i == 0 else '_nolegend_')
if local_dirty[flux].count()>0:
dirty_date = local_dirty[flux].groupby(local_dirty.index.date)
for i,(key,item) in enumerate(dirty_date):
# ax.plot(item.index.time, item, color='r',marker='x',ms=3,lw=0.3,label='qc flagged' if i == 0 else None)
item.index = item.index.time
item.plot(color='r',marker='x',ms=3,lw=0.3,label='qc flagged' if i == 0 else '_nolegend_')
clean_time.mean().plot(ax=ax, color='k',lw=1.5,label='mean of clean data')
clean_time.quantile(0.10).plot(ax=ax, color='k',lw=1,ls='dashed',label='10th & 90th percentiles')
clean_time.quantile(0.90).plot(ax=ax, color='k',lw=1,ls='dashed',label='_nolegend_')
# annotations
ax = plt_annotate(ax,plt_str,fs=7)
ax.legend(loc='upper center', fontsize=7,ncol=2)
ax.set_title(f"{sitename}: {ds[flux].attrs['long_name']} diurnal values" )
ax.set_ylabel(f"{ds[flux].attrs['long_name']} [{ds[flux].attrs['units']}]")
ax.set_xlim(('00:00','23:30:00'))
ax.set_xticks([str(x).zfill(2)+':00' for x in range(0,24,3)] )
if saveplot==True:
fig.savefig(f'{sitepath}/obs_plots/{flux}_obs_qc_diurnal.{img_fmt}', dpi=150,bbox_inches='tight')
else:
plt.show()
def plt_annotate(ax,plt_str,fs=7):
ax.text(0.02,0.96, plt_str['time_start'], fontsize=fs, va='center',ha='left', transform=ax.transAxes)
ax.text(0.02,0.92, plt_str['time_end'], fontsize=fs, va='center',ha='left', transform=ax.transAxes)
ax.text(0.02,0.88, plt_str['days'], fontsize=fs, va='center',ha='left', transform=ax.transAxes)
ax.text(0.02,0.84, plt_str['interval'], fontsize=fs, va='center',ha='left', transform=ax.transAxes)
ax.text(0.98, 0.96, plt_str['timesteps'], fontsize=fs, va='center',ha='right', transform=ax.transAxes)
ax.text(0.98, 0.92, plt_str['missing'], fontsize=fs, va='center', ha='right', transform=ax.transAxes)
ax.text(0.98, 0.88, plt_str['qcflags'], fontsize=fs, va='center', ha='right', transform=ax.transAxes)
ax.text(0.98, 0.84, plt_str['available'], fontsize=fs, va='center', ha='right', transform=ax.transAxes)
return ax
def convert_utc_to_local_str(utc_str,offset_from_utc):
local = pd.to_datetime(utc_str) + pd.Timedelta('%s hours' %offset_from_utc)
local_str = local.strftime('%Y-%m-%d %H:%M:%S')
return local_str
def convert_utc_to_local(df,offset_from_utc):
print('converting to local time')
tzhrs = int(offset_from_utc)
tzmin = int((offset_from_utc - int(offset_from_utc))*60)
df.index = df.index + np.timedelta64(tzhrs,'h') + np.timedelta64(tzmin,'m')
return df
def convert_local_to_utc(df,offset_from_utc):
print('converting to utc')
tzhrs = int(offset_from_utc)
tzmin = int((offset_from_utc - int(offset_from_utc))*60)
df.index = df.index - np.timedelta64(tzhrs,'h') - np.timedelta64(tzmin,'m')
return df
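# Worked example (illustrative): for offset_from_utc = 5.5 both conversions above shift
# the index by 5 hours and 30 minutes (added for UTC->local, subtracted for local->UTC),
# since tzhrs = int(5.5) = 5 and tzmin = int((5.5 - 5)*60) = 30.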
# std and mean function for period
def get_sigma(start,end,data,sigma):
    '''Return the data between start and end, plus the hourly mean and mean ± sigma*std bounds.'''
subset = data.loc[start:end]
std = subset.groupby(subset.index.hour).std()
mean = subset.groupby(subset.index.hour).mean()
high_sigma = mean + sigma*std
low_sigma = mean - sigma*std
return subset,high_sigma,low_sigma,mean
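# Illustrative example: with sigma = 4 and an hourly mean of 100 W/m2 with a standard
# deviation of 20 W/m2, get_sigma() returns bounds of 100 +/- 4*20, so values below
# 20 or above 180 W/m2 in that hour are later flagged as outliers.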
def calc_sigma_data(alldata, sigma_vals, sitepath, window=30, plotdetail=False):
    '''Look for outliers by flagging, for each hour of day within the window, values outside mean ± sigma × standard deviation.'''
alldirtydata = pd.DataFrame()
allcleandata = pd.DataFrame()
for flux in alldata.columns:
sigma = sigma_vals[flux]
print(f"analysing {flux} for {sigma} sigma")
to_clean = alldata[[flux]]
out_period_frames = []
outside_sum = 0
# select first handling period
start = pd.Timestamp(to_clean.index[0])
end = start + pd.DateOffset(days=window) - pd.DateOffset(minutes=1)
totdays = (to_clean.index[-1] - to_clean.index[0]).components.days + 1
# now loop over periods in year with steps of ndays
for nloop in range(1,totdays,window):
# print(f"analysing {flux}: day {nloop}-{nloop+window} for {sigma} sigma", end='\r')
# # get subset dataframe info for period
subset, high, low, mean = get_sigma(start,end,to_clean,sigma)
# check if data is outside bounds in each hour
hour_frames = []
#########################
for i in range(24):
# select hour in subset and create dataframe if outside range
df = subset[subset.index.hour==i]
hour_outside = df[(df < low.loc[i]) | (df > high.loc[i])]
# append each hour for later concat
hour_frames.append(hour_outside)
# hourly detailed plot
if plotdetail:
if i == 0:
plt.close('all')
fig = plt.figure(figsize=(14,8))
ax = fig.add_subplot(4, 6, i+1)
# plot all points in black
ax.scatter(x=df.index,y=df,alpha=0.5,c='k',s=6,marker='o',edgecolors='none')
                    # use a separate name for the column to avoid shadowing the outer 'flux' loop variable
                    for col in df.columns:
                        ax.plot([start,end],[high.loc[i,col],high.loc[i,col]], lw=0.4, color='r')
                        ax.plot([start,end],[low.loc[i,col], low.loc[i,col]], lw=0.4, color='r')
                        ax.plot([start,end],[mean.loc[i,col],mean.loc[i,col]], lw=1.5, color='k')
ax.scatter(x=df.index,y=hour_outside,s=16,marker='o',edgecolors='r',color='none')
ax.text(0.01,1.08,'hour %s' %i,transform=ax.transAxes,va='top',ha='left',fontsize=8)
ax.text(0.99,1.08,'n > sigma = %s' %(hour_outside.count()[0]),transform=ax.transAxes,va='top',ha='right',fontsize=8)
# ax.set_xticks([start,end])
ax.set_xticks([])
ax.tick_params(labelsize=7)
outside_sum = outside_sum + hour_outside.count()[0]
if i == 23:
title_text = f'{flux} for days {nloop} - {nloop+window} with n > sigma({sigma}) = {outside_sum}'
fig.suptitle(title_text ,x=0.5,y=0.92, fontsize=16)
# plt.show()
plt.savefig(f'{sitepath}/obs_plots/{flux}_qc_sigma_{nloop}_{nloop+window}.{img_fmt}' , dpi=300,bbox_inches='tight')
plt.close('all')
#########################
subset_out = pd.concat(hour_frames)
# append each period for later collection
out_period_frames.append(subset_out)
start = end + pd.DateOffset(minutes=1)
            end = start + pd.DateOffset(days=window) - pd.DateOffset(minutes=1)
# -----------------------------------------------------------------------------
# WSDM Cup 2017 Classification and Evaluation
#
# Copyright (c) 2017 <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
import collections
import csv
import itertools
import logging
import os
import re
import subprocess
import tempfile
import time
import numpy as np
import pandas as pd
import scipy.interpolate
import sklearn.metrics
import config
from src import utils
from src.classifiers import multipleinstance
from src import streamtransformers
from src import storage
from src import dataset
CURVES = ['fprValues', 'tprValues', 'rocThresholds', 'precisionValues', 'recallValues']
PRED_METRICS = ['ACC', 'P', 'R', 'F'] # Metrics requiring predictions
PROB_METRICS = ['PR', 'ROC']
STATISTICS = ['RESULTS', 'VANDALISM']
COLUMNS = (
list(itertools.product(['ALL'], STATISTICS + PRED_METRICS + PROB_METRICS + CURVES)) +
list(itertools.product(['ITEM_HEAD', 'ITEM_BODY'], STATISTICS + PROB_METRICS)) +
list(itertools.product(['REGISTERED', 'UNREGISTERED'], STATISTICS + PROB_METRICS))
)
_metrics = pd.DataFrame()
_logger = logging.getLogger()
#######################################################################
# Computing metrics of classifiers / features
#######################################################################
def fit(clf, dataset, index='', sample_weight=None):
label = _get_label(index)
_logger.debug("Fitting %s..." % label)
if (isinstance(clf, multipleinstance.BaseMultipleInstanceClassifier)):
g = dataset.get_group_ids()
clf.fit(g, dataset.get_X(), dataset.get_Y())
else:
clf.fit(dataset.get_X(), dataset.get_Y(), sample_weight)
_logger.debug("Fitting %s... done." % label)
def predict(clf, dataset, index=''):
label = _get_label(index)
_logger.debug("Predicting %s..." % label)
if (isinstance(clf, multipleinstance.BaseMultipleInstanceClassifier)):
g = dataset.get_group_ids()
prob = clf.predict_proba(g, dataset.get_X())
else:
# second column denotes the probability for vandalism
prob = clf.predict_proba(dataset.get_X())[:, 1]
pred = get_pred_from_prob(prob)
_logger.debug("Predicting %s... done." % label)
return pred, prob
def get_pred_from_prob(prob):
return np.asarray(prob >= 0.5)
def split_groups(dataset):
r = dataset.get_revision_ids().values
g = dataset.get_group_ids().values
s = dataset.get_meta()['revisionAction'] == 'rollback'
result = [np.nan] * len(g)
transformer = streamtransformers.StreamGroupSplitTransformer()
for i in range(len(g)):
result[i] = transformer.partial_fit_transform(r[i], g[i], s[i])
_logger.debug("Number of group splits: " + str(transformer.group_splits))
return result
def _get_label(index):
if hasattr(index, 'values'):
label = "%s" % (str(index.values[0]))
else:
label = str(index)
return label
def evaluate_print(name, pred, prob, dataset):
metrics = evaluate(name, pred, prob, dataset)
print_metrics(metrics)
result = metrics.iloc[0].loc[('ALL', 'PR')]
return result
def evaluate(index, pred, prob, ds, save_prob=True, fit_time=-1, prob_time=-1):
label = _get_label(index)
_logger.debug("Evaluating %s..." % label)
# might perform some conversion internally (e.g., from float64 to float32)
name = index_to_str(index)
storage.dump_predictions(name, ds, prob, tmp=not save_prob)
prob = storage.load_predictions(name, tmp=not save_prob)['VANDALISM_SCORE'].values
local_metrics = compute_metrics(
index, ds.get_metrics_meta(), ds.get_Y(), prob, pred)
idx = local_metrics.columns.get_loc(('ALL', 'PR')) + 1
local_metrics.insert(idx, ('ALL', 'TOTAL_TIME'), fit_time + prob_time)
local_metrics.insert(idx, ('ALL', 'PROB_TIME'), prob_time)
local_metrics.insert(idx, ('ALL', 'FIT_TIME'), fit_time)
_logger.debug("Evaluating %s... done." % label)
return local_metrics
def index_to_str(index):
"""Convert Pandas MultiIndex to String."""
    if isinstance(index, pd.MultiIndex):
        index_str_entries = map(str, index[0])  # convert index levels to string
        filename = '_'.join(index_str_entries).replace(' ', '_')
    else:  # plain (string) index
        filename = index
return filename
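# Illustrative example: a MultiIndex whose first entry is ('GradientBoosting', 0.1)
# becomes the string 'GradientBoosting_0.1', which is then used as a file name prefix.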
def fit_predict_evaluate(
index, clf, training, validation, save_prob=True, sample_weight=None):
fit_start = time.time()
fit(clf, training, index, sample_weight)
fit_end = time.time()
fit_time = fit_end - fit_start
prob_start = time.time()
pred, prob = predict(clf, validation, index)
prob_end = time.time()
prob_time = prob_end - prob_start
metrics = evaluate(index, pred, prob, validation,
save_prob, fit_time, prob_time)
return pred, prob, metrics
def remove_plots(metrics):
return remove_columns(metrics, CURVES)
def remove_columns(metrics, columns):
labels_to_drop = list(itertools.product(
metrics.columns.levels[0], columns))
result = metrics.drop(labels_to_drop, axis=1, errors='ignore')
return result
def _remove_duplicates(seq):
result = []
for e in seq:
if e not in result:
result.append(e)
return result
def print_metrics(metrics, suffix='metrics', append_global=True):
if (append_global):
global _metrics
        _metrics = pd.concat([_metrics, metrics])
_print_metrics(_metrics, suffix)
else:
_print_metrics(metrics, suffix)
def _print_metrics(metrics, suffix):
metrics.to_csv(config.OUTPUT_PREFIX + '_' + suffix + '.csv')
metrics = remove_columns(metrics, CURVES)
_logger.info("Metrics for %s:\n" % suffix +
(metrics.to_string(float_format='{:.4f}'.format)))
metrics = remove_columns(metrics, STATISTICS)
print_metrics_to_latex(metrics,
config.OUTPUT_PREFIX + '_' + suffix + '.tex')
def print_metrics_to_latex(metrics, filename):
r"""Print metrics to latex and format them as \\bscellA{}."""
metrics = metrics.copy()
def cell_format(value, char):
return '\\bscell%s[%.3f]{%3.0f}' % (char, value, value * 100)
def float_format_short(value):
return '%.3f' % (value,)
def float_format_long(value):
return '%.4f' % (value,)
def float_formatA(value):
return cell_format(value, 'A')
def float_formatB(value):
return cell_format(value, 'B')
# use formatB for all 'ROC' columns
formatters = {}
for value in metrics.columns.values:
if value[1] == 'PR':
formatters[value] = float_formatA
elif value[1] == 'ROC':
formatters[value] = float_formatB
elif value[1] == 'ACC':
formatters[value] = float_format_long
else:
formatters[value] = None
# workaround because the index is not properly formatted in Latex
metrics = metrics.reset_index()
metrics.to_latex(filename,
float_format=float_format_short,
formatters=formatters,
escape=False, index=False)
# This method is called by multiple processes
def compute_metrics(index, meta, y_true, y_score, y_pred):
_logger.debug("Computing metrics...")
utils.collect_garbage()
result = collections.OrderedDict() # noqa
result['ALL'] = compute_metrics_for_mask(index, get_content_mask(meta, 'ALL') , 'ALL' , y_true, y_score, y_pred) # noqa
result['ITEM_HEAD'] = compute_metrics_for_mask(index, get_content_mask(meta, 'ITEM_HEAD'), 'ITEM_HEAD' , y_true, y_score, y_pred) # noqa
result['ITEM_BODY'] = compute_metrics_for_mask(index, get_content_mask(meta, 'ITEM_BODY'), 'ITEM_BODY' , y_true, y_score, y_pred) # noqa
result['REGISTERED'] = compute_metrics_for_mask(index, get_user_mask(meta, 'REGISTERED') , 'REGISTERED' , y_true, y_score, y_pred) # noqa
result['UNREGISTERED'] = compute_metrics_for_mask(index, get_user_mask(meta, 'UNREGISTERED'), 'UNREGISTERED', y_true, y_score, y_pred) # noqa
result = pd.concat(result.values(), axis=1, keys=result.keys())
utils.collect_garbage()
_logger.debug("Computing metrics...done.")
return result
def get_content_mask(meta, content_type):
content_types = meta[dataset.CONTENT_TYPE]
if content_type == 'ALL':
content_type_mask = np.ones((len(content_types),), dtype=bool)
elif content_type == 'ITEM_HEAD':
content_type_mask = np.array(content_types.values == 'TEXT')
elif content_type == 'ITEM_BODY':
content_type_mask = np.array(
(content_types.values == 'STATEMENT') |
(content_types.values == 'SITELINK')
)
else:
content_type_mask = np.array(content_types.values == content_type)
return content_type_mask
def get_user_mask(meta, user_type):
registered_mask = np.asarray(meta[dataset.IS_REGISTERED_USER])
if user_type == 'REGISTERED':
return registered_mask
elif user_type == 'UNREGISTERED':
return ~registered_mask
else:
raise Exception("Unsupported user type '%s'" % str(user_type))
def compute_metrics_for_mask(
index, mask, mask_name, y_true, y_score, y_pred):
y_true = y_true[mask]
y_pred = y_pred[mask]
if y_score is not None:
y_score = y_score[mask]
result = collections.OrderedDict()
if len(y_true) > 0 and sum(y_true) > 0:
# Metrics based on prediction
result['ACC'] = sklearn.metrics.accuracy_score(y_true, y_pred)
result['P'] = sklearn.metrics.precision_score(y_true, y_pred)
result['R'] = sklearn.metrics.recall_score(y_true, y_pred)
result['F'] = sklearn.metrics.f1_score(y_true, y_pred)
result['RESULTS'] = len(y_pred)
result['VANDALISM'] = np.sum(y_true)
# Metrics based on probabilistic score
if y_score is not None:
result['ROC'] = sklearn.metrics.roc_auc_score(y_true, y_score)
fpr, tpr, roc_thresholds = _roc_curve(y_true, y_score)
precision_values, recall_values, auc_pr = \
_goadrich_precision_recall_curve(y_true, y_score)
result['PR'] = auc_pr
result['fprValues'] = [_format_values(fpr)]
result['tprValues'] = [_format_values(tpr)]
result['rocThresholds'] = [_format_values(roc_thresholds)]
result['precisionValues'] = [_format_values(precision_values)]
result['recallValues'] = [_format_values(recall_values)]
else:
        _logger.warning(
            "No positive example for " + str(index) + " and " +
            str(mask_name))
if len(result.keys()) == 0:
result['ROC'] = 0
result['fprValues'] = [np.zeros(2)]
result['tprValues'] = [np.zeros(2)]
result['rocThresholds'] = [np.zeros(2)]
result['PR'] = 0
result['precisionValues'] = [np.zeros(2)]
result['recallValues'] = [np.zeros(2)]
    result = pd.DataFrame(result)
    return result
import os
import numpy as np
import pandas as pd
import networkx as nx
def create_polarity_csv(neighbors_csv_path, mcmc_path, user_polarities_paths):
"""
Merge the neighbors csv with both the neighbourhood-based polarities and the
following-based polarities.
Input:
neighbors_csv_path : path to the csv containing the screen names and user ids
mcmc_path : path to the csv containing the MCMC polarity estimations
user_polarities_paths : list containing the paths to the polarities csvs obtained
via following-based MCMC estimations
Returns: None
Files generated:
csv file : ../generated_csvs/{file_name_base}_polarities_base.csv"
"""
# merge the first two csvs
original_csv = _merge_neigbors_csv_with_mcmc_polarities(neighbors_csv_path, mcmc_path)
# get the second set of csvs into a single one
_users = pd.DataFrame()
for file in user_polarities_paths:
_users = pd.concat((_users, pd.read_csv(file, header = None, sep = "\t")))
_users = _users.drop_duplicates(0).drop(columns = 2).rename(columns = {0:1, 1:"Polarity Following"})
# merge them
results = pd.merge(original_csv, _users, on = 1, how = 'left')
# save csv
save_path = os.path.join("..", "generated_csvs", os.path.split(neighbors_csv_path)[1].split("_")[0] + "_" + os.path.split(neighbors_csv_path)[1].split("_")[1] + "_polarities_base.csv")
results.reset_index(drop = True).to_csv(save_path, index = False)
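# Illustrative usage (all paths below are placeholders, not actual project files):
#   create_polarity_csv('../data/climate_network_neighbors.csv',
#                       '../data/mcmc_polarities.csv',
#                       ['../data/user_polarities_1.csv', '../data/user_polarities_2.csv'])
# which writes ../generated_csvs/climate_network_polarities_base.csv as described above.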
def _merge_neigbors_csv_with_mcmc_polarities(neighbors_csv_path, mcmc_path):
"""
Merge row-wise the csv containing the screen names and their corresponding ids
with the csv containing the polarities estimated in R via MCMC.
Input:
neighbors_csv_path : path to the csv containing the screen names and user ids
mcmc_path : path to the csv containing the MCMC polarity estimations
    Returns: pandas dataframe containing the original neighbors csv columns and a new
column for the estimated polarities
"""
# read the csvs
original_csv = pd.read_csv(neighbors_csv_path, header = None)
mcmc_csv = pd.read_csv(mcmc_path, sep = ' ')
# get the polarities from the mcmc csv
polarities = []
for entry in mcmc_csv['results']:
try:
polarities.append(float(entry))
except ValueError:
polarities.append(np.nan)
original_csv['Polarity Neighbours'] = polarities
return original_csv
def make_neighbors_csv(graph_folder_path):
"""
Generate a csv file containing the columns 'node', 'neighbors' and 'weight'
for a given graph file.
Inputs:
graph_folder_path : path to the graph used to generate the csv
Returns: None
Files generated:
csv file : ../generated_csvs/{graph_name}_neighbors_names.csv
"""
# make the pandas dataframe
graph = nx.read_gexf(graph_folder_path)
    # build the rows first and construct the DataFrame once (DataFrame.append is deprecated)
    rows = []
    for node in graph.nodes():
        neighbors = list(graph.neighbors(node))
        rows.append({'node': node, 'neighbors': neighbors, 'weight': len(neighbors)})
    node_table = pd.DataFrame(rows, columns = ['node', 'neighbors', 'weight'])
# get the name of the save file
save_file = os.path.split(graph_folder_path)[-1].split(".")[0]
save_file = os.path.join("..", "generated_csvs", save_file + "_neighbors_names.csv")
# save csv
if not os.path.exists(os.path.join("..", "generated_csvs")):
os.mkdir(os.path.join("..", "generated_csvs"))
node_table.reset_index(drop = True).to_csv(save_file, index = False)
def merge_neighbors_and_polarity(neighbors_csv_path, polarity_path):
"""
Merge the neighbors csv with the polarity csv. The resulting csv has the
following columns: index (the indexes of the nodes in the Laplacian matrix),
node name, neighbors, weight, polarity.
Inputs:
neighbors_csv_path : path to the neighbors csv
polarity_path : path to the polarity csv
Returns: None
Files generated:
csv file : ../generated_csv/{neighbor_csv_file_name}_neighbors_polarity_merged.csv
"""
# get the name of the save file
save_file = ""
for entry in os.path.split(neighbors_csv_path)[-1].split(".")[0].split("_")[0:-2]:
save_file += entry + "_"
save_file = os.path.join("..", "generated_csvs", save_file + "neighbors_polarity_merged.csv")
# combine the csvs
    node_table = pd.read_csv(neighbors_csv_path)
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
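# Worked example (illustrative): with Nc_old = 10, Nc_new = 4, l = 5 days and t three days
# after t_start, ramp_fun returns 10 + (4 - 10)/5 * 3 = 6.4, i.e. the contact matrix is
# interpolated linearly from the old policy to the new one over l days.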
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
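# Illustrative usage (a sketch; the aggregation level and date are assumptions):
#   all_mobility_data, average_mobility_data = load_all_mobility_data('prov', dtype='fractional')
#   mobility_update = make_mobility_update_function(all_mobility_data, average_mobility_data)
#   place = mobility_update(pd.Timestamp('2020-11-01'))  # square mobility matrix for that date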
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps). This
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = self.default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
###################
## VOC functions ##
###################
class make_VOC_function():
"""
Class that returns a time-dependant parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
Current implementation includes the alpha - delta strains.
If the class is initialized without arguments, a logistic model fitted to prelevance data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prelavence data provided by Prof. <NAME>.
A logistic model fitted to prelevance data of the delta variant is always used.
Input
-----
    *df_abc: pd.DataFrame (optional)
Alpha, Beta, Gamma prelevance dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
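    # Both logistic fits above follow f(t) = 1 / (1 + exp(-k * (t - t_sig))), where t_sig
    # is the date at which the variant reaches 50% prevalence and k sets how fast the
    # sweep happens; e.g. for the delta fit, f(t_sig) = 1 / (1 + exp(0)) = 0.5 on 2021-06-25.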
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first dose data by sciensano are used. In the future, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
    df : pd.DataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.Intervalindex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of consired spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
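    # Worked example of the re-binning above (illustrative): if the data report 1000 doses
    # for age group [0, 20) and the model uses [0, 12) and [12, 18), each single year of age
    # in a model group receives demographics[age] / N_[0,20) * 1000 doses, i.e. the doses are
    # redistributed proportionally to the population of every one-year age class.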
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_first_dose" first doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses, individuals are transferred to the vaccination circuit after some time delay after the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_first_dose : int
Number of doses administered per day. Default is 30000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : float
Index of age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups, an index of 8 corresponds to not vaccinating the age group corresponding with vacc_order[idx].
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for non-spatial multi-vaccindation dose model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
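# Illustrative usage as a time-dependent parameter function (a sketch; `states`, `param`
# and `initN` come from the epidemiological model and are placeholders here):
#   from covid19model.data import sciensano
#   df = sciensano.get_sciensano_COVID19_data(update=False)
#   vacc_function = make_vaccination_function(df)
#   N_vacc = vacc_function(pd.Timestamp('2021-03-01'), states, param, initN)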
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionnary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and endtimes of dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependant social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
t25 = pd.Timestamp('2021-12-26') # Start of Christmass break
t26 = pd.Timestamp('2022-01-06') # End of Christmass break
t27 = pd.Timestamp('2022-02-28') # Start of Spring Break
t28 = pd.Timestamp('2022-03-06') # End of Spring Break
t29 = pd.Timestamp('2022-04-04') # Start of Easter Break
t30 = pd.Timestamp('2022-04-17') # End of Easter Break
t31 = pd.Timestamp('2022-07-01') # Start of summer holidays
t32 = pd.Timestamp('2022-09-01') # End of summer holidays
t33 = pd.Timestamp('2022-09-21') # Opening of universities
t34 = pd.Timestamp('2022-10-31') # Start of autumn break
t35 = pd.Timestamp('2022-11-06') # End of autumn break
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
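# Illustrative wiring (an assumption, not part of this module): policy functions
# such as policies_all are typically registered as time-dependent parameter
# functions of the epidemiological model, along the lines of
#   model = COVID19_SEIQRD(initial_states, params,
#                          time_dependent_parameters={'Nc': policy_obj.policies_all})
# where the model class name and the 'time_dependent_parameters' keyword are
# assumptions inferred from the (t, states, param, ...) call signature used here.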
def policies_all_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-06-01') # Start of lockdown relaxation
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-10-01') # Flanders releases all measures
t23 = pd.Timestamp('2021-11-01') # Start of autumn break
t24 = pd.Timestamp('2021-11-07') # End of autumn break
# Fourth WAVE
t25 = pd.Timestamp('2021-11-22') # Start of mandatory telework + start easing in leisure restrictions
t26 = pd.Timestamp('2021-12-18') # Start of Christmas break for schools
t27 = pd.Timestamp('2021-12-26') # Start of Christmas break for general population
t28 = pd.Timestamp('2022-01-06') # End of Christmas break
t29 = pd.Timestamp('2022-01-28') # End of measures
t30 = pd.Timestamp('2022-02-28') # Start of Spring Break
t31 = pd.Timestamp('2022-03-06') # End of Spring Break
t32 = pd.Timestamp('2022-04-04') # Start of Easter Break
t33 = pd.Timestamp('2022-04-17') # End of Easter Break
t34 = pd.Timestamp('2022-07-01') # Start of summer holidays
t35 = pd.Timestamp('2022-09-01') # End of summer holidays
t36 = pd.Timestamp('2022-09-21') # Opening of universities
t37 = pd.Timestamp('2022-10-31') # Start of autumn break
t38 = pd.Timestamp('2022-11-06') # End of autumn break
scenarios_work = [1, 0.7, 0.7, 0.7, 0.7]
scenarios_schools = [1, 1, 1, 1, 1]
scenarios_leisure = [1, 1, 0.75, 0.50, 0.25]
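# Scenario lists: index `scenario` selects one entry from each list, giving the
# multipliers applied to work, school and leisure contacts during the fourth wave
# (scenario 0 = no additional measures; higher indices add telework and
# progressively stricter leisure restrictions).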
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t1 < t <= t1 + l1_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
elif t3 < t <= t4:
l = (t4 - t3)/pd.Timedelta(days=1)
r = (t3 - t2)/(t4 - t2)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t3, l)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.7)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=1)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
l = (t20 - t19)/pd.Timedelta(days=1)
r = (t19 - t18)/(t20 - t18)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, r*prev_rest_relaxation, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t19, l)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.75*prev_rest_relaxation, school=0.7)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, 0.70*prev_rest_relaxation, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
elif t24 < t <= t25:
# End of autumn break --> Date of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t25 < t <= t25 + pd.Timedelta(5, unit='D'):
# Date of measures --> End easing in leisure restrictions
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
return self.ramp_fun(policy_old, policy_new, t, t25, 5)
elif t25 + pd.Timedelta(5, unit='D') < t <= t26:
# End easing in leisure restrictions --> Early schools closure before Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
elif t26 < t <= t27:
# Early schools closure before Christmas holiday --> Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=0)
elif t27 < t <= t28:
# Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=scenarios_work[scenario]-0.2, leisure=scenarios_leisure[scenario], transport=scenarios_work[scenario]-0.2, school=0)
elif t28 < t <= t29:
# Christmas holiday --> End of measures
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=scenarios_leisure[scenario], work=scenarios_work[scenario], school=1)
elif t29 < t <= t30:
# End of Measures --> Spring break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
leisure=1, work=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
# Spring Break
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=0.7, others=1, school=0)
elif t31 < t <= t32:
# Spring Break --> Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t32 < t <= t33:
# Easter
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t33 < t <= t34:
# Easter --> Summer
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
elif t35 < t <= t36:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.7)
elif t36 < t <= t37:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t37 < t <= t38:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
###################
## Spatial model ##
###################
def policies_all_spatial(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-07') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-05-07') # Start of relaxations
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-11-01') # Start of autumn break
t23 = pd.Timestamp('2021-11-07') # End of autumn break
t24 = pd.Timestamp('2021-12-26') # Start of Christmas break
t25 = pd.Timestamp('2022-01-06') # End of Christmas break
t26 = pd.Timestamp('2022-02-28') # Start of Spring Break
t27 = pd.Timestamp('2022-03-06') # End of Spring Break
t28 = pd.Timestamp('2022-04-04') # Start of Easter Break
t29 = pd.Timestamp('2022-04-17') # End of Easter Break
t30 = pd.Timestamp('2022-07-01') # Start of summer holidays
t31 = pd.Timestamp('2022-09-01') # End of summer holidays
t32 = pd.Timestamp('2022-09-21') # Opening of universities
t33 = pd.Timestamp('2022-10-31') # Start of autumn break
t34 = pd.Timestamp('2022-11-06') # End of autumn break
spatial_summer_lockdown_2020 = tuple(np.array([prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_lockdown, # W
prev_rest_lockdown, # Bxl
prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_relaxation, prev_rest_relaxation, # W
prev_rest_lockdown, # F
0.7*prev_rest_relaxation, 0.7*prev_rest_relaxation])) # W
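# The 11 entries above appear to map onto the Belgian provinces (plus Brussels) in
# the order expected by __call__; the inline F/W/Bxl tags mark Flemish, Walloon and
# Brussels entries. During the 2020 Antwerp summer lockdown only part of the country
# keeps lockdown-level contacts, the rest keeping (scaled) relaxation-level contacts.
# The co_F, co_W and co_Bxl coefficients defined below are region-level scalings of
# prev_rest_relaxation used for summer 2021 and the autumn 2021 Flanders relaxation.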
co_F = 0.60
co_W = 0.50
co_Bxl = 0.45
spatial_summer_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
co_F = 1.00
co_W = 0.50
co_Bxl = 0.45
relaxation_flanders_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
# 2020
elif t3 < t <= t4:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_relaxation, school=0)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_lockdown_2020, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.8)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0.8)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=0)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t24 < t <= t25:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t25 < t <= t26:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t26 < t <= t27:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
leisure=1.1, work=0.9, transport=1, others=1, school=0)
elif t27 < t <= t28:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t28 < t <= t29:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t29 < t <= t30:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1.3, transport=1, others=1, school=0)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.8)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.9, leisure=1.1, transport=1, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_spatial_WAVE4(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-07') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16') # schools re-open
t11 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t12 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t13 = pd.Timestamp('2021-02-15') # Spring break starts
t14 = pd.Timestamp('2021-02-21') # Spring break ends
t15 = pd.Timestamp('2021-02-28') # Contact increase in children
t16 = pd.Timestamp('2021-03-26') # Start of Easter holiday
t17 = pd.Timestamp('2021-04-18') # End of Easter holiday
t18 = pd.Timestamp('2021-05-07') # Start of relaxations
t19 = pd.Timestamp('2021-07-01') # Start of Summer holiday
t20 = pd.Timestamp('2021-09-01') # End of Summer holiday
t21 = pd.Timestamp('2021-09-21') # Opening of universities
t22 = pd.Timestamp('2021-11-01') # Start of autumn break
t23 = pd.Timestamp('2021-11-07') # End of autumn break
# Fourth WAVE
t24 = pd.Timestamp('2021-11-22') # Start mandatory telework
t25 = pd.Timestamp('2021-12-18') # Early closing of schools
t26 = pd.Timestamp('2021-12-26') # Start of Christmas break
t27 = pd.Timestamp('2022-01-06') # End of Christmas break
t28 = pd.Timestamp('2022-01-28') # End of measures
t29 = pd.Timestamp('2022-02-28') # Start of Spring Break
t30 = pd.Timestamp('2022-03-06') # End of Spring Break
t31 = pd.Timestamp('2022-04-04') # Start of Easter Break
t32 = pd.Timestamp('2022-04-17') # End of Easter Break
t33 = pd.Timestamp('2022-07-01') # Start of summer holidays
t34 = pd.Timestamp('2022-09-01') # End of summer holidays
t35 = pd.Timestamp('2022-09-21') # Opening of universities
t36 = pd.Timestamp('2022-10-31') # Start of autumn break
t37 = pd.Timestamp('2022-11-06') # End of autumn break
scenarios_work = [1, 0.7, 0.7, 0.7, 0.7]
scenarios_schools = [1, 1, 1, 1, 1]
scenarios_leisure = [1, 1, 0.75, 0.50, 0.25]
spatial_summer_lockdown_2020 = tuple(np.array([prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_lockdown, # W
prev_rest_lockdown, # Bxl
prev_rest_lockdown, prev_rest_lockdown, # F
prev_rest_relaxation, prev_rest_relaxation, # W
prev_rest_lockdown, # F
0.7*prev_rest_relaxation, 0.7*prev_rest_relaxation])) # W
co_F = 0.60
co_W = 0.50
co_Bxl = 0.45
spatial_summer_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
co_F = 1.00
co_W = 0.50
co_Bxl = 0.45
relaxation_flanders_2021 = tuple(np.array([co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, # W
co_Bxl*prev_rest_relaxation, # Bxl
co_F*prev_rest_relaxation, co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation, # W
co_F*prev_rest_relaxation, # F
co_W*prev_rest_relaxation, co_W*prev_rest_relaxation])) # W
if t <= t1:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
elif t1 < t <= t1 + l1_days:
t = pd.Timestamp(t.date())
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1) #self.Nc_all['total']
policy_new = self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
return self.ramp_fun(policy_old, policy_new, t, t1, l1)
elif t1 + l1_days < t <= t2:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_lockdown, school=0)
elif t2 < t <= t3:
l = (t3 - t2)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
return self.ramp_fun(policy_old, policy_new, t, t2, l)
# 2020
elif t3 < t <= t4:
return self.__call__(t, prev_home=prev_home, prev_schools=prev_schools, prev_work=prev_work, prev_rest=prev_rest_relaxation, school=0)
elif t4 < t <= t5:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_lockdown_2020, school=0)
elif t5 < t <= t6:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0)
# Second wave
elif t6 < t <= t7:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=0.8)
elif t7 < t <= t8:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
elif t8 < t <= t8 + l2_days:
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation, school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=1)
return self.ramp_fun(policy_old, policy_new, t, t8, l2)
elif t8 + l2_days < t <= t9:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t9 < t <= t10:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t10 < t <= t11:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t11 < t <= t12:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t12 < t <= t13:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t13 < t <= t14:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t14 < t <= t15:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t15 < t <= t16:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t16 < t <= t17:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=0)
elif t17 < t <= t18:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown,
school=1)
elif t18 < t <= t19:
l = (t19 - t18)/pd.Timedelta(days=1)
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_lockdown, school=0)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
return self.ramp_fun(policy_old, policy_new, t, t18, l)
elif t19 < t <= t20:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0)
elif t20 < t <= t21:
return self.__call__(t, prev_home, prev_schools, prev_work, spatial_summer_2021, school=0.8)
elif t21 < t <= t22:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t22 < t <= t23:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=0)
elif t23 < t <= t24:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, school=1)
elif t24 < t <= t24 + pd.Timedelta(5, unit='D'):
# Date of measures --> End easing in leisure restrictions
policy_old = self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, work=scenarios_work[scenario], school=1)
policy_new = self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
return self.ramp_fun(policy_old, policy_new, t, t24, 5)
elif t24 + pd.Timedelta(5, unit='D') < t <= t25:
# End easing in leisure restrictions --> Early school closing before Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021, work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=scenarios_schools[scenario])
elif t25 < t <= t26:
# Early school closing --> Christmas holiday
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=scenarios_work[scenario], leisure=scenarios_leisure[scenario], school=0)
elif t26 < t <= t27:
# Christmas break
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
leisure=scenarios_leisure[scenario], work=scenarios_work[scenario] - 0.2, transport=scenarios_work[scenario] - 0.2, school=0)
elif t27 < t <= t28:
# Christmas --> End of measures
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
leisure=scenarios_leisure[scenario], work=scenarios_work[scenario], school=1)
elif t28 < t <= t29:
# End of measures --> Spring break
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t29 < t <= t30:
# Spring Break
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1, transport=0.7, others=1, school=0)
elif t30 < t <= t31:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=1, leisure=1, transport=1, others=1, school=1)
elif t31 < t <= t32:
return self.__call__(t, prev_home, prev_schools, prev_work, relaxation_flanders_2021,
work=0.7, leisure=1, transport=0.7, others=1, school=0)
elif t32 < t <= t33:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t33 < t <= t34:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=0.7, others=1, school=0)
elif t34 < t <= t35:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=0.7)
elif t35 < t <= t36:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
elif t36 < t <= t37:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=0.7, leisure=1, transport=0.7, others=1, school=0)
else:
return self.__call__(t, prev_home, prev_schools, prev_work, prev_rest_relaxation,
work=1, leisure=1, transport=1, others=1, school=1)
def policies_all_work_only(self, t, states, param, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependent social contact matrix of work contacts (Nc_work).
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Define key dates of first wave
t1 = pd.Timestamp('2021-12-26') # Start of Christmas break
t2 = pd.Timestamp('2022-01-06') # End of Christmas break
t3 = pd.Timestamp('2022-02-28') # Start of Spring Break
t4 = pd.Timestamp('2022-03-06') # End of Spring Break
t5 = pd.Timestamp('2022-04-04') # Start of Easter Break
t6 = pd.Timestamp('2022-04-17') # End of Easter Break
if t <= t1:
# Before Christmas --> Google data
return self.__call__(t, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t1 < t <= t2:
# Christmas
return self.__call__(t, work = 0.7, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t2 < t <= t3:
# Christmas --> Spring Break
return self.__call__(t, work = 1, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t3 < t <= t4:
# Spring Break
return self.__call__(t, work = 0.7, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t4 < t <= t5:
# Spring Break --> Easter Break
return self.__call__(t, work = 1, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t5 < t <= t6:
# Easter Break
return self.__call__(t, work = 0.7, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
# Easter Break --> ...
else:
return self.__call__(t, work = 1, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
def policies_all_work_only_WAVE4(self, t, states, param, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home, date_measures, scenario):
'''
Function that returns the time-dependent social contact matrix of work contacts (Nc_work).
Input
-----
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Fourth WAVE
t1 = pd.Timestamp('2021-11-22') # Mandatory telework + Start easing of leisure restrictions
t2 = pd.Timestamp('2021-12-26') # Start of Christmas break
t3 = pd.Timestamp('2022-01-06') # End of Christmas break
t4 = pd.Timestamp('2022-01-28') # End of measures
t5 = pd.Timestamp('2022-02-28') # Start of Spring Break
t6 = pd.Timestamp('2022-03-06') # End of Spring Break
t7 = pd.Timestamp('2022-04-04') # Start of Easter Break
t8 = pd.Timestamp('2022-04-17') # End of Easter Break
scenarios_work = [1, 0.7, 0.7, 0.7, 0.7]
if t <= t1:
# Before mandatory telework --> Google data
return self.__call__(t, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t1 < t <= t2:
# Mandatory telework --> Christmas
return self.__call__(t, work = scenarios_work[scenario], prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t2 < t <= t3:
# Christmas break
return self.__call__(t, work = scenarios_work[scenario] - 0.2 , prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t3 < t <= t4:
# Christmas break --> End measures
return self.__call__(t, work = scenarios_work[scenario], prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t4 < t <= t5:
# End of measures --> Spring break
return self.__call__(t, work = 1, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t5 < t <= t6:
# Spring break
return self.__call__(t, work = 0.7, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t6 < t <= t7:
# Spring break --> Easter
return self.__call__(t, work = 1, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
elif t7 < t <= t8:
# Easter break
return self.__call__(t, work = 0.7, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
else:
# Easter Break --> ...
return self.__call__(t, work = 1, prev_home=0, prev_schools=0, prev_work=prev_work, prev_rest=0, school=0)
##########################
## Seasonality function ##
##########################
class make_seasonality_function():
"""
Simple class to create a function that controls the season-dependent value of the transmission coefficients. Currently not based on any data, but e.g. weather patterns could be imported if needed.
"""
def __call__(self, t, states, param, amplitude, peak_shift):
"""
Default output function. Returns a sinusoid with average value 1.
t : Timestamp
simulation time
states : xarray
model states
param : dict
model parameter dictionary
amplitude : float
maximum deviation of output with respect to the average (1)
peak_shift : float
phase shift: number of days after January 1st at which the seasonality rescaling reaches its maximum value
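Example (illustrative): with amplitude=0.2 and peak_shift=0 the rescaling varies
between roughly 0.8 and 1.2 over the year, peaking on January 1st.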
"""
ref_date = pd.to_datetime('2021-01-01')
# If peak_shift = 0, the max is on the first of January
maxdate = ref_date + pd.Timedelta(days=peak_shift)
# One period is one year long (seasonality)
t = (t - pd.to_datetime(maxdate))/ | pd.Timedelta(days=1) | pandas.Timedelta |
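# At this point t is the time in days relative to the peak date; a one-year
# sinusoid with mean 1, e.g. 1 + amplitude*np.cos(2*np.pi*t/365), would produce
# the rescaling described in the docstring (the exact expression applied
# downstream is an assumption here).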
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import csv
import hashlib
from typing import ContextManager
import srt
import pandas
import functools
from pydub import AudioSegment
from datetime import datetime, timedelta
from pathlib import Path
from praatio import tgio
from .clean_transcript import clean_transcript
ALPHABET_FILE_PATH = "/DeepSpeech/bin/bangor_welsh/alphabet.txt"
def get_directory_structure(rootdir):
dir = {}
rootdir = rootdir.rstrip(os.sep)
start = rootdir.rfind(os.sep) + 1
for path, dirs, files in os.walk(rootdir, followlinks=True):
folders = path[start:].split(os.sep)
subdir = dict.fromkeys(files)
parent = functools.reduce(dict.get, folders[:-1], dir)
parent[folders[-1]] = subdir
return dir
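# Illustrative example (paths invented): get_directory_structure("data") on a tree
# data/speaker1/a.wav returns {'data': {'speaker1': {'a.wav': None}}}, i.e. a nested
# dict keyed by folder names with files mapped to None.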
def import_textgrid(target_csv_file, textfile):
print ("Importing clips and transcripts from %s " % textfile)
target_data_root_dir = Path(target_csv_file).parent
target_clips_dir = os.path.join(target_data_root_dir, "clips")
Path(target_clips_dir).mkdir(parents=True, exist_ok=True)
df = pandas.DataFrame(columns=['wav_filename', 'wav_filesize', 'transcript'])
textgrid_file_path = os.path.join(target_data_root_dir, textfile)
soundfile = textgrid_file_path.replace(".TextGrid",".wav")
audio_file = AudioSegment.from_wav(os.path.join(target_data_root_dir, soundfile))
ooa_text_file_path = os.path.join(target_data_root_dir, 'deepspeech.ooa.txt')
clean = clean_transcript(ALPHABET_FILE_PATH, ooa_text_file_path)
tg = tgio.openTextgrid(textgrid_file_path)
entryList = tg.tierDict["utterance"].entryList
i=0
for interval in entryList:
text = interval.label
cleaned, transcript = clean.clean(text)
if cleaned and len(transcript)>0:
transcript = transcript.lower()
start = float(interval.start) * 1000
end = float(interval.end) * 1000
#print (start, end, transcript)
split_audio = audio_file[start:end]
hashId = hashlib.md5(transcript.encode('utf-8')).hexdigest()
wav_segment_filepath = os.path.join(target_clips_dir, hashId + ".wav")
split_audio.export(wav_segment_filepath, format="wav")
df.loc[i] = [wav_segment_filepath, os.path.getsize(wav_segment_filepath), transcript]
i += 1
return df
def import_srt(target_csv_file, srtfile):
print ("Importing transcripts from srt file in %s " % srtfile)
target_data_root_dir = Path(target_csv_file).parent
target_clips_dir = os.path.join(target_data_root_dir, "clips")
Path(target_clips_dir).mkdir(parents=True, exist_ok=True)
df = | pandas.DataFrame(columns=['wav_filename', 'wav_filesize', 'transcript']) | pandas.DataFrame |
# Copyright 2020 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from gatspy import periodic
import java
import copy
from astropy.time import Time
import dash
from dash.dependencies import Input, Output
import plotly.graph_objects as go
from apps.utils import convert_jd, readstamp, _data_stretch, convolve
from apps.utils import apparent_flux, dc_mag
from pyLIMA import event
from pyLIMA import telescopes
from pyLIMA import microlmodels, microltoolbox
from pyLIMA.microloutputs import create_the_fake_telescopes
from app import client, app
colors_ = [
'#1f77b4', # muted blue
'#ff7f0e', # safety orange
'#2ca02c', # cooked asparagus green
'#d62728', # brick red
'#9467bd', # muted purple
'#8c564b', # chestnut brown
'#e377c2', # raspberry yogurt pink
'#7f7f7f', # middle gray
'#bcbd22', # curry yellow-green
'#17becf' # blue-teal
]
all_radio_options = {
"Difference magnitude": ["Difference magnitude", "DC magnitude", "DC apparent flux"],
"DC magnitude": ["Difference magnitude", "DC magnitude", "DC apparent flux"],
"DC apparent flux": ["Difference magnitude", "DC magnitude", "DC apparent flux"]
}
layout_lightcurve = dict(
automargin=True,
margin=dict(l=50, r=30, b=0, t=0),
hovermode="closest",
legend=dict(
font=dict(size=10),
orientation="h",
xanchor="right",
x=1,
bgcolor='rgba(0,0,0,0)'
),
xaxis={
'title': 'Observation date',
'automargin': True
},
yaxis={
'autorange': 'reversed',
'title': 'Magnitude',
'automargin': True
}
)
layout_phase = dict(
autosize=True,
automargin=True,
margin=dict(l=50, r=30, b=40, t=25),
hovermode="closest",
legend=dict(
font=dict(size=10),
orientation="h",
yanchor="bottom",
y=0.02,
xanchor="right",
x=1,
bgcolor='rgba(0,0,0,0)'
),
xaxis={
'title': 'Phase'
},
yaxis={
'autorange': 'reversed',
'title': 'Apparent DC Magnitude'
},
title={
"text": "Phased data",
"y": 1.01,
"yanchor": "bottom"
}
)
layout_mulens = dict(
autosize=True,
automargin=True,
margin=dict(l=50, r=30, b=40, t=25),
hovermode="closest",
legend=dict(
font=dict(size=10),
orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
bgcolor='rgba(0,0,0,0)'
),
xaxis={
'title': 'Observation date'
},
yaxis={
'autorange': 'reversed',
'title': 'DC magnitude'
},
title={
"text": "pyLIMA Fit (PSPL model)",
"y": 1.01,
"yanchor": "bottom"
}
)
layout_scores = dict(
autosize=True,
automargin=True,
margin=dict(l=50, r=30, b=0, t=0),
hovermode="closest",
legend=dict(font=dict(size=10), orientation="h"),
xaxis={
'title': 'Observation date'
},
yaxis={
'title': 'Score',
'range': [0, 1]
}
)
def extract_scores(data: java.util.TreeMap) -> pd.DataFrame:
""" Extract SN scores from the data
"""
values = ['i:jd', 'd:snn_snia_vs_nonia', 'd:snn_sn_vs_all', 'd:rfscore']
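# Column meanings (treated here as an assumption about the Fink alert schema):
# 'i:jd' is the observation Julian date, the 'd:snn_*' fields are SuperNNova
# classifier scores and 'd:rfscore' is the random-forest early-SN score.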
pdfs = | pd.DataFrame.from_dict(data, orient='index') | pandas.DataFrame.from_dict |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed, but you almost always don't want to do it
# (tables.NaturalNameWarning)
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed, but you almost always don't want to do it
# (tables.NaturalNameWarning)
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block......
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import os
import json
import numpy as np
import pandas as pd
from qa.utils import set_seed
DEFAULT_SIZES = [500, 1000, 1500, 2000, 2500, 3000]
def randomize_indices(data):
idx = np.arange(len(data))
return np.random.permutation(idx)
def partition_data(data, indices_or_sections=None, seed=42):
"""
data: a sequence (e.g. list or numpy array) of examples to split.
indices_or_sections: either an int (the number of roughly equal partitions) or a
sorted list of indices at which to cut, as accepted by numpy.array_split.
"""
set_seed(seed)
dd = np.array(data)
idx = randomize_indices(data)
idx_chunks = np.array_split(idx, indices_or_sections)
partitions = [list(dd[chunk]) for chunk in idx_chunks]
return partitions
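# Illustrative usage of partition_data (the values below are made up for the example,
# not taken from the original module): splitting ten items at cut points [3, 7]
# yields three shuffled chunks of sizes 3, 4 and 3.
#
#   parts = partition_data(list(range(10)), indices_or_sections=[3, 7], seed=0)
#   assert [len(p) for p in parts] == [3, 4, 3]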
def create_increasing_sized_train_sets(json_data_file, sizes=DEFAULT_SIZES, **kwargs):
"""
json_data_file: filename of the original, full dataset from which to create subsets
sizes: list of cumulative example counts at which to split the original
dataset; these translate into datasets of increasing size, with each
successive dataset consisting of the previous subset's examples plus
the additional examples up to the next cut point
Takes filename of a json dataset and the desired sizes and creates
subsets of increasing size. These subsets are saved to the directory
associated with the json_data_file.
"""
outfile_prefix = os.path.splitext(json_data_file)[0]
json_data = json.load(open(json_data_file, "r"))
data_chunks = partition_data(json_data["data"][0]["paragraphs"], sizes, **kwargs)
new_json = {"data": [{"paragraphs": []}]}
num_examples = 0
for chunk in data_chunks:
num_examples += len(chunk)
new_json["data"][0]["paragraphs"] += chunk
json.dump(new_json, open(f"{outfile_prefix}_{num_examples}.json", "w"))
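# Illustrative call (the path and sizes below are hypothetical): with cut points
# [500, 1000], an input file data/train.json is written out as data/train_500.json,
# data/train_1000.json and data/train_<total>.json, where each successive file
# contains all of the previous file's examples plus the next chunk.
#
#   create_increasing_sized_train_sets("data/train.json", sizes=[500, 1000])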
def load_results(data_dir):
data = json.load(open(data_dir + "/results_.json", "r"))
return pd.DataFrame(data, index=[0])
def load_predictions(data_dir):
preds = json.load(open(data_dir + "/predictions_.json", "r"))
return pd.Series(preds)
import os
import unittest
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from datetime import datetime
from bqsqoop.utils.parquet_util import ParquetUtil
def sample_df():
_data = [
dict(colA="val1", colB=1),
dict(colA="val2", colB=2)
]
return pd.DataFrame.from_dict(_data)
import string
import random
import pathlib
import numpy as np
import pandas as pd
from scipy import stats
path = pathlib.Path(
'~/dev/python/python1024/data/dataproc/006analysis/case').expanduser()
shop_path = path.joinpath('店铺基本数据.xlsx')
# Product list
product_list = [f'产品{c}' for c in string.ascii_uppercase]
# Product price/cost list
product_price_list = np.random.randint(12, 31, len(product_list))
product_dict = dict(zip(product_list, product_price_list))
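# For illustration only: product_dict maps each product name to a randomly drawn
# price, e.g. roughly {'产品A': 18, '产品B': 25, ...}; the exact numbers differ per
# run because prices are sampled from randint(12, 31).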
# Payment methods
pay_p = [0.5, 0.2, 0.1, 0.06, 0.1, 0.03, 0.01]
pay_list = ['微信', '支付宝', '银行卡', '饿了么', '美团', 'POS', '现金']
# Dining modes (dine-in, takeout, delivery)
dining_p = [0.4, 0.2, 0.4]
dining_list = ['堂食', '打包', '外卖']
# Discount rates
discount_p = [0.4, 0.1, 0.1, 0.2, 0.1, 0.05, 0.05]
discount_list = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4]
# Order notes
comment_p = [0.7, 0.05, 0.05, 0.03, 0.02, 0.01,
0.02, 0.02, 0.03, 0.03, 0.02, 0.02]  # probability distribution
comment_list = ['', '少糖', '加糖', '少冰', '去冰',
'加冰', '加奶', '无奶', '少珍珠', '多珍珠', '去柠檬', '加柠檬']
# Order line items: mostly single-product orders, with some 2-, 3-, 4- and 5-product orders; probabilities below
productn_p = [0.8, 0.1, 0.06, 0.02, 0.02]
def init_shops(n_shop=100):
"""
Initialize the basic shop data.
:param n_shop: number of shops to initialize; defaults to 100
:return: df_shop, DataFrame
"""
shop_list = [f'SP{i:04d}' for i in range(n_shop)]
# Each shop has [800, 15000) users
shop_user_size = np.random.randint(800, 15000, n_shop)
# User ID range of shop x: shop_user_list[x-1] ~ shop_user_list[x]; initial value 0
shop_user_list = shop_user_size.cumsum()
# Starting user ID of each shop; users are numbered in ascending order by default
shop_user_start = np.insert(shop_user_list[:-1], 0, 0)
    # Product list for each shop; every shop carries [5, 26) products
shop_product_list = [np.sort(np.random.choice(
product_list, np.random.randint(5, 26))) for i in range(n_shop)]
    # Shop opening month
shop_start_dates = np.random.choice(pd.period_range(
'2015-01-01', '2018-12-31', freq='M'), size=n_shop)
    # Build the shop master table
df_shop = pd.DataFrame({'门店ID': shop_list,
'成立时间': shop_start_dates,
'用户规模': shop_user_size,
'用户起点ID': shop_user_start,
'产品': shop_product_list})
return df_shop
def init_orders(shop):
"""
开始生成订单
shop: Series
:return (df_order, df_order_x), DataFrame
"""
df_order_all = []
df_order_x_all = []
    # All users of this shop
user_list = np.arange(shop['用户起点ID'], shop['用户起点ID']+shop['用户规模'])
for day in pd.date_range(shop['成立时间'].to_timestamp('D', 'start'), '2020-06-30', freq='D'):
        # Generate 96~1440 orders per day, spread randomly across the shop's users
        # TODO: in some shops, long-time users should churn at a certain rate for a
        # more accurate probability distribution
        # freq is drawn randomly between 40s and 600s
time_freq = np.random.randint(40, 601)
        # Business hours run from 6 am to 10 pm
ot_list = pd.date_range(start=day+pd.Timedelta('6H'),
end=day+pd.Timedelta('22H'),
freq=f'{time_freq}S')
        # Order ID list for the day
        n_order = ot_list.size  # number of orders for the day
order_id_list = ot_list.to_series().apply(
lambda x: f'{shop["门店ID"]}X{x.timestamp():.0f}')
order_id_list.index = np.arange(n_order)
        # Binomial probability distribution over the user base
user_p = stats.binom.pmf(np.arange(user_list.size), n_order, p=0.5)
order_user = np.random.choice(user_list, p=user_p, size=n_order)
        # Number of products in each order
order_product_nlist = np.random.choice(
np.arange(1, 6), p=productn_p, size=n_order)
        # Build the day's order line-item table
x_order_prod = np.random.choice(
shop['产品'], size=order_product_nlist.sum())
x_orderid = order_id_list.loc[order_id_list.index.repeat(
order_product_nlist)]
x_order_prodn = np.random.choice(
[1, 2, 3], size=order_product_nlist.sum())
df_order_x = pd.DataFrame({'订单ID': x_orderid,
'产品': x_order_prod,
'数量': x_order_prodn})
df_order_x['单价'] = pd.S | eries(x_order_prod) | pandas.Series |
from typing import List
from typing import Optional
from typing import Callable
import numpy as np
import pandas as pd
import xarray as xr
from pathlib import Path
from sqlalchemy.orm import Session
import portfolio_management.paths as p
import portfolio_management.data.constants as c
from portfolio_management.io_utilities import pickle_dump
from portfolio_management.data.bases import Data
from portfolio_management.data.bases import Symbol
from portfolio_management.data.bases import Interval
from portfolio_management.data.utilities import session_scope
from portfolio_management.data.utilities import get_sessionmaker
def get_symbol_id(session: Session, symbol: str) -> int:
    return session.query(Symbol.id).filter(Symbol.name == symbol).first()[0]  # NOTE: raises TypeError if the symbol is missing (first() returns None)
def get_interval_id(session: Session, interval: str) -> int:
return session.query(Interval.id).filter(Interval.value == interval).first()[0]
def get_symbol_list(session: Session) -> list:
symbol_tuples = session.query(Symbol.name).all()
return [symbol_tuple[0] for symbol_tuple in symbol_tuples]
def get_dataframe(
database_name: str,
symbol: str,
interval: str,
folder_path: Optional[str] = None,
echo: bool = False,
) -> pd.DataFrame:
folder_path = str(p.get_databases_folder_path(folder_path))
with session_scope(
get_sessionmaker(folder_path, database_name, echo),
expire_on_commit=False
) as session:
symbol_id = get_symbol_id(session=session, symbol=symbol)
interval_id = get_interval_id(session=session, interval=interval)
instances = session.query(Data).filter(
Data.symbol_id == symbol_id,
Data.interval_id == interval_id,
).all()
records = []
for instance in instances:
records.append(instance.__dict__)
dataframe = | pd.DataFrame(records) | pandas.DataFrame |
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
from message_ix import Scenario, make_df
from message_ix.testing import make_dantzig, make_westeros
def test_make_df():
# DataFrame prepared for the message_ix parameter 'input' has the correct
# shape
result = make_df("input")
assert result.shape == (1, 12)
# …and column name(s)
assert result.columns[0] == "node_loc"
npt.assert_array_equal(result.columns[-2:], ("value", "unit"))
# Check correct behaviour when adding key-worded args:
defaults = dict(mode="all", time="year", time_origin="year", time_dest="year")
result = make_df("output", **defaults)
pdt.assert_series_equal(result["mode"], pd.Series("all", name="mode"))
pdt.assert_series_equal(result["time"], pd.Series("year", name="time"))
pdt.assert_series_equal(result["time_dest"], pd.Series("year", name="time_dest"))
def test_make_df_deprecated():
# Importing from the old location generates a warning
with pytest.warns(DeprecationWarning, match="from 'message_ix.utils' instead of"):
from message_ix.utils import make_df as make_df_unused # noqa: F401
base = {"foo": "bar"}
exp = | pd.DataFrame({"foo": "bar", "baz": [42, 43]}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# ### - PCA and Clustering for Cell painting Level-4 profiles (per dose treament)
#
# #### - Use Silhouette and Davies Bouldin scores to assess the number of clusters from K-Means
# #### - Use BIC scores to assess the number of clusters from Gaussian Mixture Models (GMM)
#
# [reference](https://sites.northwestern.edu/msia/2016/12/08/k-means-shouldnt-be-our-only-choice/)
# [references](https://gdcoder.com/silhouette-analysis-vs-elbow-method-vs-davies-bouldin-index-selecting-the-optimal-number-of-clusters-for-kmeans-clustering/)
# In[1]:
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import scipy.cluster.hierarchy as shc
from sklearn.metrics import pairwise_distances
from sklearn.cluster import KMeans, AgglomerativeClustering
from scipy.cluster.hierarchy import linkage, dendrogram, fcluster
from sklearn.metrics import silhouette_score
from sklearn.metrics import davies_bouldin_score
from sklearn.mixture import GaussianMixture as GMM
import os
import pathlib
import pandas as pd
import numpy as np
import re
from os import walk
from collections import Counter
import random
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import seaborn as sns
from pycytominer.cyto_utils import infer_cp_features
sns.set_style("darkgrid")
##sns.set_palette(["red", "green", "orange","blue","gray","purple"])
sns.set_context("talk")
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
# In[2]:
number_of_pcs = 300
# In[3]:
cp_level4_path = '../1.Data-exploration/Profiles_level4/cell_painting/cellpainting_lvl4_cpd_replicate_datasets'
output_path = "results/cell_painting"
# In[4]:
# Load common compounds
common_file = pathlib.Path(
"..", "6.paper_figures", "data", "significant_compounds_by_threshold_both_assays.tsv.gz"
)
common_df = | pd.read_csv(common_file, sep="\t") | pandas.read_csv |
import ntpath
from datetime import datetime as dt
import os
import pandas as pd
import numpy as np
import math
import sqlite3
# clean the original raw data by storing only the columns that we need, and removing the rest.
def clean(from_path, to_path, columns):
def convert_date(date):
if date == '':
return None
else:
if len(date.split('-')) == 3:
return date
year = date.split('/')[-1]
if len(year) == 4:
return dt.strptime(date, '%d/%m/%Y').date()
else:
return dt.strptime(date, '%d/%m/%y').date()
def convert_score(score):
if math.isnan(score):
return score
else:
return int(score)
df = pd.read_csv(from_path, error_bad_lines=False)
df = df[columns]
df = df[ | pd.notnull(df['Date']) | pandas.notnull |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 7 22:13:43 2020
@author: <NAME>
"""
#==================================
#ARIMA
#==================================
import os
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from pylab import rcParams
rcParams['figure.figsize'] = 10, 6
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from pmdarima.arima import auto_arima
from sklearn.metrics import mean_squared_error, mean_absolute_error
import math
hkexdata = pd.read_csv("C:/Users/<NAME>/Desktop/QEA Files/My Thesis/hkex.csv", index_col='Date', parse_dates=['Date'])
pd.set_option('display.max_columns', 100)
pd.set_option('precision', 4)
#Plot close price
plt.figure(figsize=(10,6))
plt.grid(True)
plt.xlabel('Date')
plt.ylabel('Close price')
plt.plot(hkexdata['Close'], color='blue')
plt.title('HKEX close price')
plt.show()
df_close = hkexdata['Close']
#Test for stationarity
def stationarity_check(timeseries):
#Determing rolling statistics
rollingmean = timeseries.rolling(12).mean()
rollingstd = timeseries.rolling(12).std()
#Plot rolling statistics:
plt.grid(True)
plt.xlabel('Date')
plt.ylabel('Close price')
plt.plot(timeseries, color='blue',label='Original')
plt.plot(rollingmean, color='red', label='Rolling Mean')
plt.plot(rollingstd, color='black', label = 'Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean and the Standard Deviation')
plt.show(block=False)
print("Results of dickey fuller test")
adftest = adfuller(timeseries,autolag='AIC')
result = pd.Series(adftest[0:4],index=['Test Statistics','p-value','No. of lags used','Number of observations used'])
for key,values in adftest[4].items():
result['critical value (%s)'%key] = values
print(result)
stationarity_check(df_close)
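# Interpretation note (illustrative, not from the original script): if the ADF
# test statistic sits above the critical values and the p-value exceeds 0.05,
# we fail to reject the unit-root hypothesis, so the close-price series is
# treated as non-stationary and is log-transformed/differenced before ARIMA.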
#Decompose time series
result = seasonal_decompose(df_close, model='multiplicative', freq = 30)
fig = plt.figure()
fig = result.plot()
fig.set_size_inches(10, 6)
#Take a log of the series, find the rolling mean and std. of the series.
from pylab import rcParams
df_log = np.log(df_close)
moving_avg = df_log.rolling(12).mean()
std_dev = df_log.rolling(12).std()
#Plot the findings
plt.grid(True)
plt.xlabel('Date')
plt.ylabel('Log of Close price')
plt.legend(loc='best')
plt.title('Moving Average')
plt.plot(moving_avg, color="blue", label = "Mean")
plt.plot(std_dev, color ="black", label = "Standard Deviation")
plt.legend()
plt.show()
#Split data into train-test sets
strain = '2015-01-01'
etrain = '2018-12-31'
stest = '2019-01-01'
etest = '2019-12-31'
train_data = df_log.loc[strain:etrain]
test_data = df_log.loc[stest:etest]
print('train_data:', train_data.shape)
print('test_data:', test_data.shape)
#Plot split data
plt.figure(figsize=(10,6))
plt.grid(True)
plt.xlabel('Date')
plt.ylabel('Close price')
plt.plot(df_log, 'blue', label='Train data')
plt.plot(test_data, 'red', label='Test data')
plt.legend()
#Determine optimal values for p,q minimizing AIC; let the algorithm decide d. Use ADF test.
model_autoARIMA = auto_arima(train_data, start_p=0, start_q=0,
test='adf',
max_p=10, max_q=10,
m=1,
d=None,
seasonal=False,
start_P=0,
D=0,
trace=True,
error_action='ignore',
suppress_warnings=True,
stepwise=True)
print(model_autoARIMA.summary())
#Review the residual plots
model_autoARIMA.plot_diagnostics(figsize=(10,6))
plt.show()
#Create ARIMA(p,d,q) model
model = ARIMA(train_data, order=(1, 1, 1))
fitted = model.fit(disp=-1)
print(fitted.summary())
# Forecast the stock prices on the test set keeping 95% confidence level
fc, se, conf = fitted.forecast(246, alpha=0.05)
fc_series = pd.Series(fc, index=test_data.index)
lower_series = pd.Series(conf[:, 0], index=test_data.index)
upper_series = pd.Series(conf[:, 1], index=test_data.index)
train_data_=np.exp(train_data).astype(int)
test_data_=np.exp(test_data).astype(int)
fc_series_=np.exp(fc_series).astype(int)
#Plot actual, predicted prices
plt.figure(figsize=(10,6), dpi=100)
plt.grid(True)
plt.plot(train_data_, color = 'blue', label='Training')
plt.plot(test_data_, color = 'red', label='Actual Stock Price')
plt.plot(fc_series_, color = 'black',label='Predicted Stock Price')
plt.title('HKEX Stock Price Prediction')
plt.xlabel('Date')
plt.ylabel('Close price')
plt.legend(loc='upper left', fontsize=8)
plt.show()
#Performance scores
mse = mean_squared_error(test_data_, fc_series_)
print('MSE: '+str(mse))
mae = mean_absolute_error(test_data_, fc_series_)
print('MAE: '+str(mae))
rmse = math.sqrt(mean_squared_error(test_data_, fc_series_))
print('RMSE: '+str(rmse))
mape = np.mean(np.abs( test_data_ - fc_series_)/np.abs(test_data_))
print('MAPE: '+str(mape))
#==================================
#FACEBOOK'S PROPHET
#==================================
#Facebook's Prophet
from fbprophet import Prophet
hkexdata = pd.read_csv("C:/Users/<NAME>/Desktop/QEA Files/My Thesis/hkex.csv", parse_dates=['Date'])
hkexdata.reset_index(drop=False, inplace=True)
#Name the input dataframe’s columns as ds and y.
hkexdata.rename(columns={'Date': 'ds', 'Close': 'y'}, inplace=True)
hkexdata = hkexdata.drop(['Open', 'Low', 'Volume', 'High', 'Adj Close'], axis=1)
#Split the series into the training and test sets:
train_indices = hkexdata.ds.apply(lambda x: x.year) < 2019
df_train = hkexdata.loc[train_indices].dropna()
df_test = hkexdata.loc[~train_indices].reset_index(drop=True)
print(df_train.head())
print(df_test.head())
#Create the model and fit the data:
prophet_model = Prophet(seasonality_mode='additive', daily_seasonality = True)
prophet_model.add_seasonality(name='monthly', period=30.5, fourier_order=5)
prophet_model.fit(df_train)
#Forecast the HKEX prices and plot the results:
df_future = prophet_model.make_future_dataframe(periods=365)
df_prediction = prophet_model.predict(df_future)
print(df_prediction.head(5))
#Plot predictions
prophet_model.plot(df_prediction)
#Inspect the decomposition of the time series:
prophet_model.plot_components(df_prediction)
#Merge the test set with the forecasts
selected_columns = ['ds', 'yhat_lower', 'yhat_upper', 'yhat']
df_prediction = df_prediction.loc[:, selected_columns].reset_index(drop=True)
#print(df_pred.head(3))
df_test = df_test.merge(df_prediction, on=['ds'], how='left')
#print(df_test.head(3))
df_test.ds = | pd.to_datetime(df_test.ds) | pandas.to_datetime |
from pickle import TRUE
from flask import *
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import random
import socket
import os
import time
import rss23
pid = 3
def rssrd(r, xy,client):
f = {}
g = {}
R = {}
esend = {}
epk1 = {}
for j in range(rss23.n):
if j+1==pid:
continue
f[pid,(j+1)] = round(random.uniform(3*(10**7),4*(10**7)),6)
g[pid,(j+1)] = round(random.uniform(3*(10**7),4*(10**7)),6)
R[pid,(j+1)] = random.uniform(11*(10**18),19*(10**18))
for j in range(rss23.n):
if j+1==pid:
continue
prod = f[pid,(j+1)] * r
esend[pid,(j+1)] = ( rss23.public_key.encrypt(prod) , f[pid,(j+1)] )
for j in range(1,4):
if j == pid:
rss23.client_send(esend, client)
else:
print("Ready to receive")
rss23.client_receive(pid, client)
print("Received data")
print(rss23.erecive)
fj = {}
for i in rss23.erecive.keys():
epk1[i[0],i[1]]=( rss23.erecive[i][0] * g[i[1],i[0]] * xy + R[i[1],i[0]] , g[i[1],i[0]] )
fj[i] = rss23.erecive[i][1]
print("fj ",fj,"\n")
print()
for j in range(1,4):
if j == pid:
rss23.epk_send(epk1, client)
else:
rss23.epk_receive(pid, client)
print("Received dat 01a")
print(rss23.epkfinal)
share1 = {}
share2 = {}
for i in rss23.epkfinal.keys():
nr = rss23.private_key.decrypt(rss23.epkfinal[i][0])
dr = rss23.epkfinal[i][1] * f[i]
share1[i] = nr/dr
share2[i] = - R[i] / ( fj[(i[1],i[0])] * g[i] )
print('ok')
t = round(random.uniform((-0.5),(0.5)),6)
si = 0
for i in share1.keys():
si += share1[i] + share2[i] + ( r + t ) * xy
rss23.s = []
for j in range(1,4):
if j == pid:
rss23.si_send(si, client)
else:
rss23.si_receive(client)
rss23.s.append(si)
print(rss23.s)
return sum(rss23.s)
def rss(d2,client):
print("**********************102********************")
print(type(d2))
x, y = d2['x'], d2['y']
alphax = round(random.uniform((-0.5),(0.5)),6)
alphay = round(random.uniform((-0.5),(0.5)),6)
x = x + alphax
y = y + alphay
r = round(random.uniform(3000,4000),6)
sx = rssrd(r, x,client)
sy = rssrd(r, y,client)
return sx/sy
pat=os.getcwd() #to get working directory
print("CurrentPath : "+pat)
upfile=""
cols=[] #to store column names
TstCols=[]
Origional_data=[]
normalized_data=[] #to store normalized data
rotateded_data=[] #to store rotateded data about a given angle
alpha_graph=[] #This will hold file-names of alpha graph images
beta_graph=[] #This will hold file-names of beta graph images
Test_Data=[]
Normallized_Test_Data=[]
Origional_test_data=[]
rotateded_test_data=[]
#DATAFRAMES initialization
ndata= pd.DataFrame()
dat = pd.DataFrame()
dat4 = pd.DataFrame()
#Function to rotate data
def rotate_mult(dat,a,b):
cos_a = round(math.cos(a),4)
sin_a = round(math.sin(a), 4)
cos_b = round(math.cos(b),4)
sin_b = round(math.sin(b),4)
x = [[cos_a,-sin_a,0,0],[ sin_a, cos_a,0,0],[0,0, cos_b, -sin_b],[0,0, sin_b, cos_b]]
prod=np.dot(dat,x) #Rotating data (Dot product of data with x)
return prod
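# Quick sanity check (illustrative only, not used by the Flask views): the 4x4
# matrix above is block-diagonal, so columns (0, 1) are rotated by angle `a` and
# columns (2, 3) by angle `b` independently. Because the product is data . x
# (row vectors on the left), rotating the single point [1, 0, 1, 0] by
# a = b = 90 degrees gives approximately [0, -1, 0, -1]:
#   rotate_mult([[1, 0, 1, 0]], math.radians(90), math.radians(90))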
def clear():
#Clear all lists
angles.clear()
alpha_graph.clear()
beta_graph.clear()
cols.clear()
Origional_data.clear()
normalized_data.clear()
rotateded_data.clear()
Test_Data.clear()
TstCols.clear()
Normallized_Test_Data.clear()
Origional_test_data.clear()
rotateded_test_data.clear()
#delete older graphs
dir = pat+'/static'
for f in os.listdir(dir):
os.remove(os.path.join(dir, f))
app = Flask(__name__)
@app.route('/')
def upload():
clear()
os.chdir(pat) #Switch to working directory
return render_template("file_upload_form.html")
@app.route('/Showdata', methods = ['POST'])
def success():
if request.method == 'POST':
f = request.files['file']
f.save(f.filename)
upfile = f.filename
data = pd.read_csv(upfile)
for i in data.values.tolist():
Origional_data.append(i)
print ("*********Origional data******************")
print(pd.DataFrame(Origional_data))
#ndata = data.select_dtypes(include=np.number)
ndata = data.iloc[:,:-1].select_dtypes(include=np.number)
os.remove(upfile)
print ("*********Origional Numeric data******************")
for i in data.values.tolist():
Origional_data.append(i)
print(ndata)
for col_name in ndata.columns:
cols.append(col_name)
print(cols)
#Normallized data
dat = ndata.apply(lambda x: 5*(x - x.min()) / (x.max() - x.min()))
print ("*********Normallized data******************")
for row in dat.values.tolist():
normalized_data.append(row)
print(pd.DataFrame(normalized_data,columns=cols))
length=len(dat.columns)
cnt = length//4
if length % 4>0:
cnt+=1
print("length = ",length)
print('count = ',cnt)
i=0
j=4
k=0
while k<cnt:
if j<length:
dat4 = dat.iloc[:,i:j]
i=j
j+=4
else:
dat4 = dat.iloc[:,-4:]
rot = pd.DataFrame(columns=[0,1,2,3])
dat4 = dat4.values.tolist()
orgdata = pd.DataFrame(dat4)
for a in range(360):
R0 = math.radians(a)
                prod = rotate_mult(dat4,R0,R0) #Rotating data
rotdata = pd.DataFrame(prod)
osr = orgdata.subtract(rotdata)
rtf = osr.var()
rot.loc[len(rot.index)]=[round(rtf[0],5),round(rtf[1],5),round(rtf[2],5),round(rtf[3],5)]
print("upfile : "+upfile)
os.chdir(pat+"\static")
i=0
fig = plt.figure(figsize=(10,5))
plt.plot(rot[0],label='1')
plt.plot(rot[1],label='2')
plt.ylabel('Variance')
plt.xlabel('alpha')
plt.legend(loc='upper left')
#plt.show()
plt.savefig(upfile+'_Alpha'+str(k)+'.png')
alpha_graph.append(upfile+'_Alpha'+str(k)+'.png')
fig = plt.figure(figsize=(10,5))
plt.plot(rot[2],label='3')
plt.plot(rot[3],label='4')
plt.ylabel('Variance')
plt.xlabel('beta')
plt.legend(loc='upper left')
#plt.show()
plt.savefig(upfile+'_Beta'+str(k)+'.png')
beta_graph.append(upfile+'_Beta'+str(k)+'.png')
k+=1
print(len(normalized_data))
return render_template("OrgData.html",r=ndata.values.tolist(),cols=cols,dat=dat.values.tolist(),zip=zip,round=round,l=len(normalized_data))
@app.route('/rotate')
def graph():
print(alpha_graph)
print(beta_graph)
p=os.getcwd()
print("Graph directory : ",p)
return render_template("graphs.html",Ag=alpha_graph,Bg=beta_graph,zip=zip)
angles=[]
@app.route('/continue',methods = ['POST'])
def enter_angle():
#Rotate about a given angle
alpha = math.radians(int(request.form['alpha']))
angles.append(int(request.form['alpha']))
beta = math.radians(int(request.form['beta']))
angles.append(int(request.form['beta']))
dat=pd.DataFrame(normalized_data,columns=cols)
length=len(dat.columns)
cnt = length//4
if length % 4>0:
cnt+=1
i=0
j=4
k=0
while k<cnt:
if j<length:
dat4 = dat.iloc[:,i:j]
else:
dat4 = dat.iloc[:,-4:]
i=length-4
j=length
dat4 = dat4.values.tolist()
        prod = rotate_mult(dat4,alpha,beta) #Rotating data about a given angle
dat.iloc[:,i:j] = prod
i=j
j+=4
k+=1
dat = dat.round(decimals=1)
data=pd.DataFrame(Origional_data)
dat['y'] = data.iloc[:,-1]
cols.append('y')
#Store rotated data in a list
for row in dat.values.tolist():
rotateded_data.append(row)
print(pd.DataFrame(rotateded_data))
return render_template("RotatedData.html",cols=cols,r=rotateded_data,round=round,l=len(rotateded_data),z=zip)
@app.route('/Show-Test',methods = ['POST'])
def showTestData():
if request.method == 'POST':
f = request.files['file']
f.save(f.filename)
upfile = f.filename
data = pd.read_csv(upfile)
for i in data.values.tolist():
Origional_test_data.append(i)
print ("*********Origional Test data******************")
print( | pd.DataFrame(Origional_test_data) | pandas.DataFrame |
import functools
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
from selection.tests.instance import gaussian_instance
from selection.learning.utils import (partial_model_inference,
pivot_plot,
lee_inference)
from selection.learning.core import normal_sampler, keras_fit
from selection.learning.learners import sparse_mixture_learner
def simulate(n=2000, p=500, s=20, signal=(3 / np.sqrt(2000), 4 / np.sqrt(2000)), sigma=2, alpha=0.1, B=10000):
# description of statistical problem
X, y, truth = gaussian_instance(n=n,
p=p,
s=s,
equicorrelated=False,
rho=0.5,
sigma=sigma,
signal=signal,
random_signs=True,
scale=False)[:3]
print(np.linalg.norm(truth))
dispersion = sigma**2
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
smooth_sampler = normal_sampler(S, covS)
def meta_algorithm(XTX, XTXi, lam, sampler):
p = XTX.shape[0]
success = np.zeros(p)
loss = rr.quadratic_loss((p,), Q=XTX)
pen = rr.l1norm(p, lagrange=lam)
scale = 0.
noisy_S = sampler(scale=scale)
loss.quadratic = rr.identity_quadratic(0, 0, -noisy_S, 0)
problem = rr.simple_problem(loss, pen)
soln = problem.solve(max_its=300, tol=1.e-10)
success += soln != 0
return tuple(sorted(np.nonzero(success)[0]))
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = np.linalg.norm(resid)**2 / (n-p)
lam = 4. * np.sqrt(n)
selection_algorithm = functools.partial(meta_algorithm, XTX, XTXi, lam)
# run selection algorithm
df = partial_model_inference(X,
y,
truth,
selection_algorithm,
smooth_sampler,
fit_probability=keras_fit,
fit_args={'epochs':30, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'},
success_params=(1, 1),
B=B,
alpha=alpha,
learner_klass=sparse_mixture_learner)
lee_df = lee_inference(X,
y,
lam,
dispersion,
truth,
alpha=alpha)
return pd.merge(df, lee_df, on='variable')
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
U = np.linspace(0, 1, 101)
plt.clf()
for i in range(500):
df = simulate(B=10000)
csvfile = 'lee_multi_500.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try: # concatenate to disk
df = pd.concat([df, | pd.read_csv(csvfile) | pandas.read_csv |
import pandas as pd
filepath_dict = {'yelp': 'data/sentiment_analysis/yelp_labelled.txt',
'amazon': 'data/sentiment_analysis/amazon_cells_labelled.txt',
'imdb': 'data/sentiment_analysis/imdb_labelled.txt'}
df_list = []
for source, filepath in filepath_dict.items():
df = | pd.read_csv(filepath, names=['sentence', 'label'], sep='\t') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 19:51:21 2018
@author: Bob
"""
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords
from sqlalchemy import create_engine
from config import config
import pandas as pd
import numpy as np
import unidecode
import psycopg2
import re
import click
from tqdm import tqdm
from mpproj.routefinder.StyleInformation import *
def MPAnalyzer():
'''Finishes cleaning routes using formulas that require information about
the whole database.
The Bayesian rating system, route clustering algorithm and calculation of
TFIDF values require information about all routes, and not just one that is
of interest. Therefore, this file must be run after all data collection
has finished. This function is a handler for six functions:
- bayesian_rating: Calculates the weighted quality rating for each
route
- route_clusters: Groups routes together based on geographic distance
- idf: Calculates inverse-document-frequency for words in the route
descriptions
- tfidf: Calclates term-frequency-inverse-document-frequency for words
in route descriptions
- normalize: Normalizes vectors for TFIDF values
- find_route_styles: Compares routes to the ideal to help categorize
Returns:
Updated SQL Database
'''
print('Connecting to the PostgreSQL database...', end='')
engine = create_engine(
'postgresql+psycopg2://postgres:postgres@localhost:5432/routes')
params = config.config()
conn = psycopg2.connect(**params)
cursor = conn.cursor()
print('Connected')
tqdm.pandas()
def tfidf(min_occur=0.001, max_occur=0.9):
''' Calculates Term-Frequency-Inverse-Document-Frequency for a body of
documents.
Term-Frequency-Inverse-Document-Frequency(TFIDF) is a measure of the
importance of words in a body of work measured by how well they help to
distinguish documents. Words that appear frequently in documents score
high on the Term-Frequency metric, but if they are common across the
corpus, they will have low Inverse-Document-Frequency scores. TFIDF
can then be used to compare documents to each other, or, in this case,
to documents with known topics.
TFIDF = TF * IDF
TF = Term Frequency
IDF = Inverse Document Frequency
Args:
min_occur(int): The minimum number of documents that a word has to
appear in to be counted. Included to ignore words that only
appear in a few documents, and are therefore not very useful
for categorization.
max_occur(int): The maximum number of documents that a word can
appear in to be counted. This is included to ignore highly
common words that don't help with categorization.
Returns:
routes(pandas Dataframe): Holds route-document information,
including term-frequency, inverse-document-frequency, TFIDF,
and normalized TFIDF values
Updated SQL Database: Updates the TFIDF table on main DB with the
routes dataframe
'''
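        # Worked example (illustrative numbers, not from the data): if 'arete'
        # appears 3 times in a 150-word description, tf = 3 / 150 = 0.02; with
        # idf = 5.0 the raw tfidf is 0.02 * 5.0 = 0.1 before the per-route
        # length normalization applied below.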
print('Getting number of routes', end=' ', flush=True)
cursor.execute('SELECT COUNT(route_id) FROM Routes')
num_docs = cursor.fetchone()[0]
print(num_docs)
print('Getting route text data', flush=True)
min_occur *= num_docs
max_occur *= num_docs
query = 'SELECT route_id, word, tf FROM Words'
routes = pd.read_sql(query, con=conn, index_col='route_id')
print('Removing non-essential words.', flush=True)
routes = routes.groupby('word', group_keys=False)
routes = routes.progress_apply(
weed_out,
min_occur=min_occur,
max_occur=max_occur)\
.set_index('route_id')
print('Getting IDF', flush=True)
routes = routes.groupby('word', group_keys=False)
routes = routes.progress_apply(
idf,
num_docs=num_docs).set_index('route_id')
print('Calculating TFIDF', flush=True)
routes['tfidf'] = routes['tf'] * routes['idf']
print('Normalizing TFIDF values', flush=True)
routes = routes.groupby(routes.index, group_keys=False)
routes = routes.progress_apply(lambda x: normalize('tfidf', table=x))
print('Writing TFIDF scores to SQL', flush=True)
routes = routes.set_index('route_id')
routes = routes[['word', 'idf', 'tfidfn']]
# This will take a long time
routes.to_sql('TFIDF', con=engine, if_exists='replace', chunksize=1000)
def weed_out(table, min_occur, max_occur):
'''Removes words that are too common or too rare
Args:
table(Series): Instances of a word
min_occur: Fewest number acceptable
max_occur: Greatest number acceptable
Returns:
table: updated series'''
if min_occur < len(table) < max_occur:
return table.reset_index()
def idf(word, num_docs):
''' Finds inverse document frequency for each word in the selected
corpus.
Inverse document frequency(IDF) is a measure of how often a word
appears in a body of documents. The value is calculated by:
IDF = 1 + log(N / dfj)
N = Total number of documents in the corpus
dfj = Document frequency of a certain word, i.e., the number of
documents that the word appears in.
Args:
word(pandas dataframe): A dataframe composed of all instances of a
word in a corpus.
num_docs(int): The total number of documents in the corpus
Returns:
word(pandas dataframe): The same document with the calculated IDF
score appended.
'''
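        # Worked example (illustrative): with num_docs = 100,000 routes, a word
        # found in 100 descriptions gets idf = 1 + ln(1000) ≈ 7.9, while a word
        # found in 50,000 descriptions gets idf = 1 + ln(2) ≈ 1.7.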
word['idf'] = 1 + np.log(num_docs / len(word))
return word.reset_index()
def normalize(*columns, table, inplace=False):
''' Normalizes vector length.
Vector values must be normalized to a unit vector to control for
differences in length. This process is done by calculating the length
of a vector and dividing each term by that value. The resulting
'unit-vector' will have a length of 1.
Args:
table(pandas dataframe): Table hosting vector to be normalized
*columns(str): Names of columns to be normalized
inplace(Boolean, default = False):
If inplace=False, adds new columns with normalized values.
If inplace=True, replaces the columns.
Returns:
table(pandas dataframe): Updated dataframe with normalized values.
'''
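        # Worked example (illustrative): a column holding [3, 4] has vector
        # length sqrt(3**2 + 4**2) = 5, so its normalized values are [0.6, 0.8].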
for column in columns:
if not inplace:
column_name = column + 'n'
elif inplace:
column_name = column
length = np.sqrt(np.sum(table[column] ** 2))
table[column_name] = table[column] / length
return table.reset_index()
def fill_null_loc():
"""Fills empty route location data.
Not all routes have latitude and longitude coordinates, so we must use
the coordinates of their parent area instead as a rough estimate. This
function first grabs all routes with no data, then fills in the data
with the lowest level area it can, going up as many areas as needed
until it finds one with proper coordinates.
Returns:
Updated SQL Database
"""
print('Filling in empty locations', flush=True)
# Select a route without location data
cursor.execute('''
SELECT route_id, area_id, name FROM Routes
WHERE latitude is Null OR longitude is Null
LIMIT 1''')
route = cursor.fetchone()
while route is not None:
# Route ID
rid = route[0]
# From ID
fid = route[1]
name = route[2]
print(f'Finding location information for {name}')
# Loops until it finds proper data
lat, long = None, None
while lat == None or long == None:
# Gets latitude and longitude from parent area
cursor.execute(f'''
SELECT
latitude,
longitude,
from_id
FROM Areas
WHERE id = {fid}
LIMIT 1''')
loc = cursor.fetchone()
lat, long = loc[0], loc[1]
fid = loc[2]
# Updates DB
cursor.execute(f'''
UPDATE Routes
SET
latitude = {lat},
longitude = {long}
WHERE route_id = {rid}''')
conn.commit()
cursor.execute('''
SELECT
route_id,
area_id,
name
FROM Routes
WHERE
latitude is Null
OR longitude is Null
LIMIT 1''')
route = cursor.fetchone()
def route_clusters(routes):
''' Clusters routes into area groups that are close enough to travel
between when finding climbing areas.
Routes can be sorted into any number of sub-areas below the 'region'
parent. By clustering the routes based on latitude and longitude
instead of the name of the areas and parent areas, the sorting
algorithm will be able to more accurately determine which routes are
close together. This function uses SciKit's Density Based Scan
clustering algorithm. The algorithm works by grouping points together
in space based on upper-limits of distance and minimum numbers of
members of a cluster. More generally, the algorithm first finds the
epsilon neighborhood of a point. This is the set of all points whose
distance from a given point is less than a specified value epsilon.
Then, it finds the connected core-points, which are the points that
have at least the minimum number of connected points in its
neighborhood. Non-core points are ignored here. Finally, the
algorithm assigns each non-core point to a nearby cluster if is within
epsilon, or assigns it to noise if it is not.
The advantages of this is that the scan clusters data of any shape, has
a robust response to outliers and noise, and that the epsilon and min
points variables can be adjusted.
This function returns the label/name for the cluster that a route
appears in, as well as the number of other routes in that same cluster.
This will allow the sorting algorithm to more heavily weight routes
that are clustered near others.
Args:
routes(pandas df): Pulled from cleaned route SQL DB with columns:
                - route_id (int, unique): Unique route identifiers
- latitude (float)
- longitude (float)
Returns:
routes(pandas df): Updated with clustered area group number:
                - route_id (int, unique): Unique route identifiers
- area_group (int): Cluster id
'''
# Route location
lats = routes['latitude']
longs = routes['longitude']
locs = []
for x in range(len(lats)):
locs.append((lats.iloc[x], longs.iloc[x]))
# Converted into df
locs = StandardScaler().fit_transform(locs)
        # Max neighborhood radius (in standardized coordinate units)
epsilon = 0.0007
# Min number of routes in a cluster
min_routes = 3
        # Density-based scan
db = DBSCAN(eps=epsilon, min_samples=min_routes).fit(locs)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
# Cluster names
labels = db.labels_
unique, counts = np.unique(labels, return_counts=True)
counts = dict(zip(unique, counts))
# Number of routes in the same cluster as a given route
area_counts = []
for label in labels:
if label >= 0:
# Counts number of routes
area_counts.append(counts[label])
# Areas are given a cluster id of -1 if the are not part of a
# cluster
elif label == -1:
# If so, there is only 1 route in their 'cluster'
area_counts.append(1)
routes['area_group'] = labels
routes['area_counts'] = area_counts
routes = routes[['area_group', 'area_counts']]
return routes
def bayesian_rating(routes):
''' Updates route quality with weighted average.
The Bayesian average rating system helps to mitigate the effects of
user ratings for routes that only have a few reviews. The weighted
rating works by first finding the average rating for all routes, and
using that to bring low-rated routes up and high-rated routes down.
The result - the Bayes rating - is an updated rating weighted by the
average number of stars across all routes. The weight decreases
according to the number of votes cast.
            Bayesian rating = ((r * v) + (a * 10)) / (v + 10)
r = Route rating
v = Number of votes
a = Average rating across all routes
Essentially, the function gives each route phantom-users who all give
the route the average score. For routes with a high number of ratings
the effect of the additional phantom users is minimal, but for routes
with only one or two actual user ratings, the effect is large. This
keeps 4-star rated routes from dominating the sorting algorithm if they
only have a few votes, and helps promote unrated routes that may be of
high quality.
Args:
routes(pandas df): Pulled from cleaned route SQL DB with columns:
- route_id (int, unique): Unique route identifiers
- stars (float): Raw average rating
- votes (int): Number of user ratings
Returns:
routes(pandas df): Updated dataframe with Bayes rating and columns:
                - route_id (int, unique): Unique route identifiers
- bayes (float): Weighted average rating
'''
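        # Worked example (illustrative, assuming an all-routes average of ~2.5
        # stars): a 4.0-star route with 2 votes gets
        # ((2 * 4.0) + (2.5 * 10)) / (2 + 10) ≈ 2.75, while a 4.0-star route
        # with 200 votes keeps ((200 * 4.0) + 25) / 210 ≈ 3.93.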
# Average rating of all routes
stars = pd.read_sql('SELECT stars FROM Routes', con=conn)
avg_stars = np.mean(stars)['stars']
# Weighted Bayesian rating
routes['bayes'] = round((((routes['votes'] * routes['stars'])
+ avg_stars * 10) / (routes['votes'] + 10)), 1)
return routes['bayes'].to_frame()
def find_route_styles(*styles, path='Descriptions/'):
''' Returns weighted scores that represent a route's likelihood of
containing any of a series of features, e.g., a roof, arete, or crack.
Route names, descriptions, and user comments can indicate the presence
of rock and route features. Term-Frequency-Inverse-Document-Frequency
(TFIDF) values for the blocks of text gathered for each route can be
compared to 'archetypal' routes to glean insight into these features.
This comparison is further clarified using Bayesian statistics to
measure the credibility of the comparision, and is then adjusted to
reflect that. At present, each route is compared against archetypal
routes with the following features:
Aretes - A sharp vertical edge of a block, cliff or boulder
Chimney - A large vertical crack that a climber can fit in and
climb using opposing pressure
Crack - Smaller cracks ranging from finger-sized to a few inches
wide (off-width)
Slab - Low-angle rock faces (less than vertical)
Overhang - Roofs, caves or more-than-vertical rock faces
More styles or archetypes can be added in the future by creating .txt
files and adding them to the 'Descriptions' sub-folder, then adding the
style to the *styles argument.
Args:
*styles(str): The name of the files that each route will be
compared against.
path(str): Folder location of the Database
Returns:
Updated SQL Database with weighted route scores
'''
def text_splitter(text):
'''Splits text into words and removes punctuation.
Once the text has been scraped it must be split into individual
words for further processing. The text is all put in lowercase,
then stripped of punctuation and accented letters. Tokenizing helps
to further standardize the text, then converts it to a list of
words. Each word is then stemmed using a Porter stemmer. This
removes suffixes that make similar words look different, turning,
for example, 'walking' or 'walked' into 'walk'. Stop words are
also filtered out at this stage.
Args:
text(str): Single string of text to be handled
Returns:
text(list): List of processed words.'''
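            # Worked example (illustrative): "Climbing the overhanging cracks!"
            # -> ['climb', 'overhang', 'crack'] after lowercasing, stripping
            # punctuation, tokenizing, dropping stopwords and Porter stemming.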
# Converts to lowercase
text = text.lower()
# Strips punctuation and converts accented characters to unaccented
text = re.sub(r"[^\w\s]", '', text)
text = unidecode.unidecode(text)
# Tokenizes words and returns a list
text = word_tokenize(text)
# Remove stopwords
stop_words = set(stopwords.words('english'))
# Stems each word in the list
ps = PorterStemmer()
text = [ps.stem(word) for word in text if word not in stop_words]
return text
def archetypal_tf(*styles, path):
''' Returns term-frequency data for descriptions of archetypal
climbing routes and styles. This will be used later to categorize
routes.
Term-Frequency = t / L
t = Number of appearances for a word in a document
L = Number of total words in the document
Args:
*styles(str): Name of .txt file to parse. Can either be the
plain name or have the .txt suffix
path(str): Path to folder with route descriptions
Returns:
tf.csv(CSV File): CSV File of term frequency for each style.
This will help determine if TF values are what is expected
when adding new styles.
archetypes(Pandas Dataframe): Holds words term-frequency values
for words in the files.'''
# Initializes Dataframe
archetypes = pd.DataFrame()
for style in styles:
# Formats suffix
if style.endswith('.txt'):
# Opens .txt file
try:
file = open(path + style)
style = style[:-4]
# Returns errors
except OSError as e:
return e
else:
try:
file = open(path + style + '.txt')
except OSError as e:
return e
# Creates single block of text
text = ''
for line in file:
text += line
# Splits and processes text
text = text_splitter(text)
# Length of document in words
length = len(text)
# Counts appearances of each word
text = pd.DataFrame({'word': text})['word']\
.value_counts()\
.rename('counts')\
.to_frame()
# Calculates Term-Frequency
text[style] = text['counts'].values / length
text = text[style]
# Creates master Dataframe of Termfrequency data for each style
archetypes = pd.concat([archetypes, text], axis=1, sort=True)
archetypes.to_csv(path + 'TF.csv')
return archetypes
def archetypal_idf(words):
            ''' Finds inverse document frequency (IDF) for each word in the
archetypal style documents.
The archetypal documents should not be included in the calculation
of IDF values, so this function just pulls the IDF values from the
database after they are calculated. IDF is a measure of how often a
word appears in a body of documents. The value is calculated by:
IDF = 1 + log(N / dfj)
N = Total number of documents in the corpus
dfj = Document frequency of a certain word, i.e., the number
of documents that the word appears in.
Args:
word(list): All unique words in all the archetype documents
Returns:
archetypes(pandas dataframe): IDF values for each word pulled
from the Database.'''
# Formats query to include list of unique words
query = f'''
SELECT
DISTINCT(word),
idf
FROM "TFIDF"
WHERE word IN {words}'''
# Pulls SQL data into Pandas dataframe
archetypes = pd.read_sql(query, con=conn, index_col='word')
return archetypes
def get_routes(route_ids=None):
'''Creates Pandas Dataframe of normalized TFIDF values for each
word in each route description.
Args:
route_ids: Optional. Allows for a slice to be parsed.
Returns:
routes(Pandas Series): MultiIndex series with indexes
'route_id' and 'word' and column 'tfidfn' - Normalized TFIDF'''
# Pulls route_id, word, and normalized TFIDF value
if route_ids is None:
query = '''
SELECT
route_id,
word,
tfidfn
FROM "TFIDF"'''
else:
route_ids = tuple(route_ids)
query = f'''
SELECT
route_id,
word,
tfidfn
FROM "TFIDF"
WHERE route_id in {route_ids}'''
# Creates Pandas Dataframe
routes = pd.read_sql(
query,
con=engine,
index_col=['route_id', 'word'])
routes = routes.squeeze()
return routes
def get_word_count(route_ids=None):
'''Finds length of route description in words.
Args:
route_ids: Optional. Allows for a slice to be parsed
Returns:
word_count(Pandas dataframe): Dataframe with index route_id and
column 'word_count' - length of a route description in
words'''
# Pulls route_id and word_count for each route
if route_ids is None:
query = 'SELECT route_id, word_count FROM Words'
else:
route_ids = tuple(route_ids)
query = f'''
SELECT
route_id,
word_count
FROM Words
WHERE route_id in {route_ids}'''
# Calculates document length
word_count = pd.read_sql(query,
con=conn,
index_col='route_id').groupby(level=0)
# We will take the log of the word count later, so we cannot leave
# zeroes in the series
word_count = word_count.progress_apply(lambda x: np.sum(x) + 0.01)
word_count.fillna(0.01, inplace=True)
return word_count
def cosine_similarity(route, archetypes):
'''Compares routes to archetypes to help categorize route style.
Cosine similarity is the angle between two vectors. Here, the
normalized TFIDF values for each word in the route description and
archetype documents serve as the coordinates of the vector. Finding
the cosine similarity is therefore simply their dot-product.
Cosine Similarity = Σ(ai * bi)
ai = TFIDF for a word in the route description
bi = TFIDF for the same word in the archetype document.
The similarity will range between 0 and 1, 1 being identical and 0
having no similarity.
Args:
route(Pandas dataframe): MultiIndex frame with indexes route_id
and word and columns normalized TFDIF values
archetypes(Pandas dataframe): Frame with index word and columns
normalized TFIDF values.
Returns:
terrain(Pandas dataframe): Frame with columns for each style,
                    holding cosine similarity values.'''
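            # Worked example (illustrative): if a route's normalized TFIDF vector
            # is {'overhang': 0.8, 'roof': 0.6} and the overhang archetype's
            # (roughly unit-length) vector is {'overhang': 0.9, 'roof': 0.3, ...},
            # the cosine similarity is 0.8 * 0.9 + 0.6 * 0.3 = 0.9; words absent
            # from either vector contribute nothing.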
            try:
                rid = route.index[0][0]
            except (IndexError, TypeError):
                # Empty or malformed index: nothing to score for this route.
                return
route = archetypes.multiply(route, axis=0)
terrain = pd.DataFrame(index=[rid])
for column in route:
cosine_sim = np.sum(route[column])
terrain[column] = cosine_sim
return terrain
def score_routes(*styles, word_count, path, routes):
'''Gets TF, IDF data for archetypes, then finds TFIDF and cosine
similarity for each route/style combination.
Finding the raw cosine similarity scores requires the functions
archetypal_tf, archetypal_idf, and normalize. This function helps
organize the retrieval and processing of the data for those functions.
Args:
word_count(Pandas dataframe): Dataframe with index route_id and
column 'word_count' - length of a route description in
words
Returns:
TFIDF.csv(CSV file): TFIDF for each word in each style. This
helps users determine if the TFIDF values are what they
would expect when adding new styles.
routes(Pandas dataframe): Holds cosine similarity for each
route/style combination'''
if click.confirm('Rescore archetypes?'):
# Gets Term-Frequency data for words in archetype documents
archetypes = archetypal_tf(*styles, path=path)
# Gets list of unique words in archetype documents
words = tuple(archetypes.index.tolist())
# Gets IDF Values for those words from the Database
idf = archetypal_idf(words)
# Selects words for archetype documents that have a correpsonding
# IDF value in the database
archetypes = archetypes[archetypes.index.isin(idf.index)]
# Multiplies TF by IDF values to get TFIDF score
archetypes = archetypes.mul(idf['idf'], axis=0)
# Normalizes TFIDF scores
archetypes = normalize(
table=archetypes,
inplace=True,
*styles)
archetypes = archetypes.rename(
columns={'index': 'word'}
).set_index('word')
# Writes to CSV
archetypes.to_csv(path + 'TFIDF.csv')
archetypes = pd.read_csv(path + 'TFIDF.csv', index_col='word')
# Groups words by route_id, then finds cosine similarity for each
# route-style combination
routes = routes.groupby('route_id').progress_apply(
cosine_similarity,
archetypes=archetypes)
# Reformats routes dataframe
routes.index = routes.index.droplevel(1)
routes = pd.concat([routes, word_count], axis=1, sort=False)
routes.fillna(0, inplace=True)
return routes
def weighted_scores(*styles, table, inplace=False):
'''Weights cosine similarity based on credibility.
The cosine similarity between a route and a style archetype
measures how close the two documents are. Depending on the score
and the word count of the route, however, this score can be more or
less believable. Using Bayesian statistics helps weight the scores
based on the credibility.
We can plot word count and cosine similarity in two dimensions.
Normalizing each so that the maximum value is one results in a
plane with four edge cases:
cosine similarity | word count
0 0
1 0
0 1
1 1
            When both word count and cosine similarity are high, the
            believability of the cosine score is at its highest. This is
            analogous to a route that scores well with the 'overhang' document,
therefore mentioning words like 'overhang' or 'roof' frequently,
that also has a lot of words.
If the word count is high and the cosine similarity is low the
believability of the score is high, but not as high as before.
            This is analogous to a route that never mentions words associated
with 'overhang' despite a high word count. We can be reasonably
sure in this case that the route does not have an overhang.
If the word count of a route is low but the cosine score is high,
we can be reasonably sure that the score is somewhat accurate. This
is a result of a route called, for instance, 'Overhang Route'.
Despite the low word count, it is highly likely that the route has
an overhang on it.
Finally, for routes that have both low word count and cosine score,
we have no way to be sure of the presence (or absence) of a
feature. In this case, our best guess is that the route is at
chance of featuring a given style of climbing.
If we chart word count, cosine similarity, and the credibility of
the cosine score, we are left with a cone with a point at the
origin, reaching up at a 45 degree angle along the credibility (z)
axis. Each route will exist somewhere on the surface of the cone.
To make use of this, we need to calculate this position. The height
to the cone gives us the credibility, and can be calculated with:
Credibility = sqrt(W ** 2 + C ** 2) * tan(45 degrees)
Since tan(45 degrees) is 1, this simplifies to:
Credibility = sqrt(W ** 2 + C ** 2)
W = Word count
C = Cosine similarity
The credibility of a route's score can be fed back into the score
to find a weighted route score. As the word count and cosine score
get close to zero, the average score should play more of a role in
the outcome. Therefore:
Score = C * sqrt(W ** 2 + C ** 2) + (1 - C)(1 - W) * Cm
W = word count
C = cosine Similarity
Cm = Average cosine similarity across routes
Finally, the scores are processed with a Sigmoid function,
specifically the logistic function.
                f(x) = L / (1 + e^(-k(x - x')))
L = upper bound
e = Euler's constant
k = logistic growth rate
x' = Sigmoid midpoint
By manipulating the constants in this function, we can find a
continuous threshold-like set of values that are bounded by 0 and
1. The midpoint of the threshold is the mean value of the scores
            plus one standard deviation. Therefore, the function used here is:
                f(x) = 1 / (1 + e^(-100(x - x')))
x' = mean + sigma
e = Euler's constant
Args:
*styles(str): Names of the style archetypes
table(Pandas dataframe): Master dataframe of cosine scores for
each route
inplace(Boolean, default = False):
If inplace=False, adds new columns with weighted values.
If inplace=True, replaces the columns.
Returns:
Updated SQL Database'''
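            # Worked example (illustrative): with a threshold x' = 0.45, a raw
            # weighted score of 0.50 maps to 1 / (1 + e^(-100 * 0.05)) ≈ 0.993
            # while 0.40 maps to ≈ 0.007, so the steep logistic curve acts as a
            # soft threshold centred on the mean-plus-one-sigma point.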
# Gets name for the columns to write data
if inplace:
count = 'word_count'
else:
count = 'word_count_norm'
# As the word count increases, the credibility increases as a
# logarithmic function
table[count] = np.log10(table['word_count'])
table_min = table[count].min()
table_max = table[count].max()
table_diff = table_max - table_min
table[count] = (table[count].values - table_min) / table_diff
# Gets weighted scores for each style
for style in styles:
# Stores name to write data on
if inplace:
column_name = style
else:
column_name = style + '_weighted'
# Find average cosine similarity across routes
style_avg = table[style].mean()
# Calculate weighted rating
table[column_name] = (
table[style].values * np.sqrt(
table[style].values ** 2 + table[count].values ** 2)
+ (1 - table[count].values) * (1 - table[style].values)
* style_avg)
threshold = table[column_name].mean() + table[column_name].std()
# Calculates final score using Sigmoid function
table[column_name] = (
1 / (1 + np.e ** (-100 *
(table[column_name]
- threshold))))
return table
# Run functions
print('Getting route information')
routes = get_routes()
print('Getting word count')
word_count = get_word_count()
print('Scoring routes')
routes = score_routes(
*styles,
word_count=word_count,
path=path,
routes=routes)
print('Getting weighted scores')
routes = weighted_scores(*styles, table=routes, inplace=True)
# Collects the full database
query = 'SELECT * FROM Routes'
all_routes = pd.read_sql(query, conn, index_col='route_id')
# Combines columns in the routes dataframe with the full database if
# they don't already exist in the full database
updated = pd.concat(
[routes[
~routes.index.isin(all_routes.index)],
all_routes],
sort=False)
updated.update(routes)
updated.rename_axis('id', inplace=True)
for i in range(5):
feature = terrain_types[i]
other_features = terrain_types[:i] + terrain_types[i+1:]
other_features = updated[other_features]
updated[feature+'_diff'] = updated[feature] * (
updated[feature]
- other_features.sum(axis=1))
# Write to Database
updated.to_sql(
'Routes_scored',
con=engine,
if_exists='replace')
return
def get_links(route, route_links):
"""Gets links between a route and all parent areas
Args:
route(Series): Route information
route_links(Series): Links between areas
Returns:
Updated SQL"""
route_id = route.name
parents = [route.squeeze()]
base = False
while not base:
try:
grandparent = route_links.loc[parents[-1]]['from_id']
parents.append(grandparent)
except:
base = True
parents = pd.DataFrame({
'id': route_id,
'area': parents,
})
parents = parents.dropna(how='any')
parents.to_sql(
'route_links',
con=engine,
if_exists='append',
index=False)
def get_children(area):
"""Gets area children for all areas
Args:
area(Series): area information
Returns:
Updated SQL"""
# Checks if child area in area DB
try:
children = area_links.loc[area]
# If not, this is a base level area
except:
return
if type(children) is np.int64:
children = pd.Series(
data=children,
index=[area],
name='id')
children.index.name = 'from_id'
for child in children:
grandchildren = get_children(child)
if grandchildren is not None:
grandchildren.index = [area] * len(grandchildren)
children = pd.concat([children, grandchildren])
return children
def get_area_details(*styles):
"""Gets route data for each area and creates a summary.
Args:
styles: terrain styles
Returns:
Updated SQL
"""
routes = pd.read_sql("""
SELECT *
FROM routes_scored""",
con=engine,
index_col='id')
terrain_info = routes[terrain_types]
average_stars = routes.stars.mean()
other = ['alpine', 'pitches', 'length', 'danger_conv']
def grade_areas():
routes_in_area = pd.read_sql("""
SELECT *
FROM route_links""",
con=engine,
index_col='area').squeeze()
routes_in_area = routes_in_area.groupby(routes_in_area.index)
def area_styles_and_grades(area):
area_routes = routes.loc[area]
style = area_routes[climbing_styles+other].mean()
grade = area_routes[grades].mean().round()
grade_std = area_routes[grades].std() + grade
grade_std = grade_std.round()
grade_std.index = grade_std.index + '_std'
score_total = (area_routes.stars * area_routes.votes).sum()
votes_total = area_routes.votes.sum()
bayes = (score_total + 10 * average_stars) / (votes_total + 10)
area_information = style.append(grade)
area_information = area_information.append(grade_std)
area_information['bayes'] = bayes
area_information = area_information.to_frame().transpose()
return area_information
def get_conversion(area):
while True:
if area.sport or area.trad or area.tr:
score = area.rope_conv
try:
score = int(area.rope_conv)
except:
break
score_std = area.rope_conv_std
if score_std == score_std:
score_std = int(area.rope_conv_std)
else:
score_std = score
for system in rope_systems:
if score_std >= len(system_to_grade[system]):
score_std = -1
area[system] = system_to_grade[system][score]
area[system+'_std'] = system_to_grade[system][score_std]
else:
area.rope_conv = None
area.rope_conv_std = None
break
while True:
if area.boulder:
score = area.boulder_conv
try:
score = int(score)
except:
break
score_std = area.boulder_conv_std
if score_std == score_std:
score_std = int(score_std)
else:
score_std = score
for system in boulder_systems:
if score_std >= len(system_to_grade[system]):
score_std = -1
area[system] = system_to_grade[system][score]
area[system+'_std'] = system_to_grade[system][score_std]
else:
area.boulder_conv = None
area.boulder_conv_std = None
break
for system, data in misc_system_to_grade.items():
if area[system]:
score = area[data['conversion']]
score_std = area[data['conversion'] + '_std']
try:
score = int(score)
except:
continue
if score_std == score_std:
score_std = int(score_std)
else:
score_std = score
if score_std >= len(data['grades']):
score_std = -1
area[data['rating']] = data['grades'][score]
area[data['rating']+'_std'] = data['grades'][score_std]
return area
def get_grades():
# Get grades for each area
print('Getting routes in area')
area_information = routes_in_area.progress_apply(
area_styles_and_grades)
area_information.index = area_information.index.droplevel(1)
area_information.index = area_information.index.rename('id')
print('Getting area information')
area_information = area_information.progress_apply(
get_conversion, axis=1)
areas = pd.read_sql(
'SELECT * FROM areas',
con=engine,
index_col='id')
            # DataFrame.update works in place and returns None, so do not reassign;
            # the computed per-area grades live in area_information.
            areas.update(area_information)
print(areas)
# areas.to_sql(
# 'areas',
# if_exists='replace',
# con=engine)
def area_terrain(area):
num_routes = len(area)
area_terrain_info = terrain_info.loc[area].quantile(.95)
area_terrain_info = area_terrain_info / area_terrain_info.max()
area_terrain_info = area_terrain_info.to_frame().transpose()
for i in range(5):
feature = terrain_types[i]
other_features = terrain_types[:i] + terrain_types[i+1:]
other_features = area_terrain_info[other_features]
area_terrain_info[feature+'_diff'] = (
area_terrain_info[feature]
* (area_terrain_info[feature] - other_features.sum(axis=1))
* np.log(num_routes + np.e))
return area_terrain_info
def get_terrain():
area_terrain_info = routes_in_area.progress_apply(area_terrain)
area_terrain_info.index = area_terrain_info.index.droplevel(1)
area_terrain_info.index.rename('id', inplace=True)
for terrain_type in terrain_types:
area_terrain_info[terrain_type + '_diff'] = (
(area_terrain_info[terrain_type + '_diff']
- area_terrain_info[terrain_type + '_diff'].min())
/ (area_terrain_info[terrain_type + '_diff'].max()
- area_terrain_info[terrain_type + '_diff'].min()))
area_terrain_info.index = area_terrain_info.index.astype('int32')
area_terrain_info.dropna(inplace=True)
areas = pd.read_sql('SELECT * FROM areas', con=engine, index_col='id')
            # DataFrame.update works in place and returns None, so do not reassign;
            # the computed per-area terrain scores live in area_terrain_info.
            areas.update(area_terrain_info)
print(areas)
# areas.to_sql(
# 'areas',
# if_exists='replace',
# con=engine)
get_grades()
get_terrain()
def get_base_areas():
cursor.execute('''
SELECT id
FROM areas
WHERE
name = 'International' AND
from_id is Null''')
international_id = cursor.fetchone()[0]
cursor.execute(f"""
SELECT id
FROM areas
WHERE
from_id = {international_id}""")
country_ids = cursor.fetchall()
country_ids = [
country_id for sublist in country_ids for country_id in sublist]
base_areas = [international_id] + [
country_id for country_id in country_ids]
return tuple(base_areas)
def base_area_land_area():
countries = pd.read_csv('country_land_data.csv', encoding='latin-1')
import numpy as np
from scipy.stats import ranksums
import pandas as pd
import csv
file = pd.read_csv('merged-file.txt', header=None, skiprows=0, delim_whitespace=True)
file.columns = ['Freq_allel','dpsnp','sift','polyphen','mutas','muaccessor','fathmm','vest3','CADD','geneName']
df = file.drop_duplicates(keep=False)
################## START ###################
# calculate ranksums for SIFT
sift_df = df[['geneName','sift']]
# extract all non-driver genes | sift_score
genelist = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-all-driver.tsv', header=None, skiprows=0, sep='\t')
genelist.columns = ['geneName']
#
merged_df = pd.merge(
sift_df, genelist,
how='outer', on=['geneName'], indicator=True, suffixes=('_foo','')).query(
'_merge == "left_only"')
merged_df.drop(['geneName','_merge'], axis=1, inplace=True)
# extract all predicted driver genes | sift_score
genelist1 = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-predicteddriver.tsv', header=None, skiprows=0, sep='\t')
genelist1.columns = ['geneName']
merged_df1 = sift_df.merge(genelist1, how = 'inner', on = ['geneName'])
merged_df1.drop(['geneName'], axis=1, inplace=True)
# calculate p-value for ranksums with SIFT
stat, pvalue = ranksums(merged_df, merged_df1)
print(pvalue)
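# The SIFT block above is repeated almost verbatim for every score column below.
# A hedged sketch of how that repetition could be factored into one helper; the
# function name and its parameters are illustrative and not part of the original script:
def ranksums_for_score(score_col, variants, driver_list_path, predicted_driver_list_path):
    # Wilcoxon rank-sum test comparing one score column between genes absent from
    # the driver list (left_only after an outer merge) and genes present in the
    # predicted-driver list.
    score_df = variants[['geneName', score_col]]
    drivers = pd.read_csv(driver_list_path, header=None, sep='\t', names=['geneName'])
    predicted = pd.read_csv(predicted_driver_list_path, header=None, sep='\t', names=['geneName'])
    nondriver_scores = score_df.merge(
        drivers, how='outer', on='geneName', indicator=True).query(
        '_merge == "left_only"')[score_col]
    driver_scores = score_df.merge(predicted, how='inner', on='geneName')[score_col]
    return ranksums(nondriver_scores, driver_scores)
# example: stat, pvalue = ranksums_for_score('polyphen', df, <driver list path>, <predicted list path>)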
#################### POLYPHEN ###################
# calculate ranksums for POLYPHEN
polyphen_df = df[['geneName','polyphen']]
# extract all non-driver genes | sift_score
genelist = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-all-driver.tsv', header=None, skiprows=0, sep='\t')
genelist.columns = ['geneName']
#
merged_df = pd.merge(
polyphen_df, genelist,
how='outer', on=['geneName'], indicator=True, suffixes=('_foo','')).query(
'_merge == "left_only"')
merged_df.drop(['geneName','_merge'], axis=1, inplace=True)
# extract all predicted driver genes | polyphen_score
genelist1 = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-predicteddriver.tsv', header=None, skiprows=0, sep='\t')
genelist1.columns = ['geneName']
merged_df1 = polyphen_df.merge(genelist1, how = 'inner', on = ['geneName'])
merged_df1.drop(['geneName'], axis=1, inplace=True)
# calculate p-value for ranksums with polyphen
stat, pvalue = ranksums(merged_df, merged_df1)
print(pvalue)
#################### MutationTaster ###################
# calculate ranksums for MutationTaster
mutas_df = df[['geneName','mutas']]
# extract all non-driver genes | MutationTaster_score
genelist = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-all-driver.tsv', header=None, skiprows=0, sep='\t')
genelist.columns = ['geneName']
#
merged_df = pd.merge(
mutas_df, genelist,
how='outer', on=['geneName'], indicator=True, suffixes=('_foo','')).query(
'_merge == "left_only"')
merged_df.drop(['geneName','_merge'], axis=1, inplace=True)
# extract all predicted driver genes | MutationTaster_score
genelist1 = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-predicteddriver.tsv', header=None, skiprows=0, sep='\t')
genelist1.columns = ['geneName']
merged_df1 = mutas_df.merge(genelist1, how = 'inner', on = ['geneName'])
merged_df1.drop(['geneName'], axis=1, inplace=True)
# calculate p-value for ranksums with MutationTaster
stat, pvalue = ranksums(merged_df, merged_df1)
print(pvalue)
#################### Mutationassessor ###################
# calculate ranksums for Mutationassessor
muaccessor_df = df[['geneName','muaccessor']]
# extract all non-driver genes | Mutationassessor_score
genelist = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-all-driver.tsv', header=None, skiprows=0, sep='\t')
genelist.columns = ['geneName']
#
merged_df = pd.merge(
muaccessor_df, genelist,
how='outer', on=['geneName'], indicator=True, suffixes=('_foo','')).query(
'_merge == "left_only"')
merged_df.drop(['geneName','_merge'], axis=1, inplace=True)
# extract all predicted driver genes | Mutationassessor_score
genelist1 = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-predicteddriver.tsv', header=None, skiprows=0, sep='\t')
genelist1.columns = ['geneName']
merged_df1 = muaccessor_df.merge(genelist1, how = 'inner', on = ['geneName'])
merged_df1.drop(['geneName'], axis=1, inplace=True)
# calculate p-value for ranksums with Mutationassessor
stat, pvalue = ranksums(merged_df, merged_df1)
print(pvalue)
#################### fathmm ###################
# calculate ranksums for fathmm
fathmm_df = df[['geneName','fathmm']]
# extract all non-driver genes | fathmm_score
genelist = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-all-driver.tsv', header=None, skiprows=0, sep='\t')
genelist.columns = ['geneName']
#
merged_df = pd.merge(
fathmm_df, genelist,
how='outer', on=['geneName'], indicator=True, suffixes=('_foo','')).query(
'_merge == "left_only"')
merged_df.drop(['geneName','_merge'], axis=1, inplace=True)
# extract all predicted driver genes | fathmm_score
genelist1 = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-predicteddriver.tsv', header=None, skiprows=0, sep='\t')
genelist1.columns = ['geneName']
merged_df1 = fathmm_df.merge(genelist1, how = 'inner', on = ['geneName'])
merged_df1.drop(['geneName'], axis=1, inplace=True)
# calculate p-value for ranksums with fathmm
stat, pvalue = ranksums(merged_df, merged_df1)
print(pvalue)
#################### VEST3 ###################
# calculate ranksums for VEST3
vest3_df = df[['geneName','vest3']]
# extract all non-driver genes | VEST3_score
genelist = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-all-driver.tsv', header=None, skiprows=0, sep='\t')
genelist.columns = ['geneName']
#
merged_df = pd.merge(
vest3_df, genelist,
how='outer', on=['geneName'], indicator=True, suffixes=('_foo','')).query(
'_merge == "left_only"')
merged_df.drop(['geneName','_merge'], axis=1, inplace=True)
# extract all predicted driver genes | VEST3_score
genelist1 = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-predicteddriver.tsv', header=None, skiprows=0, sep='\t')
genelist1.columns = ['geneName']
merged_df1 = vest3_df.merge(genelist1, how = 'inner', on = ['geneName'])
merged_df1.drop(['geneName'], axis=1, inplace=True)
# calculate p-value for ranksums with VEST3
stat, pvalue = ranksums(merged_df, merged_df1)
print(pvalue)
#################### CADD ###################
# calculate ranksums for CADD
CADD_df = df[['geneName','CADD']]
# extract all non-driver genes | CADD_score
genelist = pd.read_csv('/encrypted/e3000/gatkwork/COREAD-ESCA-all-driver.tsv', header=None, skiprows=0, sep='\t')
#########################################################
### DNA variant annotation tool
### Version 1.0.0
### By <NAME>
### <EMAIL>
#########################################################
import pandas as pd
import numpy as np
import allel
import argparse
import subprocess
import sys
import os.path
import pickle
import requests
import json
def extract_most_deleterious_anno(row, num_ann_max):
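# The row holds the ANN alt alleles in its first num_ann_max cells and the matching
# annotation terms in the rest; for each unique alt allele, keep the highest-priority
# (most deleterious) annotation according to anno_order_file.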
ann_order = pd.read_csv(anno_order_file, sep=' ')
alt = row[:num_ann_max]
anno = row[num_ann_max:]
alt.index = range(0, len(alt))
anno.index = range(0, len(anno))
ann_all_alt = pd.DataFrame()
alt_unique = alt.unique()
for unique_alt in alt_unique:
if unique_alt != '':
anno_all = anno[alt == unique_alt]
ann_order_all = pd.DataFrame()
for ann_any in anno_all:
if sum(ann_any == ann_order.Anno) > 0:
ann_any_order = ann_order[ann_order.Anno == ann_any]
else:
ann_any_order = ann_order.iloc[ann_order.shape[0]-1]
ann_order_all = ann_order_all.append(ann_any_order)
small_ann = ann_order_all.sort_index(ascending=True).Anno.iloc[0]
ann_unique_alt = [unique_alt, small_ann]
ann_all_alt = ann_all_alt.append(ann_unique_alt)
ann_all_alt.index = range(0, ann_all_alt.shape[0])
return ann_all_alt.T
def run_snpeff(temp_out_name):
snpeff_command = ['java', '-Xmx4g', '-jar', snpeff_path, \
'-ud', '0', \
# '-v', \
'-canon', '-noStats', \
ref_genome, vcf_file]
temp_output = open(temp_out_name, 'w')
subprocess.run(snpeff_command, stdout=temp_output)
temp_output.close()
def get_max_num_ann(temp_out_name):
num_ann_guess = 500
callset = allel.vcf_to_dataframe(temp_out_name, fields='ANN', numbers={'ANN': num_ann_guess})
num_ann = callset.apply(lambda x: sum(x != ''), axis=1)
num_ann_max = num_ann.max() # num_ann_max = 175
return num_ann_max
def get_ann_from_output_snpeff(temp_out_name):
callset = allel.read_vcf(temp_out_name, fields='ANN', transformers=allel.ANNTransformer(), \
numbers={'ANN': num_ann_max})
df1 = pd.DataFrame(data=callset['variants/ANN_Allele'])
df2 = pd.DataFrame(data=callset['variants/ANN_Annotation'])
df3 = pd.concat((df1, df2), axis=1)
df3.columns = range(0, df3.shape[1])
return df3
def get_anno_total(anno_from_snpeff):
anno_total = pd.DataFrame()
pickle_dump = 'pickle_dump.temp'
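# The per-row extraction below is slow, so its result is pickled to disk and
# reloaded on later runs instead of being recomputed.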
if not os.path.isfile(pickle_dump):
print('Extracting most deleterious annotations generated by SnpEff')
for index, row in anno_from_snpeff.iterrows():
anno_row = extract_most_deleterious_anno(row, num_ann_max)
anno_total = anno_total.append(anno_row)
print('done')
dump_file = open(pickle_dump, 'wb')
pickle.dump(anno_total, dump_file, pickle.HIGHEST_PROTOCOL)
dump_file.close()
dump_file = open(pickle_dump, 'rb')
anno_total = pickle.load(dump_file)
a = ['Alt_' + str(i) for i in range(1, num_alt + 1)]
b = ['Anno_' + str(i) for i in range(1, num_alt + 1)]
c = list(range(0, num_alt * 2))
c[::2] = a
c[1::2] = b
anno_total.columns = c
anno_total.replace(np.nan, -1, inplace=True)
anno_total.index = range(0, anno_total.shape[0])
return anno_total
def get_num_alternate(vcf_file):
num_alt = allel.read_vcf(vcf_file, fields='numalt')['variants/numalt'].max()
return num_alt
def get_dp_ro_ao(temp_out_name):
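# DP = total read depth, RO = reference allele observation count,
# AO = alternate allele observation count (one value per ALT allele).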
callset_dp_ro_ao = allel.vcf_to_dataframe(temp_out_name, fields=['DP', 'RO', 'AO'], alt_number=num_alt)
callset_dp_ro_ao.index = range(0, callset_dp_ro_ao.shape[0])
return callset_dp_ro_ao
def get_alt_ref_ratio(callset_dp_ro_ao):
callset_ratio = pd.DataFrame()
import preprocess
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import pandas as pd
import plotly.express as px
from sklearn.decomposition import FastICA
import matplotlib.pyplot as plt
import numpy as np
dataPath = r"C:\Users\shalev\Desktop\Introduction_to_AI\Introduction-to-AI\Data\mushrooms_data.csv"
reducedDataPath = r"C:\Users\shalev\Desktop\Introduction_to_AI\Introduction-to-AI\Data\reduced_data.csv"
class DimantionReduction:
def __init__(self, n_components=28):
self.data = preprocess.readCsv(dataPath)
self.encodedData = preprocess.preprocessData(self.data)
self.X_train = self.encodedData[0]
self.X_test = self.encodedData[1]
self.y_train = self.encodedData[2]
self.y_test = self.encodedData[3]
self.X = self.encodedData[4]
self.y = self.encodedData[5]
self.n_components = n_components
self.reduced_X = None
self.reduced_X_for_plot = None
def reduceDimension(self):
# normalized_X = StandardScaler().fit_transform(self.X)
pca = PCA(n_components=self.n_components)
principalComponents = pca.fit_transform(self.X)
column_names = []
for i in range(self.n_components):
column_names.append('principal component' + str(i))
principalDf = pd.DataFrame(data=principalComponents
, columns=column_names)
self.reduced_X = principalDf
print(self.reduced_X)
# self.reduced_X.to_csv(reducedDataPath)
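# Plot cumulative explained variance against the number of components to help
# choose n_components.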
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of features')
plt.ylabel('explained variance')
plt.show()
def reduceDimensionForPlot(self):
# normalized_X = StandardScaler().fit_transform(self.X)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(self.X)
# print(pca.explained_variance_ratio_)
principalDf = pd.DataFrame(data=principalComponents
, columns=['principal component 1', 'principal component 2'])
self.reduced_X_for_plot = principalDf
def ICA_reduceDimentionForPlot(self):
# normalized_X = StandardScaler().fit_transform(self.X)
ica = FastICA(n_components=2)
principalComponents = ica.fit_transform(self.X)
# print(pca.explained_variance_ratio_)
principalDf = pd.DataFrame(data=principalComponents
, columns=['principal component 1', 'principal component 2'])
self.reduced_X_for_plot = principalDf
def plotReducedData(self):
new_df = pd.concat([self.reduced_X_for_plot, self.data[['odor']]], axis=1)
from dis import dis
import numpy as np
import pandas as pd
import warnings
from credoai.modules.credo_module import CredoModule
from credoai.utils.constants import MULTICLASS_THRESH
from credoai.utils.common import NotRunError, is_categorical
from credoai.utils.dataset_utils import ColumnTransformerUtil
from credoai.utils.model_utils import get_generic_classifier
from itertools import combinations
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.metrics import roc_auc_score, make_scorer
from sklearn.feature_selection import mutual_info_classif, mutual_info_regression
from typing import List, Optional
class DatasetFairness(CredoModule):
"""Dataset module for Credo AI.
This module takes in features and labels and provides functionality to perform dataset assessment
Parameters
----------
X : pandas.DataFrame
The features
y : pandas.Series
The outcome labels
sensitive_features : pandas.Series
A series of the sensitive feature labels (e.g., "male", "female") which should be used to create subgroups
categorical_features_keys : list[str], optional
Names of the categorical features
categorical_threshold : float
Parameter for automatically identifying categorical columns. See
`credoai.utils.common.is_categorical`
"""
def __init__(self,
X,
y,
sensitive_features: pd.DataFrame,
categorical_features_keys: Optional[List[str]]=None,
categorical_threshold: float=0.05):
self.data = pd.concat([X, y], axis=1)
self.sensitive_features = sensitive_features
self.X = X
self.y = y
# set up categorical features
if categorical_features_keys:
self.categorical_features_keys = categorical_features_keys.copy()
for sensitive_feature_name in self.sensitive_features:
if sensitive_feature_name in self.categorical_features_keys:
self.sensitive_features[sensitive_feature_name] = self.sensitive_features[sensitive_feature_name].astype('category')
self.categorical_features_keys.remove(sensitive_feature_name)
else:
self.categorical_features_keys = self._find_categorical_features(categorical_threshold)
def run(self):
"""Runs the assessment process
Returns
-------
dict, nested
Key: assessment category
Values: detailed results associated with each category
"""
self.results = {}
for sf_name, sf_series in self.sensitive_features.items():
sensitive_feature_prediction_results = self._run_cv(sf_series)
sensitive_feature_prediction_results = {
f'{sf_name}-{key}': val for key, val in sensitive_feature_prediction_results.items()
}
group_differences = self._group_differences(sf_series)
normalized_mutual_information = self._calculate_mutual_information(sf_series)
balance_metrics = self._assess_balance_metrics(sf_series)
balance_metrics = {f'{sf_name}-{key}': val for key, val in balance_metrics.items()}
self.results.update({
**balance_metrics,
**sensitive_feature_prediction_results,
f'{sf_name}-standardized_group_diffs': group_differences,
f'{sf_name}-normalized_mutual_information': normalized_mutual_information
})
return self
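# A hedged usage sketch (variable names are illustrative, not part of this module):
#   module = DatasetFairness(X, y, sensitive_features=sensitive_df)
#   module.run()
#   summary_df = module.prepare_results()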
def prepare_results(self):
"""Prepares results for export to Credo AI's Governance App
Structures a subset of results for export as a dataframe with appropriate structure
for exporting. See credoai.modules.credo_module.
Returns
-------
pd.DataFrame
Raises
------
NotRunError
If results have not been run, raise
"""
if self.results is not None:
metric_types_names = [
'sensitive_feature_prediction_score',
'demographic_parity_difference',
'demographic_parity_ratio'
]
prepared_arr = []
index = []
for sensitive_feature_name in self.sensitive_features:
metric_types = [sensitive_feature_name + '-' + x for x in metric_types_names]
for metric_type in metric_types:
if metric_type not in self.results:
continue
val = self.results[metric_type]
# if multiple values were calculated for metric_type
# add them all. Assumes each element of list is a dictionary with a "value" key,
# and other optional keys as metric metadata
if isinstance(val, list):
for l in val:
index.append(metric_type)
prepared_arr.append(l)
else:
# assumes the dictionary has a "value" key, along with other optional keys
# as metric metadata
if isinstance(val, dict):
tmp = val
elif isinstance(val, (int, float)):
tmp = {'value': val}
index.append(metric_type)
prepared_arr.append(tmp)
res = pd.DataFrame(prepared_arr, index=index)
#!/usr/bin/env python3.9
import matplotlib.pyplot as plt
import pandas as pd
import subprocess
import copy
import re
import time
import argparse
import sys
class Log:
date=None
add_lines=0
del_lines=0
def reset(self):
self.date=None
self.add_lines=0
self.del_lines=0
def commit(self, logArray):
if self.date is not None:
# print(f'{log.add_lines} {log.del_lines} {log.date}')
logArray.append( copy.deepcopy(self) )
self.reset()
def fetch_git_log(workdir):
logArray = []
#pattern_space = re.compile('[:space:]+')
pattern_int = re.compile('^[0-9]+$')
try:
proc = subprocess.Popen(['git',
'log', '--numstat',
'--date', 'format:%s'],
cwd=workdir,
stdout = subprocess.PIPE)
logArray = []
log = Log()
for line in proc.stdout:
line = line.decode('utf-8')
line = line.rstrip()
col=line.split()
if len(col)==0:
continue
if col[0] == "Date:":
log.reset()
log.date=int(col[1])
if len(col)>2 and pattern_int.match(col[0]) and pattern_int.match(col[1]):
log.add_lines += int(col[0])
log.del_lines -= int(col[1])
if col[0] == "commit":
log.commit(logArray)
log.commit(logArray)
except subprocess.CalledProcessError as err:
print('ERROR:', err.output)
# Reverse!
return logArray[::-1]
def create_dataframe_from_logarray(logarray):
total_line=0
data=[]
for e in logarray:
total_line += e.add_lines + e.del_lines
# print(f"{e.date} {e.add_lines} {e.del_lines} {total_line}")
data.append([e.date, e.add_lines, e.del_lines, total_line])
df = pd.DataFrame(data,
columns=["date",
"add_lines", "del_lines", "total_line"])
df["ts"] = pd.to_datetime(df["date"].astype(int), unit='s')
return df
def plot_simple(df):
fig, ax = plt.subplots(1,1)
ax.plot(df["ts"], df["total_line"])
for tick in ax.get_xticklabels():
tick.set_rotation(15)
ax.set_ylabel("Lines of source code")
ax.set_title(args.title)
ax.grid()
return fig, ax
def plot_detailed(df):
fig, ax = plt.subplots(2,1, sharex=True)
ax[0].plot(df["ts"], df["total_line"])
ax[0].set_ylabel("Lines of source code")
ax[0].grid()
ax[1].vlines(df["ts"], 0, df["add_lines"],
color="b", label="Added")
ax[1].vlines(df["ts"], 0, df["del_lines"],
color="r", label="Deleted")
#ymax=df["add_lines"].max()
#ymin=df["del_lines"].min()
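# Clip the y-axis to the 1st-99th percentiles so a single very large commit
# does not flatten the rest of the plot.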
ymax=df["add_lines"].quantile(q=.99)
ymin=df["del_lines"].quantile(q=.01)
ax[1].set_ylim([ymin,ymax])
ax[1].grid()
ax[1].legend()
for tick in ax[1].get_xticklabels():
tick.set_rotation(15)
# ax[1].set_ylabel("Changed lines")
ax[0].set_title(args.title)
return fig, ax
# ---------------------------
parser = argparse.ArgumentParser(description='Plot lines of source codes in a git project')
parser.add_argument('workdir', type=str, default=".", nargs='?')
parser.add_argument('--save', type=str, metavar='filename', help="save figure as file")
parser.add_argument('--dump', type=str, metavar='filename',help="dump data into csv file")
parser.add_argument('--restore', type=str, metavar='filename',help="load data from csv file")
parser.add_argument('--simple', action='store_true', help="plot simple graph")
parser.add_argument('--today', action='store_true')
parser.add_argument('--title', type=str, help="set title ")
args = parser.parse_args()
df = None
if args.restore:
df = pd.read_csv(args.restore, index_col=False)
df["ts"] = pd.to_datetime(df["date"].astype(int), unit='s')
else:
logarray = fetch_git_log(args.workdir)
df = create_dataframe_from_logarray(logarray)
if args.dump:
df.drop(["ts"], axis=1).to_csv(args.dump, index=False)
if df.shape[0] == 0:
print("No commits exist.")
sys.exit(0)
if args.today:
total_line=df.tail(1)["total_line"]
date=time.time()
today = pd.to_datetime(date, unit='s')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
class TestData(object):
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
# index names are okay
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
# index names are okay
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='my_feature_label')
result = comp.compute(ix, A, B)
expected = DataFrame(
[1, 1, 1, 1, 1], index=ix, columns=['my_feature_label'])
pdt.assert_frame_equal(result, expected)
# def test_compare_custom_nonvectorized_linking(self):
# A = DataFrame({'col': [1, 2, 3, 4, 5]})
# B = DataFrame({'col': [1, 2, 3, 4, 5]})
# ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# def custom_func(a, b):
# return np.int64(1)
# # test without label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix)
# pdt.assert_frame_equal(result, expected)
# # test with label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col',
# label='test'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
# pdt.assert_frame_equal(result, expected)
def test_compare_custom_instance_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def call(s1, s2):
# this should raise on incorrect types
assert isinstance(s1, np.ndarray)
assert isinstance(s2, np.ndarray)
return np.ones(len(s1), dtype=np.int)
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
# test with kwarg
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
x=5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='test')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_parallel_comparing_api(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
def test_parallel_comparing(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=4)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_4processes = comp.compute(self.index_AB, self.A, self.B)
result_4processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
pdt.assert_frame_equal(result_single, result_4processes)
def test_pickle(self):
# test if it is possible to pickle the Compare class
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.numeric('number', 'number')
comp.geo('lat', 'lng', 'lat', 'lng')
comp.date('before', 'after')
# do the test
pickle_path = os.path.join(self.test_dir, 'pickle_compare_obj.pickle')
pickle.dump(comp, open(pickle_path, 'wb'))
def test_manual_parallel_joblib(self):
# test if it is possible to pickle the Compare class
# This is only available for python 3. For python 2, it is not
# possible to pickle instancemethods. A workaround can be found at
# https://stackoverflow.com/a/29873604/8727928
if sys.version.startswith("3"):
# import joblib dependencies
from joblib import Parallel, delayed
# split the data into smaller parts
len_index = int(len(self.index_AB) / 2)
df_chunks = [self.index_AB[0:len_index], self.index_AB[len_index:]]
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.string('lastname', 'lastname')
comp.exact('street', 'street')
# do in parallel
Parallel(n_jobs=2)(
delayed(comp.compute)(df_chunks[i], self.A, self.B)
for i in [0, 1])
def test_indexing_types(self):
# test the two types of indexing
# this test needs improvement
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B_reversed = B[::-1].copy()
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
comp_label = recordlinkage.Compare(indexing_type='label')
comp_label.exact('col', 'col')
result_label = comp_label.compute(ix, A, B_reversed)
# test with position indexing type
comp_position = recordlinkage.Compare(indexing_type='position')
comp_position.exact('col', 'col')
result_position = comp_position.compute(ix, A, B_reversed)
assert (result_position.values == 1).all(axis=0)
pdt.assert_frame_equal(result_label, result_position)
def test_pass_list_of_features(self):
from recordlinkage.compare import FrequencyA, VariableA, VariableB
# setup datasets and record pairs
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
features = [
VariableA('col', label='y1'),
VariableB('col', label='y2'),
FrequencyA('col', label='y3')
]
comp_label = recordlinkage.Compare(features=features)
result_label = comp_label.compute(ix, A, B)
assert list(result_label) == ["y1", "y2", "y3"]
class TestCompareFeatures(TestData):
def test_feature(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = lambda s1, s2: np.ones(len(s1))
feature.compute(ix, A, B)
def test_feature_multicolumn_return(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def ones(s1, s2):
return DataFrame(np.ones((len(s1), 3)))
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = ones
result = feature.compute(ix, A, B)
assert result.shape == (5, 3)
def test_feature_multicolumn_input(self):
# test using classes and the base class
A = DataFrame({
'col1': ['abc', 'abc', 'abc', 'abc', 'abc'],
'col2': ['abc', 'abc', 'abc', 'abc', 'abc']
})
B = DataFrame({
'col1': ['abc', 'abd', 'abc', 'abc', '123'],
'col2': ['abc', 'abd', 'abc', 'abc', '123']
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature(['col1', 'col2'], ['col1', 'col2'])
feature._f_compare_vectorized = \
lambda s1_1, s1_2, s2_1, s2_2: np.ones(len(s1_1))
feature.compute(ix, A, B)
class TestCompareExact(TestData):
"""Test the exact comparison method."""
def test_exact_str_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_used_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
    # match the SOFR leg maturity to the LIBOR leg and flip the legs to resolve the correct asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
def mock_commod(_cls, _q):
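    """Hourly commodity 'price' fixture: 31 UTC-indexed points starting 2019-05-01."""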
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
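    """'forwardPrice' fixture: PEAK, 7X8 and 2X16H buckets for contracts J20-M20, all dated 2019-01-02."""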
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
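    """Like mock_forward_price, but the 7X8/M20 row is omitted and a plain DataFrame is returned."""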
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
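    """FX vol fixture: one 'Last' impliedVolatility point if requested, else three daily rows per strike reference."""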
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
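    """Currency fixture: three rows per rates measure (rates, annuities, vols, ATM fwds) plus a 'strike' column."""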
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
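    """Equity fixture: three rows of implied/realized volatility and correlation measures keyed by relativeStrike."""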
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
def mock_meeting_expectation():
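    """Single 'Meeting Forward' rate row for the 2020-01-29 meeting period, valued on 2019-12-06."""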
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
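    """Three 'close' positions (MA1, MA2, MA3) of index MA890 with net weights 0.6, 0.3 and 0.1."""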
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def test_skew():
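    """skew() should work for DELTA, NORMALIZED and SPOT references, return empty for empty data,
    and raise MqError on incomplete data or a missing reference."""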
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
# for different delta strikes
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
    # missing strike, NORMALIZED reference, and non-100 SPOT/FORWARD strikes all raise MqError
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert actual.dataset_ids == _test_datasets
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
def test_basis_swap_spread(mocker):
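    """Reject unsupported currencies, real-time queries and malformed tenor/benchmark arguments;
    otherwise return the mocked basisSwapRate series."""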
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_swap_annuity(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
    mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
    args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['benchmark_type'] = BenchmarkType.SOFR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_annuity(**args)
expected = abs(tm.ExtendedSeries([1.0, 2.0, 3.0], index=_index * 3, name='swapAnnuity') * 1e4 / 1e8)
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_term_structure():
replace = Replacer()
args = dict(benchmark_type=None, floating_rate_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
    mock_pln = Currency('MA891', 'PLN')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'PLN'
    args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(..., '1y', real_time=True)
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['floating_rate_tenor'] = '3m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor_type'] = None
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'swapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']}, index=_index)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = '5y'
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_basis_swap_term_structure():
replace = Replacer()
range_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
range_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
args = dict(spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(..., '1y', real_time=True)
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_tenor'] = '6m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor_type'] = 'forward_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
assert tm_rates.basis_swap_term_structure(**args).empty
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = tm_rates._SwapTenorType.SWAP_TENOR
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'basisSwapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']},
index=_index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_cap_floor_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_vol(mock_usd, '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_vol(..., '5y', 50, real_time=True)
replace.restore()
def test_cap_floor_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_atm_fwd_rate(mock_usd, '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_atm_fwd_rate(..., '5y', real_time=True)
replace.restore()
def test_spread_option_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_vol(mock_usd, '3m', '10y', '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_vol(..., '3m', '10y', '5y', 50, real_time=True)
replace.restore()
def test_spread_option_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_atm_fwd_rate(mock_usd, '3m', '10y', '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_atm_fwd_rate(..., '3m', '10y', '5y', real_time=True)
replace.restore()
def test_zc_inflation_swap_rate():
replace = Replacer()
mock_gbp = Currency('MA890', 'GBP')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='GBP', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'CPI-UKRPI': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.zc_inflation_swap_rate(mock_gbp, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='inflationSwapRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.zc_inflation_swap_rate(..., '1y', real_time=True)
replace.restore()
def test_basis():
replace = Replacer()
mock_jpyusd = Cross('MA890', 'USD/JPY')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='JPYUSD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-3m/JPY-3m': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_cross)
actual = tm.basis(mock_jpyusd, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='basis'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.basis(..., '1y', real_time=True)
replace.restore()
def test_td():
cases = {'3d': pd.DateOffset(days=3), '9w': pd.DateOffset(weeks=9), '2m': pd.DateOffset(months=2),
'10y': pd.DateOffset(years=10)
}
for k, v in cases.items():
actual = tm._to_offset(k)
assert v == actual, f'expected {v}, got actual {actual}'
with pytest.raises(ValueError):
tm._to_offset('5z')
def test_pricing_range():
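    """Pass an explicit date through unchanged and resolve the default, '3m' and '3b' windows against a mocked 'today'."""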
import datetime
given = datetime.date(2019, 4, 20)
s, e = tm._range_from_pricing_date('NYSE', given)
assert s == e == given
class MockDate(datetime.date):
@classmethod
def today(cls):
return cls(2019, 5, 25)
# mock
replace = Replacer()
cbd = replace('gs_quant.timeseries.measures._get_custom_bd', Mock())
cbd.return_value = pd.tseries.offsets.BusinessDay()
today = replace('gs_quant.timeseries.measures.pd.Timestamp.today', Mock())
today.return_value = pd.Timestamp(2019, 5, 25)
gold = datetime.date
datetime.date = MockDate
# cases
s, e = tm._range_from_pricing_date('ANY')
assert s == pd.Timestamp(2019, 5, 24)
assert e == pd.Timestamp(2019, 5, 24)
s, e = tm._range_from_pricing_date('ANY', '3m')
assert s == pd.Timestamp(2019, 2, 22)
assert e == pd.Timestamp(2019, 2, 24)
s, e = tm._range_from_pricing_date('ANY', '3b')
assert s == e == pd.Timestamp(2019, 5, 22)
# restore
datetime.date = gold
replace.restore()
def test_var_swap_tenors():
session = GsSession.get(Environment.DEV, token='<PASSWORD>')
replace = Replacer()
get_mock = replace('gs_quant.session.GsSession._get', Mock())
get_mock.return_value = {
'data': [
{
'dataField': 'varSwap',
'filteredFields': [
{
'field': 'tenor',
'values': ['abc', 'xyc']
}
]
}
]
}
with session:
actual = tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
assert actual == ['abc', 'xyc']
get_mock.return_value = {
'data': []
}
with pytest.raises(MqError):
with session:
tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
replace.restore()
def test_tenor_to_month():
with pytest.raises(MqError):
tm._tenor_to_month('1d')
with pytest.raises(MqError):
tm._tenor_to_month('2w')
assert tm._tenor_to_month('3m') == 3
assert tm._tenor_to_month('4y') == 48
def test_month_to_tenor():
assert tm._month_to_tenor(36) == '3y'
assert tm._month_to_tenor(18) == '18m'
def test_forward_var_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'varSwap': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
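    # The expected marks appear consistent with the standard forward-variance relation
    # (tenors in months): fwd_var(T1, T2) = (T2 * v(T2)**2 - T1 * v(T1)**2) / (T2 - T1),
    # e.g. sqrt((2 * 4**2 - 1 * 2**2) / (2 - 1)) = sqrt(28) ~= 5.29150. This is an assumption
    # about the implementation, noted only to explain where the magic numbers come from.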
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'), datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_var_term(Cross('ABCDE', 'EURUSD'))
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'))
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_var_term(..., real_time=True)
replace.restore()
def _mock_var_swap_data(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
data = {
'varSwap': [1, 2, 3]
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
return out
def test_var_swap():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_data)
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=pd.date_range("2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert actual.empty
replace.restore()
def _mock_var_swap_fwd(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')] * 2)
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
d2 = {
'varSwap': [1.5, 2.5, 3.5],
'tenor': ['13m'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df2 = MarketDataResponseFrame(data=d2, index=idx)
out = pd.concat([df1, df2])
out.dataset_ids = _test_datasets
return out
def _mock_var_swap_1t(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df1.dataset_ids = _test_datasets
return df1
def test_var_swap_fwd():
# bad input
with pytest.raises(MqError):
tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', 500)
# regular
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_fwd)
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '1y', '13m']
expected = pd.Series([4.1533, 5.7663, 7.1589, 8.4410], name='varSwap',
index=pd.date_range(start="2019-01-01", periods=4, freq="D"))
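    # These interpolated marks match sqrt((13 * v_13m**2 - 12 * v_1y**2) / (13 - 12)) applied to
    # the mocked 1y/13m quotes, e.g. sqrt(13 * 4.5**2 - 12 * 4**2) ~= 8.4410 for the last point
    # (hypothesised here purely to document the expected values).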
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# no data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no data for a tenor
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_1t)
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no such tenors
tenors_mock.return_value = []
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# finish
replace.restore()
def _var_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'varSwap': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='varSwap')
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _var_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def _var_term_fwd():
idx = pd.date_range('2018-01-01', periods=2, freq='D')
def mock_var_swap(_asset, tenor, _forward_start_date, **_kwargs):
if tenor == '1m':
series = tm.ExtendedSeries([1, 2], idx, name='varSwap')
series.dataset_ids = _test_datasets
elif tenor == '2m':
series = tm.ExtendedSeries([3, 4], idx, name='varSwap')
series.dataset_ids = _test_datasets
else:
series = tm.ExtendedSeries()
series.dataset_ids = ()
return series
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.var_swap', Mock())
market_mock.side_effect = mock_var_swap
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '2m', '3m']
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'), forward_start_date='1m')
idx = pd.DatetimeIndex(['2018-02-02', '2018-03-02'], name='varSwap')
expected = pd.Series([2, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called()
replace.restore()
return actual
def test_var_term():
with DataContext('2018-01-01', '2019-01-01'):
_var_term_typical()
_var_term_empty()
_var_term_fwd()
with DataContext('2019-01-01', '2019-07-04'):
_var_term_fwd()
with DataContext('2018-01-16', '2018-12-31'):
out = _var_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.var_term(..., pricing_date=300)
def test_forward_vol():
idx = pd.DatetimeIndex([datetime.date(2020, 5, 1), datetime.date(2020, 5, 2)] * 4)
data = {
'impliedVolatility': [2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5],
'tenor': ['1m', '1m', '2m', '2m', '3m', '3m', '4m', '4m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([5.58659, 5.47723], name='forwardVol',
index=pd.to_datetime(['2020-05-01', '2020-05-02']))
with DataContext('2020-01-01', '2020-09-01'):
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
with DataContext('2020-01-01', '2020-09-01'):
actual_fx = tm.forward_vol(Cross('ABCDE', 'EURUSD'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# no data for required tenor
market_mock.reset_mock()
market_mock.return_value = MarketDataResponseFrame(data={'impliedVolatility': [2.1, 3.1, 5.1],
'tenor': ['1m', '2m', '4m']},
index=[datetime.date(2020, 5, 1)] * 3)
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol(..., '1m', '2m', tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def test_forward_vol_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'impliedVolatility': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100,
datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_vol_term(Cross('ABCDE', 'EURUSD'), tm.VolReference.SPOT, 100)
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def _vol_term_typical(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.vol_term(Index('MA123', AssetClass.Equity, '123'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _vol_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = MarketDataResponseFrame()
actual = tm.vol_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'), tm.VolReference.DELTA_CALL, 777)
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_vol_term():
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_typical(tm.VolReference.SPOT, 100)
_vol_term_typical(tm.VolReference.NORMALIZED, 4)
_vol_term_typical(tm.VolReference.DELTA_PUT, 50)
_vol_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _vol_term_typical(tm.VolReference.SPOT, 100)
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
with pytest.raises(MqError):
tm.vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.DELTA_NEUTRAL, 0)
def _vol_term_fx(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
actual = tm.vol_term(Cross('ABCDE', 'EURUSD'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def test_vol_term_fx():
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.SPOT, 50)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.NORMALIZED, 1)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.DELTA_NEUTRAL, 1)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_CALL, 50)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_PUT, 50)
def _fwd_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'forward': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.fwd_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='forward', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _fwd_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.fwd_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_fwd_term():
with DataContext('2018-01-01', '2019-01-01'):
_fwd_term_typical()
_fwd_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _fwd_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fwd_term(..., real_time=True)
def test_bucketize_price():
target = {
'7x24': [27.323461],
'offpeak': [26.004816],
'peak': [27.982783],
'7x8': [26.004816],
'2x16h': [],
'monthly': [],
'CAISO 7x24': [26.953743375],
'CAISO peak': [29.547952562499997],
'MISO 7x24': [27.076390749999998],
'MISO offpeak': [25.263605624999997],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_commod)
mock_pjm = Index('MA001', AssetClass.Commod, 'PJM')
mock_caiso = Index('MA002', AssetClass.Commod, 'CAISO')
mock_miso = Index('MA003', AssetClass.Commod, 'MISO')
with DataContext(datetime.date(2019, 5, 1), datetime.date(2019, 5, 1)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['MISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['MISO offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'CAISO'
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['CAISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['CAISO peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'PJM'
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x8')
assert_series_equal(pd.Series(target['7x8'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='2x16h')
assert_series_equal(pd.Series(target['2x16h'],
index=[],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', granularity='m', bucket='7X24')
assert_series_equal(pd.Series(target['monthly'],
index=[],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='7X24', real_time=True)
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_caiso, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', granularity='yearly')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_forward_price():
# US Power
target = {
'7x24': [19.46101],
'peak': [23.86745],
'J20 7x24': [18.11768888888889],
'J20-K20 7x24': [19.283921311475414],
'J20-K20 offpeak': [15.82870707070707],
'J20-K20 7x8': [13.020144262295084],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_forward_price)
mock_spp = Index('MA001', AssetClass.Commod, 'SPP')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
        # All bucket marks are present in this mock, so each query below returns a populated series
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='offpeak'
)
assert_series_equal(pd.Series(target['J20-K20 offpeak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x8'
)
assert_series_equal(pd.Series(target['J20-K20 7x8'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='lmp',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='5Q20',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='Invalid',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='3H20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='F20-I20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2H20',
bucket='7x24',
real_time=True
)
replace.restore()
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_missing_bucket_forward_price)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
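    # The mark for the '7x8' bucket is missing in this mock, so the '7x24' query should return an empty series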
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
    assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual), check_names=False)
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_natgas_forward_price():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_natgas_forward_price)
mock = CommodityNaturalGasHub('MA001', 'AGT')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = pd.Series(tm.forward_price(mock,
price_method='GDD',
contract_range='F21'))
expected = pd.Series([2.880], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
actual = pd.Series(tm.forward_price(mock,
price_method='GDD',
contract_range='F21-G21'))
expected = pd.Series([2.8629152542372878], index=[datetime.date(2019, 1, 2)], name='price')
assert_series_equal(expected, actual)
with pytest.raises(ValueError):
tm.forward_price(mock,
price_method='GDD',
contract_range='F21-I21')
with pytest.raises(ValueError):
tm.forward_price(mock,
price_method='GDD',
contract_range='I21')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.forward_price(mock,
price_method='GDD',
contract_range='F21')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_get_iso_data():
tz_map = {'MISO': 'US/Central', 'CAISO': 'US/Pacific'}
for key in tz_map:
assert (tm._get_iso_data(key)[0] == tz_map[key])
def test_string_to_date_interval():
assert (tm._string_to_date_interval("K20")['start_date'] == datetime.date(2020, 5, 1))
assert (tm._string_to_date_interval("K20")['end_date'] == datetime.date(2020, 5, 31))
assert (tm._string_to_date_interval("k20")['start_date'] == datetime.date(2020, 5, 1))
assert (tm._string_to_date_interval("k20")['end_date'] == datetime.date(2020, 5, 31))
assert (tm._string_to_date_interval("Cal22")['start_date'] == datetime.date(2022, 1, 1))
assert (tm._string_to_date_interval("Cal22")['end_date'] == datetime.date(2022, 12, 31))
assert (tm._string_to_date_interval("Cal2012")['start_date'] == datetime.date(2012, 1, 1))
assert (tm._string_to_date_interval("Cal2012")['end_date'] == datetime.date(2012, 12, 31))
assert (tm._string_to_date_interval("Cal53")['start_date'] == datetime.date(1953, 1, 1))
assert (tm._string_to_date_interval("Cal53")['end_date'] == datetime.date(1953, 12, 31))
assert (tm._string_to_date_interval("2010")['start_date'] == datetime.date(2010, 1, 1))
assert (tm._string_to_date_interval("2010")['end_date'] == datetime.date(2010, 12, 31))
assert (tm._string_to_date_interval("3Q20")['start_date'] == datetime.date(2020, 7, 1))
assert (tm._string_to_date_interval("3Q20")['end_date'] == datetime.date(2020, 9, 30))
assert (tm._string_to_date_interval("2h2021")['start_date'] == datetime.date(2021, 7, 1))
assert (tm._string_to_date_interval("2h2021")['end_date'] == datetime.date(2021, 12, 31))
assert (tm._string_to_date_interval("3q20")['start_date'] == datetime.date(2020, 7, 1))
assert (tm._string_to_date_interval("3q20")['end_date'] == datetime.date(2020, 9, 30))
assert (tm._string_to_date_interval("2H2021")['start_date'] == datetime.date(2021, 7, 1))
assert (tm._string_to_date_interval("2H2021")['end_date'] == datetime.date(2021, 12, 31))
assert (tm._string_to_date_interval("Mar2021")['start_date'] == datetime.date(2021, 3, 1))
assert (tm._string_to_date_interval("Mar2021")['end_date'] == datetime.date(2021, 3, 31))
assert (tm._string_to_date_interval("March2021")['start_date'] == datetime.date(2021, 3, 1))
assert (tm._string_to_date_interval("March2021")['end_date'] == datetime.date(2021, 3, 31))
assert (tm._string_to_date_interval("5Q20") == "Invalid Quarter")
assert (tm._string_to_date_interval("HH2021") == "Invalid num")
assert (tm._string_to_date_interval("3H2021") == "Invalid Half Year")
assert (tm._string_to_date_interval("Cal2a") == "Invalid year")
assert (tm._string_to_date_interval("Marc201") == "Invalid date code")
assert (tm._string_to_date_interval("M1a2021") == "Invalid date code")
assert (tm._string_to_date_interval("Marcha2021") == "Invalid date code")
assert (tm._string_to_date_interval("I20") == "Invalid month")
assert (tm._string_to_date_interval("20") == "Unknown date code")
def test_implied_vol_commod():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_implied_volatility)
mock = Index('MA001', AssetClass.Commod, 'Option NG Exchange')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.implied_volatility(mock,
tenor='F21-H21')
assert_series_equal(pd.Series(target['F21-H21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
replace.restore()
def test_fair_price():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price)
mock = Index('MA001', AssetClass.Commod, 'Swap NG Exchange')
mock2 = Swap('MA002', AssetClass.Commod, 'Swap Oil')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.fair_price(mock,
tenor='F21')
assert_series_equal(pd.Series(target['F21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.fair_price(mock,
tenor=None)
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price_swap)
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm.fair_price(mock2)
assert_series_equal(pd.Series([2.880],
index=[pd.Timestamp('2019-01-02')],
name='fairPrice'),
pd.Series(actual),
)
replace.restore()
def test_weighted_average_valuation_curve_for_calendar_strip():
target = {
'F21': [2.880],
'F21-H21': [2.815756],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fair_price)
mock = Index('MA001', AssetClass.Commod, 'Swap NG Exchange')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
actual = tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F21',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
assert_series_equal(pd.Series(target['F21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F21-H21',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
assert_series_equal(pd.Series(target['F21-H21'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='Invalid',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='F20-I20',
query_type=QueryType.FAIR_PRICE,
measure_field='fairPrice'
)
with pytest.raises(ValueError):
tm._weighted_average_valuation_curve_for_calendar_strip(mock,
contract_range='3H20',
query_type=QueryType.PRICE,
measure_field='fairPrice'
)
replace.restore()
def test_fundamental_metrics():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
period = '1y'
direction = tm.FundamentalMetricPeriodDirection.FORWARD
actual = tm.dividend_yield(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.dividend_yield(..., period, direction, real_time=True)
actual = tm.earnings_per_share(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.earnings_per_share(..., period, direction, real_time=True)
actual = tm.earnings_per_share_positive(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.earnings_per_share_positive(..., period, direction, real_time=True)
actual = tm.net_debt_to_ebitda(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.net_debt_to_ebitda(..., period, direction, real_time=True)
actual = tm.price_to_book(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_book(..., period, direction, real_time=True)
actual = tm.price_to_cash(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_cash(..., period, direction, real_time=True)
actual = tm.price_to_earnings(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_earnings(..., period, direction, real_time=True)
actual = tm.price_to_earnings_positive(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_earnings_positive(..., period, direction, real_time=True)
actual = tm.price_to_sales(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.price_to_sales(..., period, direction, real_time=True)
actual = tm.return_on_equity(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.return_on_equity(..., period, direction, real_time=True)
actual = tm.sales_per_share(mock_spx, period, direction)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='fundamentalMetric'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.sales_per_share(..., period, direction, real_time=True)
replace.restore()
def test_central_bank_swap_rate(mocker):
target = {
'meeting_absolute': -0.004550907771,
'meeting_relative': -0.00002833724599999969,
'eoy_absolute': -0.003359767756,
'eoy_relative': 0.001162802769,
'spot': -0.00455
}
mock_eur = Currency('MARFAGXDQRWM07Y2', 'EUR')
with DataContext(dt.date(2019, 12, 6), dt.date(2019, 12, 6)):
replace = Replacer()
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EUR', ))]
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
mock_get_data = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
mock_get_data.return_value = mock_meeting_absolute()
actual_abs = tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute',
dt.date(2019, 12, 6))
assert (target['meeting_absolute'] == actual_abs.loc[dt.date(2020, 1, 23)])
assert actual_abs.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_rel = tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative',
dt.date(2019, 12, 6))
assert (target['meeting_relative'] == actual_rel.loc[dt.date(2020, 1, 23)])
assert actual_rel.dataset_ids == ('CENTRAL_BANK_WATCH',)
mock_get_data.return_value = mock_ois_spot()
actual_spot = tm.central_bank_swap_rate(mock_eur, tm.MeetingType.SPOT, 'absolute', dt.date(2019, 12, 6))
assert (target['spot'] == actual_spot.loc[dt.date(2019, 12, 6)])
assert actual_spot.dataset_ids == ('CENTRAL_BANK_WATCH',)
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, 'meeting_forward')
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'normalized', '2019-09-01')
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 5)
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', '01-09-2019')
with pytest.raises(MqError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.SPOT, 'relative')
with pytest.raises(NotImplementedError):
tm.central_bank_swap_rate(mock_eur, tm.MeetingType.SPOT, 'absolute', real_time=True)
replace.restore()
def test_policy_rate_expectation(mocker):
target = {
'meeting_number_absolute': -0.004550907771,
'meeting_number_relative': -0.000028337246,
'meeting_date_relative': -0.000028337246,
'meeting_number_spot': -0.004522570525
}
mock_eur = Currency('MARFAGXDQRWM07Y2', 'EUR')
with DataContext(dt.date(2019, 12, 6), dt.date(2019, 12, 6)):
replace = Replacer()
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EUR', ))]
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
mocker.patch.object(Dataset, 'get_data', side_effect=get_data_policy_rate_expectation_mocker)
actual_num = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 2)
assert (target['meeting_number_absolute'] == actual_num.loc[dt.date(2019, 12, 6)])
assert actual_num.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_date = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute',
dt.date(2020, 1, 23))
assert (target['meeting_number_absolute'] == actual_date.loc[dt.date(2019, 12, 6)])
assert actual_date.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_num = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', 2)
assert_allclose([target['meeting_number_relative']], [actual_num.loc[dt.date(2019, 12, 6)]],
rtol=1e-9, atol=1e-15)
assert actual_num.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_num = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 0)
assert (target['meeting_number_spot'] == actual_num.loc[dt.date(2019, 12, 6)])
assert actual_num.dataset_ids == ('CENTRAL_BANK_WATCH',)
actual_date = tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', '2019-10-24')
assert (target['meeting_number_spot'] == actual_date.loc[dt.date(2019, 12, 6)])
assert actual_date.dataset_ids == ('CENTRAL_BANK_WATCH',)
mocker.patch.object(Dataset, 'get_data', side_effect=[mock_meeting_expectation(),
mock_empty_market_data_response()])
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', 2)
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.SPOT)
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', '5')
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 5.5)
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', '01-09-2019')
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'normalized', dt.date(2019, 9, 1))
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'relative', -2)
with pytest.raises(NotImplementedError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.SPOT, 'absolute', real_time=True)
mock_get_data = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
mock_get_data.return_value = pd.DataFrame()
with pytest.raises(MqError):
tm.policy_rate_expectation(mock_eur, tm.MeetingType.MEETING_FORWARD, 'absolute', 2)
replace.restore()
def test_realized_volatility():
from gs_quant.timeseries.econometrics import volatility, Returns
from gs_quant.timeseries.statistics import generate_series
random = generate_series(100).rename('spot')
window = 10
type_ = Returns.SIMPLE
replace = Replacer()
market_data = replace('gs_quant.timeseries.measures._market_data_timed', Mock())
return_value = MarketDataResponseFrame(random)
return_value.dataset_ids = _test_datasets
market_data.return_value = return_value
expected = volatility(random, window, type_)
actual = tm.realized_volatility(Cross('MA123', 'ABCXYZ'), window, type_)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_esg_headline_metric():
replace = Replacer()
mock_aapl = Stock('MA4B66MW5E27U9VBB94', 'AAPL')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_esg)
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_NUMERIC_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esNumericScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_POLICY_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esPolicyScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_AGGREGATE_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_PRODUCT_IMPACT_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esProductImpactScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_AGGREGATE_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='gScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_MOMENTUM_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='esMomentumScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_REGIONAL_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='gRegionalScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.CONTROVERSY_SCORE)
assert_series_equal(pd.Series([2, 4, 6], index=_index * 3, name='controversyScore'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_NUMERIC_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esNumericPercentile'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_POLICY_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esPolicyPercentile'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_AGGREGATE_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esPercentile'), pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_PRODUCT_IMPACT_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esProductImpactPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_AGGREGATE_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='gPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_MOMENTUM_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='esMomentumPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.GOVERNANCE_REGIONAL_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='gRegionalPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.CONTROVERSY_PERCENTILE)
assert_series_equal(pd.Series([81.2, 75.4, 65.7], index=_index * 3, name='controversyPercentile'),
pd.Series(actual))
actual = tm.esg_headline_metric(mock_aapl, tm.EsgMetric.ENVIRONMENTAL_SOCIAL_DISCLOSURE)
    assert_series_equal( | pd.Series([49.2, 55.7, 98.4], index=_index * 3, name='esDisclosurePercentage') | pandas.Series |
import pandas as pd
df = pd.DataFrame({"A": [1, 2, 3, 4, 5]})
s = | pd.Series([1, 2, 3]) | pandas.Series |
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from utils import sum_country_regions, get_change_rates, generate_df_change_rate, sliding_window, generate_COVID_input, generate_COVID_aux_input
import pickle
import copy
import os
import argparse
parser = argparse.ArgumentParser(description='Hi-covidnet DATALOADER')
# basic settings
parser.add_argument('--output_size', type=int, default=14, metavar='O', help='How many days you are predicting(default: 14)')
parser.add_argument('--save', action='store_true', default=False, help='Saving pre-processed data')
def normalize(df, axis=1):
"""
@df : shape(N,D)
"""
mean = df.iloc[:,4:].mean(axis=axis) # (D)
std = df.iloc[:,4:].std(axis=axis) # (D)
df.iloc[:,4:] = (df.iloc[:,4:].subtract(mean, axis='index')).divide(std, axis='index')
return df, mean, std
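# NOTE: the df.iloc[:, 4:] slice above assumes the JHU CSSE time-series layout, where the
# first four columns are Province/State, Country/Region, Lat and Long and the remaining
# columns are the daily cumulative counts being normalized.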
def scaling(df_confirm,df_death, df_confirm_change_1st_order,
df_confirm_change_2nd_order,df_death_change_1st_order,
df_death_change_2nd_order, fname="x_mean_std_list_5_27.pkl"):
##scaling
mean_std_list = []
df_confirm, mean, std = normalize(df_confirm, axis=1)
mean_std_list.append((mean,std))
df_death, mean, std = normalize(df_death, axis=1)
mean_std_list.append((mean,std))
df_confirm_change_1st_order, mean, std = normalize(df_confirm_change_1st_order, axis=1)
mean_std_list.append((mean,std))
df_confirm_change_2nd_order, mean, std = normalize(df_confirm_change_2nd_order, axis=1)
mean_std_list.append((mean,std))
df_death_change_1st_order, mean, std = normalize(df_death_change_1st_order, axis=1)
mean_std_list.append((mean,std))
df_death_change_2nd_order, mean, std = normalize(df_death_change_2nd_order, axis=1)
mean_std_list.append((mean,std))
pickle.dump(mean_std_list, open("pickled_ds/"+fname, "wb"))
def google_trenddata_loader(fname, countries_Korea_inbound):
google_trend = pd.read_csv('./dataset/{fname}.csv'.format(fname=fname), index_col=0)
iso_to_country = countries_Korea_inbound.set_index('iso').to_dict()['Country']
google_trend.rename(columns = iso_to_country, inplace = True)
google_trend = google_trend.set_index('date').T.reset_index()
google_trend = google_trend.rename(columns = {'index': 'Country'})
google_trend.columns = google_trend.columns.astype(str)
google_trend = google_trend.rename(columns = {col: str(int(col[4:6]))+'/'+str(int(col[-2:]))+'/' + col[2:4] for col in google_trend.columns[1:].astype(str)})
google_trend.loc[:, google_trend.columns[1:]] /= 100
google_trend.drop(np.argwhere(google_trend.Country == 'Korea, South')[0], inplace=True)
mean, std = google_trend.iloc[:,1:].mean(axis=1), google_trend.iloc[:,1:].std(axis=1)
google_trend.iloc[:,1:] = google_trend.iloc[:,1:].subtract(mean, axis='index').divide(std, axis='index')
return google_trend
def dataloader(output_size, save=False):
url_confirm = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
df_confirm = pd.read_csv(url_confirm, error_bad_lines=False)
url_death = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'
df_death = | pd.read_csv(url_death, error_bad_lines=False) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 5 12:02:47 2021
@author: adarshpl7
"""
import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt
import seaborn as sns
# import math
#Clears console and stored variables
try:
from IPython import get_ipython
get_ipython().magic('clear') #console
#get_ipython().magic('reset -f') #stored variables
except:
pass
import os
print("Current working directory: {0}".format(os.getcwd())) #current working directory
os.chdir('C:/Users/adars/Downloads/Laptop/Semester 4/RA work/Social/') #change directory
# Print the current working directory
print("Changed working directory: {0}".format(os.getcwd()))
# Main program starts here
#method-1: import CSV/TSV files
# import csv
# with open("C:/Users/adars/Downloads/Laptop/Semester 4/RA work/Social/SocialIndicators_BroadUS_2020-10-01_2021-03-31/Ticker_Identifiers_2020-10-01_2021-03-31.tsv") as fd:
# rd = csv.reader(fd, delimiter="\t", quotechar='"')
# for row in rd:
# print(row)
#del(rd) #deleted variable from memory
#method-2: import CSV/TSV files
Identifiers = pd.read_csv ("C:/Users/adars/Downloads/Laptop/Semester 4/RA work/Social/SocialIndicators_BroadUS_2020-10-01_2021-03-31/Ticker_Identifiers_2020-10-01_2021-03-31.tsv", sep = '\t')
#Identifiers.head()
#Identifiers.columns
Identifiers.iloc[1,:] #prints the row at position 1 (i.e. the second row) of the dataframe
Identifiers.dtypes #to get data type for all the columns in the dataframe
cc = pd.read_csv ("C:/Users/adars/Downloads/Laptop/Semester 4/RA work/Social/SocialIndicators_BroadUS_2020-10-01_2021-03-31/SocialIndicators_BroadUS_CloseToClose_2020-10-01_2021-03-31.tsv", sep = '\t')
co = pd.read_csv ("C:/Users/adars/Downloads/Laptop/Semester 4/RA work/Social/SocialIndicators_BroadUS_2020-10-01_2021-03-31/SocialIndicators_BroadUS_CloseToOpen_2020-10-01_2021-03-31.tsv", sep = '\t')
oc = | pd.read_csv ("C:/Users/adars/Downloads/Laptop/Semester 4/RA work/Social/SocialIndicators_BroadUS_2020-10-01_2021-03-31/SocialIndicators_BroadUS_OpenToClose_2020-10-01_2021-03-31.tsv", sep = '\t') | pandas.read_csv |
"""
Classes for representing datasets of images and/or coordinates.
"""
from __future__ import print_function
import json
import copy
import logging
import os.path as op
import numpy as np
import pandas as pd
import nibabel as nib
from .base import NiMAREBase
from .utils import (tal2mni, mni2tal, mm2vox, get_template, listify,
try_prepend, find_stem, get_masker)
LGR = logging.getLogger(__name__)
class Dataset(NiMAREBase):
"""
Storage container for a coordinate- and/or image-based meta-analytic
dataset/database.
Parameters
----------
source : :obj:`str`
JSON file containing dictionary with database information or the dict()
object
target : :obj:`str`
Desired coordinate space for coordinates. Names follow NIDM convention.
mask : `str`, `Nifti1Image`, or any nilearn `Masker`
Mask(er) to use. If None, uses the target space image, with all
non-zero voxels included in the mask.
"""
_id_cols = ['id', 'study_id', 'contrast_id']
def __init__(self, source, target='mni152_2mm', mask=None):
if isinstance(source, str):
with open(source, 'r') as f_obj:
self.data = json.load(f_obj)
elif isinstance(source, dict):
self.data = source
else:
raise Exception("`source` needs to be a file path or a dictionary")
# Datasets are organized by study, then experiment
# To generate unique IDs, we combine study ID with experiment ID
raw_ids = []
for pid in self.data.keys():
for cid in self.data[pid]['contrasts'].keys():
raw_ids.append('{0}-{1}'.format(pid, cid))
self.ids = raw_ids
# Set up Masker
if mask is None:
mask = get_template(target, mask='brain')
self.masker = get_masker(mask)
self.space = target
self._load_coordinates()
self._load_images()
self._load_annotations()
self._load_texts()
self._load_metadata()
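    # Example usage (hypothetical file name, for illustration only):
    #   dset = Dataset('my_database.json', target='mni152_2mm')
    #   subset = dset.slice(dset.ids[:10])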
def slice(self, ids):
"""
Return a reduced dataset with only requested IDs.
Parameters
----------
ids : array_like
List of study IDs to include in new dataset
Returns
-------
new_dset : :obj:`nimare.dataset.Dataset`
            Reduced Dataset containing only requested studies.
"""
new_dset = copy.deepcopy(self)
new_dset.ids = ids
new_dset.coordinates = new_dset.coordinates.loc[new_dset.coordinates['id'].isin(ids)]
new_dset.images = new_dset.images.loc[new_dset.images['id'].isin(ids)]
new_dset.annotations = new_dset.annotations.loc[new_dset.annotations['id'].isin(ids)]
new_dset.texts = new_dset.texts.loc[new_dset.texts['id'].isin(ids)]
temp_data = {}
for id_ in ids:
pid, expid = id_.split('-')
if pid not in temp_data.keys():
temp_data[pid] = self.data[pid].copy() # make sure to copy
temp_data[pid]['contrasts'] = {}
temp_data[pid]['contrasts'][expid] = self.data[pid]['contrasts'][expid]
new_dset.data = temp_data
return new_dset
def update_path(self, new_path):
"""
Update paths to images. Prepends new path to the relative path for
files in Dataset.images.
Parameters
----------
new_path : :obj:`str`
Path to prepend to relative paths of files in Dataset.images.
"""
relative_path_cols = [c for c in self.images if c.endswith('__relative')]
for col in relative_path_cols:
abs_col = col.replace('__relative', '')
if abs_col in self.images.columns:
LGR.info('Overwriting images column {}'.format(abs_col))
self.images[abs_col] = self.images[col].apply(try_prepend, prefix=new_path)
def _load_annotations(self):
"""
Load labels in Dataset into DataFrame.
"""
# Required columns
columns = ['id', 'study_id', 'contrast_id']
# build list of ids
all_ids = []
for pid in self.data.keys():
for expid in self.data[pid]['contrasts'].keys():
exp = self.data[pid]['contrasts'][expid]
id_ = '{0}-{1}'.format(pid, expid)
all_ids.append([id_, pid, expid])
id_df = pd.DataFrame(columns=columns, data=all_ids)
id_df = id_df.set_index('id', drop=False)
exp_dict = {}
for pid in self.data.keys():
for expid in self.data[pid]['contrasts'].keys():
exp = self.data[pid]['contrasts'][expid]
id_ = '{0}-{1}'.format(pid, expid)
if 'labels' not in self.data[pid]['contrasts'][expid].keys():
continue
exp_dict[id_] = exp['labels']
temp_df = pd.DataFrame.from_dict(exp_dict, orient='index')
df = pd.merge(id_df, temp_df, left_index=True, right_index=True, how='outer')
df = df.reset_index(drop=True)
df = df.replace(to_replace='None', value=np.nan)
self.annotations = df
def _load_metadata(self):
"""
Load metadata in Dataset into DataFrame.
"""
# Required columns
columns = ['id', 'study_id', 'contrast_id']
# build list of ids
all_ids = []
for pid in self.data.keys():
for expid in self.data[pid]['contrasts'].keys():
exp = self.data[pid]['contrasts'][expid]
id_ = '{0}-{1}'.format(pid, expid)
all_ids.append([id_, pid, expid])
id_df = pd.DataFrame(columns=columns, data=all_ids)
id_df = id_df.set_index('id', drop=False)
exp_dict = {}
for pid in self.data.keys():
for expid in self.data[pid]['contrasts'].keys():
exp = self.data[pid]['contrasts'][expid]
id_ = '{0}-{1}'.format(pid, expid)
if 'metadata' not in self.data[pid]['contrasts'][expid].keys():
continue
exp_dict[id_] = exp['metadata']
temp_df = pd.DataFrame.from_dict(exp_dict, orient='index')
df = | pd.merge(id_df, temp_df, left_index=True, right_index=True, how='outer') | pandas.merge |
import os
from typing import Text
from IPython.core.display import display, HTML
from jinja2 import Environment, FileSystemLoader
from numpy.lib.function_base import disp
import pandas as pd
import tensorflow_data_validation as tfdv
from tensorflow_data_validation.utils.display_util import (
get_anomalies_dataframe,
get_statistics_html,
)
from mlops.utils.sysutils import path_splitall
from mlops.model_analysis.utils import load_eval_result_text, convert_pandas_df
from mlops.model_analysis.proto import result_pb2
from mlops.model_analysis.consts import (
FILE_EVAL_DATA_STATS,
FILE_TRAIN_DATA_STATS,
FILE_DATA_SCHEMA,
FILE_PREV_EVAL_DATA_STATS,
)
# from weasyprint import HTML
def display_report(
eval_result_path: Text,
report_save_path: Text = None,
):
# Use in notebook
display(HTML(view_report(eval_result_path, report_save_path, save=False)))
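# Usage sketch (paths are hypothetical): display_report('artifacts/eval_result') renders
# the evaluation report inline in a notebook, while view_report('artifacts/eval_result',
# report_save_path='artifacts/report') returns the rendered HTML and, presumably,
# persists it under report_save_path when save=True.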
def view_report(
eval_result_path: Text,
report_save_path: Text = None,
save: bool = True,
):
if not report_save_path:
report_save_path = eval_result_path
jinjia_env = Environment(
loader=FileSystemLoader(os.path.dirname(os.path.abspath(__file__)))
)
report_template = jinjia_env.get_template("performance_report.html")
eval_result: result_pb2.EvalResult = load_eval_result_text(eval_result_path)
meta_table = pd.Series(name="Meta Data")
meta_table["Evaluation Date"] = eval_result.eval_date
meta_table["Model Name"] = eval_result.model_spec.name
meta_table["Model Version"] = eval_result.model_spec.model_ver
meta_table = | pd.DataFrame(meta_table) | pandas.DataFrame |
# -*- coding: utf-8 -*-
''' This program takes an Excel sheet as input where each row in the first column of the sheet represents a document. '''
import pandas as pd
import string
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.preprocessing import LabelEncoder
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
import re
# YOU NEED TO DO THIS THE FIRST TIME TO DOWNLOAD A FEW CORPORA FOR TEXT ANALYSIS
#import nltk
#nltk.download('stopwords')
#nltk.download('wordnet')
''' HYPER PARAMETERS '''
input_file = 'T&ADataForAnalysis'
data=pd.read_excel(input_file +'.xlsx', sheet_name="BaseData") #Include your data file instead of data.xlsx
ticket_data = data.iloc[:,0:30] #Selecting the columns used for the analysis.
Analysis_primary_columnName = 'Description (Customer visible)'
Analysis_secondary_columnName = 'Short description'
Analysis_Result_columnName = 'SerialNumber'
Analysis_ticket_columnName = 'Number'
num_clusters = 75 #Change it according to your data.
''' HYPER PARAMETERS '''
stop = set(stopwords.words('english'))
exclude = set(string.punctuation)
lemma = WordNetLemmatizer()
# Cleaning the text sentences so that punctuation marks, stop words & digits are removed
def clean(doc):
stop_free = " ".join([i for i in doc.lower().split() if i not in stop])
punc_free = ''.join(ch for ch in stop_free if ch not in exclude)
normalized = " ".join(lemma.lemmatize(word) for word in punc_free.split())
processed = re.sub(r"\d+","",normalized)
y = processed.split()
return y
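# Minimal sketch (helper not in the original script): clean() lowercases the text,
# removes stop words and punctuation, lemmatizes, and strips digits, returning tokens.
def _clean_demo():
    sample = "The 3 servers failed after the restart!"
    return clean(sample)  # e.g. ['server', 'failed', 'restart'], depending on NLTK data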
#Converting the column of data from excel sheet into a list of documents, where each document corresponds to a group of words.
training_corpus=[]
training_description=[]
testing_corpus=[]
testing_description=[]
training_ticket_numbers=[]
testing_ticket_numbers=[]
training_output_category=[]
for index,row in ticket_data.iterrows():
line = ""
if (row[Analysis_primary_columnName] and str(row[Analysis_primary_columnName]) != 'nan' ):
line = str(row[Analysis_primary_columnName])
else:
line = str(row[Analysis_secondary_columnName])
line = line.strip()
cleaned = clean(line)
cleaned = ' '.join(cleaned)
    ''' IF MANUAL CLASSIFICATION IS AVAILABLE, PUT THEM INTO TRAINING, ELSE TESTING'''
if (str(row[Analysis_Result_columnName]) != 'nan'):
training_description.append(line)
training_corpus.append(cleaned)
# Add ticket number for indexing
training_ticket_numbers.append(row[Analysis_ticket_columnName])
training_output_category.append(row[Analysis_Result_columnName])
else:
testing_description.append(line)
testing_corpus.append(cleaned)
testing_ticket_numbers.append(row[Analysis_ticket_columnName])
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(training_output_category)
#Count Vectoriser then tfidf transformer
transformer = TfidfVectorizer(stop_words='english')
tfidf = transformer.fit_transform(training_corpus)
#%%
import matplotlib.pyplot as plt
Sum_of_squared_distances = []
k_step = 10
k_start = 100
k_max = 200
K = range(k_start, k_max, k_step)
for k in K:
km = KMeans(n_clusters=k)
km = km.fit(tfidf)
Sum_of_squared_distances.append(km.inertia_)
k_optimal = k_start + (Sum_of_squared_distances.index(min(Sum_of_squared_distances)) + 1) * k_step
plt.plot(K, Sum_of_squared_distances, 'bx-')
plt.xlabel('k')
plt.ylabel('Sum_of_squared_distances')
plt.title('Elbow Method For Optimal k')
plt.show()
#%%
k_optimal = 20
# FIRST DO UNSUPERVISED CLUSTERING ON TEXT USING KMeans
modelkmeans = KMeans(n_clusters=k_optimal)
modelkmeans.fit(tfidf)
clusters = modelkmeans.labels_.tolist()
classification_dic={'Issue': training_description, 'Transformed Data':training_corpus, 'Machine Cluster':clusters, 'Human Classification': training_output_category} #Creating dict having doc with the corresponding cluster number.
frame=pd.DataFrame(classification_dic, index=[training_ticket_numbers], columns=['Issue', 'Transformed Data', 'Machine Cluster', 'Human Classification']) # Converting it into a dataframe.
# FIND SIGNIFICANT TERMS IN EACH CLUSTER
xvalid_tfidf_ngram = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', ngram_range=(5,6))
cluster_count_list = []
cluster_tag_list = []
cluster_themes_dict = {}
for i in set(clusters):
current_cluster_data = [training_corpus[x] for x in np.where(modelkmeans.labels_ == i)[0]]
try:
current_tfs = xvalid_tfidf_ngram.fit_transform(current_cluster_data)
current_tf_idfs = dict(zip(xvalid_tfidf_ngram.get_feature_names(), xvalid_tfidf_ngram.idf_))
tf_idfs_tuples = current_tf_idfs.items()
cluster_themes_dict[i] = sorted(tf_idfs_tuples, key = lambda x: x[1])[:1]
cluster_tag_list.append(str(cluster_themes_dict[i][0][0]))
# cluster_tag_list.append("".join(format([x[0] for x in cluster_themes_dict[i]])))
except:
cluster_tag_list.append(current_cluster_data[0])
cluster_themes_dict[i] = (current_cluster_data[0], 0)
cluster_count_list.append(clusters.count(i))
print (f'Cluster {i} key words: {cluster_tag_list[i]}')
def NameCluster(cluster_no):
return "" + str(cluster_no+1)
plot_frame = pd.DataFrame({'Cluster Count':cluster_count_list, 'Machine Tag': cluster_tag_list, 'Cluster': list(map(NameCluster, set(clusters)))},
index = set(clusters),
columns=['Machine Tag', 'Cluster Count', 'Cluster'])
def tagCluster(cluster_no):
# return the first tagging
return cluster_themes_dict[cluster_no][0][0]
list(map(tagCluster, clusters))
#%%
from matplotlib.ticker import PercentFormatter
plot_frame = plot_frame.sort_values(by='Cluster Count',ascending=False)
plot_frame["Cumulative Percentage"] = plot_frame['Cluster Count'].cumsum()/plot_frame['Cluster Count'].sum()*100
fig, ax = plt.subplots()
#ax.bar(plot_frame['Machine Tag'], plot_frame["Cluster Count"], color="C0")
#ax.bar(plot_frame.index, plot_frame["Cluster Count"], color="C0")
ax.bar(plot_frame['Cluster'], plot_frame["Cluster Count"], color="C0")
ax2 = ax.twinx()
#ax2.plot(plot_frame['Machine Tag'], plot_frame["cumpercentage"], color="C1", marker="D", ms=7)
#ax2.plot(plot_frame.index, plot_frame["cumpercentage"], color="C1", marker="D", ms=7)
ax2.plot(plot_frame['Cluster'], plot_frame["Cumulative Percentage"], color="C1", marker="D", ms=7)
ax2.yaxis.set_major_formatter(PercentFormatter())
ax.tick_params(axis="y", colors="C0")
ax2.tick_params(axis="y", colors="C1")
ax.set_title ("Pareto chart of Analyzed Ticket Data", fontsize=12)
ax.set_ylabel("# of Tickets", fontsize=7)
ax.set_xlabel("Category #", fontsize=7)
plt.legend()
plt.show()
#%%
# save to file
frame.to_excel(input_file + "_KMeans_Clusters.xlsx")
print ("Resuls written to " + input_file + "_KMeans_Clusters.xlsx")
lookup = frame.groupby(['Machine Cluster', 'Human Classification'])['Human Classification'].agg({'no':'count'})
mask = lookup.groupby(level=0).agg('idxmax')
lookup = lookup.loc[mask['no']]
lookup = lookup.reset_index()
lookup = lookup.set_index('Machine Cluster')['Human Classification'].to_dict()
#%%
# SECOND, classify the documents with a KNN classifier - supervised learning
# Only possible when you have Manual classification
modelknn = KNeighborsClassifier(n_neighbors=25)
modelknn.fit(tfidf, integer_encoded)
#%%
''' PREDICT THE DATA WITH KNN AND KMEANS '''
print (f"Testing data count {len(testing_corpus)}")
#Count Vectoriser then tfidf transformer
testing_tfidf = transformer.transform(testing_corpus)
predicted_labels_knn = modelknn.predict(testing_tfidf )
predicted_labels_kmeans = modelkmeans.predict(testing_tfidf )
classification_dic={'Issue': testing_description, 'Transformed Data' : testing_corpus, 'Machine Cluster':predicted_labels_kmeans} #Creating dict having doc with the corresponding cluster number.
predicted_frame= | pd.DataFrame(classification_dic, index=[testing_ticket_numbers], columns=['Issue', 'Transformed Data', 'Machine Cluster']) | pandas.DataFrame |
'''
pandas demo - data cleaning ( numpy-1.19.2 pandas-1.1.2 scikit-learn-0.23.2 )
'''
import numpy as np
from pandas import Series, DataFrame
import pandas as pd
from sqlalchemy import create_engine
def is_null():
df = pd.DataFrame(np.random.randn(10, 6))
df.iloc[:4, 1] = None
df.iloc[:2, 4:6] = None
df.iloc[6, 3:5] = None
df.iloc[8, 0:2] = None
    print('Original data:\r\n', df)
    result = df.isnull()
    print('Missing-value flags:\r\n', result)
    result = df.isnull().any()  # whether each column contains any null value
    print('Column-level missing-value check:\r\n', result, type(result))
    result = df[df.isnull().values == True].drop_duplicates()  # only keep rows that contain missing values; drop_duplicates removes repeated rows
    print('Rows containing missing values:\r\n', result, type(result))
    result = df.columns[df.isnull().any() == True]
    print('Column indexes containing null/NA values:\r\n', result, type(result))
    num = df.isnull().sum()
    print('Number of nulls per column:\r\n', num)
    num = df.isnull().sum(axis=1)
    print('Number of nulls per row:\r\n', num)
def clear_data():
df = pd.DataFrame(np.random.randn(10, 6))
df.iloc[:4, 1] = None
df.iloc[:2, 4:6] = None
df.iloc[6, 3:5] = None
df.iloc[8, 0:2] = None
    print('Original data:\r\n', df)
    print('\r\ndropna result:\r\n', df.dropna())  # does not modify the original data
    print('\r\ndropna drop columns:\r\n', df.dropna(axis=1))
    print('\r\ndropna drop rows only when all values are null:\r\n', df.dropna(how='all'))
    print('\r\ndropna keep rows with at least 4 non-null values:\r\n', df.dropna(thresh=4))
    print('\r\ndropna check columns 2 and 4, drop the matching rows:\r\n', df.dropna(subset=[2, 4]))
    print('\r\ndropna check rows 2 and 4, drop the matching columns:\r\n', df.dropna(axis=1, subset=[2, 4]))
    print('\r\nfillna fill with 0:\r\n', df.fillna(0))  # fill every missing value with 0
    print('\r\nfillna horizontally, forward fill:\r\n', df.fillna(axis=1, method='ffill'))
    print('\r\nfillna vertically, forward fill:\r\n', df.fillna(axis=0, method='ffill'))
    print('\r\nfillna different values per column:\r\n', df.fillna(value={0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}))  # per-column fill values
    print('\r\nfillna at most one fill per column:\r\n', df.fillna(value={0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5}, limit=1))
def clear_duplicated_data():
df = pd.DataFrame({'k1': ['one', 'two'] * 4, 'k2': [1, 2, 1, 2, 3, 4, 5, 6]})
    print('Original data:\r\n', df)
    print('duplicated flag duplicates:\r\n', df.duplicated())
    print('duplicated flag duplicates, mark every occurrence True:\r\n', df.duplicated(keep=False))
    print('duplicated check only the given column:\r\n', df.duplicated(subset='k1'))
    print('drop_duplicates remove duplicate rows:\r\n', df.drop_duplicates())
    print('drop_duplicates check only the given column and remove duplicate rows:\r\n', df.drop_duplicates(subset='k1'))
def clear_except_data():
    df = pd.DataFrame(np.random.randn(1000, 4))  # 1000 x 4 standard-normal data
    print(df.describe())
    col = df[2]
    print('Values in one column with absolute value > 3:\r\n', col[np.abs(col) > 3])
    print('Rows containing any value beyond 3 / -3:\r\n', df[(np.abs(df) > 3).any(1)])
    df[np.abs(df) > 3] = np.sign(df) * 3
    print('Summary statistics after capping:\r\n', df.describe())
    print('Signs of the data:\r\n', np.sign(df).head(10))
    print('-------3-sigma rule------')
    df = pd.read_csv('a.csv', encoding='gbk')
print(df)
    # define a 3-sigma (Pauta criterion) outlier-detection function
    def outRange(ser: Series):
        boolInd = (ser < ser.mean() - 3 * ser.std()) | (ser > ser.mean() + 3 * ser.std())  # boolean mask of outliers
        index = np.arange(len(ser))[boolInd]  # positions where the mask is True
out_range = ser.iloc[index]
return out_range
outlier = outRange(df['counts'])
print(len(outlier), outlier.max(), outlier.min())
    print('-------box plot (IQR) rule------')
def out_range(ser: Series):
QL = ser.quantile(0.25)
QU = ser.quantile(0.75)
IQR = QU - QL
        # cap values beyond the upper / lower bounds
ser.loc[ser > QU + 1.5 * IQR] = QU + 1.5 * IQR
ser.loc[ser < QL - 1.5 * IQR] = QL - 1.5 * IQR
return ser
df['counts'] = out_range(df['counts'])
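# Minimal sketch (helper not in the original script): the box-plot rule used by
# out_range() above caps values outside [QL - 1.5*IQR, QU + 1.5*IQR].
def _iqr_bounds_demo():
    s = pd.Series([1, 2, 3, 4, 100])
    ql, qu = s.quantile(0.25), s.quantile(0.75)
    iqr = qu - ql
    return ql - 1.5 * iqr, qu + 1.5 * iqr  # values outside these bounds get clipped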
# data transformation
def data_convert():
df = pd.DataFrame(
{'food': ['bacon', 'pork', 'bacon', 'pastrami', 'beef', 'Bacon', 'Pastrami', 'honey ham', 'nova lox'],
'ounce': [4, 3, 12, 2, 3, 4, 5, 12, 2]})
    print('Original data:\r\n', df)
    # add a column giving the source animal of each meat
animals = {'bacon': 'pig', 'pork': 'pig', 'pastrami': 'cow', 'beef': 'cow', 'honey ham': 'pig', 'nova lox': 'salmon'}
lower = df['food'].str.lower()
# print(lower)
    df['animal'] = lower.map(animals)  # map via the dict
print('map \r\n', df)
df['animal_2'] = df['food'].map(lambda x: animals[x.lower()])
print('map lambda \r\n', df)
# replace
data = pd.Series([1, -999, 2, -999, 1, -1000, 2, -999])
    print('Series replace \r\n', data.replace(-999, np.NAN))
    print('Series replace multiple values \r\n', data.replace([-999, -1000], [np.NAN, 0]))
    # rename the axis index labels
df = pd.DataFrame(np.arange(12).reshape(3, 4),
index=['A', 'B', 'C'], columns=['aa', 'bb', 'cc', 'dd'])
df.index = df.index.map(lambda x: x.lower())
print('index.map \r\n', df)
df.rename(index=str.title, columns=str.upper,
              inplace=True)  # str.title() returns a "titlecased" string: each word starts with an uppercase letter and the rest are lowercase (see istitle()).
print('dataframe.rename \r\n', df)
    # using rename
print('-' * 30, 'ya')
df = pd.DataFrame(
{'animal': ['pig', 'cow', 'hen', 'cat', 'dog', 'rat'],
'count': [14, 13, 22, 56, 32, 29]})
print('get_dummies: \r\n', pd.get_dummies(df))
    # equal-width discretization
    counts = pd.cut(df['count'], 3)
    print('Bin counts after equal-width discretization', counts.value_counts())
    # equal-frequency discretization
def samRateCut(data: DataFrame, k):
w = data.quantile(np.arange(0, 1 + 1.0 / k, 1.0 / k))
data = pd.cut(data, w)
return data
result = samRateCut(df['count'], 3)
    print('Bin counts after equal-frequency discretization', result.value_counts())
    # custom (cluster-based) discretization
def kmeanCunt(data: DataFrame, k):
from sklearn.cluster import KMeans
        kmodel = KMeans(n_clusters=k)  # build the model
        kmodel.fit(data.values.reshape(len(data), 1))  # fit the model
c = | pd.DataFrame(kmodel.cluster_centers_) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This file is part of the Shotgun Lipidomics Assistant (SLA) project.
Copyright 2020 <NAME> (UCLA), <NAME> (UCLA), <NAME> (UW).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import numpy as np
import pandas as pd
from pyopenms import *
import os
# import glob
# import re
import matplotlib
import matplotlib.pyplot as plt
# from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# from matplotlib.figure import Figure
import seaborn as sns
# from scipy import stats
from tkinter import *
# import tkinter as tk
from tkinter import ttk
# from tkinter import messagebox
# from tkinter.messagebox import showinfo
from tkinter import filedialog
import datetime
def get_tune1(tunef1):
tunef1.configure(state="normal")
tunef1.delete(1.0, END)
filedir = filedialog.askopenfilename(filetypes=(("mzML Files", "*.mzML"), ("all files", "*.*")))
tunef1.insert(INSERT, filedir)
tunef1.configure(state="disabled")
def get_tune2(tunef2):
# setdir = filedialog.askdirectory()
tunef2.configure(state="normal")
tunef2.delete(1.0, END)
filedir = filedialog.askopenfilename(filetypes=(("mzML Files", "*.mzML"), ("all files", "*.*")))
tunef2.insert(INSERT, filedir)
tunef2.configure(state="disabled")
def imp_tunekey(maploc_tune):
# map1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
maploc_tune.configure(state="normal")
maploc_tune.delete(1.0, END)
map1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
maploc_tune.insert(INSERT, map1)
maploc_tune.configure(state="disabled")
def exportdata(covlist, covlist_dict, maploc_tune):
"""export result to excel file"""
exp_temp_loc = maploc_tune.get('1.0', 'end-1c') # 'Tuning_spname_dict'
neg_temp = pd.read_excel(exp_temp_loc, sheet_name='NEG', header=0, index_col=None, na_values='.')
pos_temp = | pd.read_excel(exp_temp_loc, sheet_name='POS', header=0, index_col=None, na_values='.') | pandas.read_excel |
from src.utility.DBUtils import get_engine
from src.model.BaseModel import BaseModel
import pandas as pd
import numpy as np
class Summary:
# use a sqlite database to save and fetch experiment results
def __init__(self, db_path):
self.__engine = get_engine(db_path)
self.table = None
self.__update_table()
@staticmethod
def __get_latest_result(df, group_list):
latest = (df.assign(rnk=df.groupby(group_list)['ts']
.rank(method='first', ascending=False))) \
.query('rnk < 2') \
.drop(columns=["rnk"])
return latest
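    # Note on the rank trick above: rows are ranked within each group by timestamp in
    # descending order, so `rnk < 2` keeps only the most recent record per group.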
def __update_table(self):
table = pd.read_sql("select * from MetaData", self.__engine, index_col="id")
table.ts = table.ts.astype(np.datetime64)
self.table = table.copy()
def summarize_cv(self,
dataset_name,
metrics=None):
self.__update_table()
metrics = self.table["metric"].unique() if not metrics else metrics
df = self.table[(self.table["dataset"] == dataset_name)
& (self.table["metric"].isin(metrics))
& (self.table["fold"] >= 0)] \
.drop(columns=["dataset", "path"])
# return the latest result of each fold
df = self.__get_latest_result(df, ["model", "hyper", "metric", "fold"])
summary = df.groupby(["model", "hyper", "metric"]) \
.agg(mean=("value", np.mean),
std=("value", np.std)) \
.reset_index(inplace=False)
rank_summary = summary.assign(rnk=summary.groupby("metric")['mean']
.rank(method='first', ascending=False)) \
.sort_values(["metric","rnk"])
return rank_summary
def get_model_test_perf(self,
dataset_name,
model_name):
self.__update_table()
df = self.table[(self.table["dataset"] == dataset_name)
& (self.table["fold"] == -1)
& (self.table["model"] == model_name)] \
.drop(columns=["dataset", "path", "fold"])
df = self.__get_latest_result(df, ["model", "hyper", "metric"])
return df
def get_optimal_params(self, dataset_name, model_name, metric):
test_perf = self.get_model_test_perf(dataset_name, model_name)
if len(test_perf) == 0:
return None
ascending = True if metric in ["rmse"] else False
filtered = test_perf[test_perf.metric == metric].sort_values("value", ascending=ascending)
filtered.reset_index(drop=True, inplace=True)
hyper = filtered.loc[0, "hyper"]
value = filtered.loc[0, "value"]
print("Best {} of {} is found as {}".format(metric, model_name, value))
return hyper
def get_result_for_params(self, dataset_name, model_name, hyper, metric, verbose=True):
test_perf = self.get_model_test_perf(dataset_name, model_name)
filtered = test_perf[(test_perf.model == model_name) & (test_perf["hyper"] == hyper) & (test_perf["metric"] == metric)].reset_index(drop=True)
if len(filtered) == 0:
return | pd.DataFrame() | pandas.DataFrame |
from secrets import IEX_CLOUD_API_TOKEN as iex_tkn
import pandas as pd
import requests
from statistics import mean
from scipy import stats
import math
stocks_file = 'sp_500_stocks.csv'
api_url = "https://sandbox.iexapis.com/stable/"
def get_info(symbol):
endpt = f"stock/{symbol}/stats?token={iex_tkn}"
response = requests.get(api_url+endpt).json()
return response
def get_stocks_info(stocks):
symbol_strings = []
for chunk in list(chunks(stocks['Ticker'], 100)):
symbol_strings.append(','.join(chunk))
symbol_string = ''
data = []
for elem in symbol_strings:
symbol_string = elem
batch_endpt = f"stock/market/batch?symbols={symbol_string}&types=price,stats&token={iex_tkn}"
response = requests.get(api_url+batch_endpt).json()
for symbol in symbol_string.split(","):
data.append(
{
'ticker': symbol,
'price' : response[symbol]['price'],
'one year': response[symbol]['stats']['year1ChangePercent'],
'one year percentile': 'N/A',
'six mnth': response[symbol]['stats']['month6ChangePercent'],
'six mnth percentile': 'N/A',
'three mnth': response[symbol]['stats']['month3ChangePercent'],
'three mnth percentile': 'N/A',
'one mnth': response[symbol]['stats']['month1ChangePercent'],
'one mnth percentile': 'N/A',
'hqm score': 'N/A'
}
)
return pd.DataFrame(data)
def hqm(df, portfolio_size):
    # sort by one-year return in place (descending)
df.sort_values('one year', ascending = False, inplace = True)
df.reset_index(drop = True, inplace = True)
#Calculate the percentiles and hqm score for each row
for index, row in df.iterrows():
df.at[index, 'one year percentile'] = stats.percentileofscore(df['one year'], row['one year'])
df.at[index, 'six mnth percentile'] = stats.percentileofscore(df['six mnth'], row['six mnth'])
df.at[index, 'three mnth percentile'] = stats.percentileofscore(df['three mnth'], row['three mnth'])
df.at[index, 'one mnth percentile'] = stats.percentileofscore(df['one mnth'], row['one mnth'])
vals = []
vals.append(df.at[index, 'one year percentile'])
vals.append(df.at[index, 'six mnth percentile'])
vals.append(df.at[index, 'three mnth percentile'])
vals.append(df.at[index, 'one mnth percentile'])
df.at[index, 'hqm score'] = mean(vals)
#create a copy with just the cols we need, sort by hqm and keep the top 50
df_hqm = df[['ticker', 'price', 'hqm score']].copy()
df_hqm.sort_values('hqm score', ascending = False, inplace = True)
df_hqm = df_hqm[:50]
df_hqm.reset_index(drop = True, inplace = True)
df_hqm['buy'] = 0
#Calculate the number of shares to buy - in equal proportion
position_size = portfolio_size / len(df_hqm)
for index, row in df_hqm.iterrows():
if row['price'] == 0:
print(row['ticker'])
else:
df_hqm.at[index, 'buy'] = math.floor(position_size / row['price'])
return df_hqm
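# Minimal sketch (helper not in the original strategy): the hqm score above is the mean
# of momentum percentiles computed with scipy.stats.percentileofscore.
def _percentile_demo():
    returns = [0.05, 0.10, 0.20, 0.40]
    return stats.percentileofscore(returns, 0.20)  # 75.0 with the default kind='rank'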
def chunks(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i+n]
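# Minimal sketch (helper not in the original script): chunks() yields consecutive slices
# of at most n items, which is how tickers are batched 100 per API request above.
def _chunks_demo():
    return list(chunks(['A', 'B', 'C', 'D', 'E'], 2))  # [['A', 'B'], ['C', 'D'], ['E']]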
def portfolio_input():
portfolio_size = input('Enter the size of your portfolio: ')
    while not (portfolio_size.isnumeric() and float(portfolio_size) >= 1000000):
        portfolio_size = input('You can only use a number of one million or greater. Enter the size of your portfolio: ')
return float(portfolio_size)
def get_stocks(filename):
stocks = | pd.read_csv(filename) | pandas.read_csv |
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
| tm.assert_frame_equal(result, df) | pandas._testing.assert_frame_equal |
"""
Tests for kf_lib_data_ingest/extract/operations.py
"""
import pandas
import pytest
from kf_lib_data_ingest.common.type_safety import function
from kf_lib_data_ingest.etl.extract import operations
from test_type_safety import type_exemplars
df = pandas.DataFrame({"COL_A": ["1", "2", "3"]})
other_df = pandas.DataFrame({"COL_B": ["4", "5", "6"]})
bigger_df = pandas.DataFrame(
{
"COL_A": ["1", "2", "3"],
"COL_B": ["a", "b", "c"],
"COL_C": ["z", "y", "x"],
"COL_D": ["3", "2", "1"],
}
)
longvalue_df = pandas.DataFrame({"COL_A": ["1a1", "2a2", "3a3"]})
def _in_out_variants(map_wrap, val, in_col=None, out_col=None):
"""
Calls some function map_wrap with arguments according to what is optionally
passed in here.
"""
if in_col is not None and out_col is not None:
map_wrap(val, in_col, out_col)
elif in_col is not None:
map_wrap(val, in_col)
elif out_col is not None:
map_wrap(val, out_col)
else:
map_wrap(val)
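# Illustration (not part of the original test helpers): with both columns supplied the
# call reduces to map_wrap(val, in_col, out_col), e.g.
#   _in_out_variants(operations.value_map, lambda x: x, in_col="COL_A", out_col="OUT_COL")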
def _test_map_allowed_types(map_wrap, allowed_types, in_col=None, out_col=None):
"""
For a given map_wrap function, tests that all unallowed argument types
raise a TypeError and all allowed argument types do not.
"""
for k, v in type_exemplars:
if v not in allowed_types:
with pytest.raises(TypeError):
_in_out_variants(map_wrap, k, in_col, out_col)
else:
_in_out_variants(map_wrap, k, in_col, out_col)
def test_df_map():
"""
Test that df_map returns a df and doesn't modify the original.
"""
# tests passing allowed and disallowed types
_test_map_allowed_types(operations.df_map, {function, callable})
# verify that df_map doesn't modify the original df
func = operations.df_map(lambda df: df)
assert df is not func(df)
# and that the contents are equal
assert func(df).equals(df)
# and that the above test isn't a trick
func = operations.df_map(lambda df: other_df)
assert func(df).equals(other_df)
# verify that df mapping func may only return a DataFrame
func = operations.df_map(lambda df: None)
with pytest.raises(TypeError) as e:
func(df)
assert "DataFrame" in str(e.value)
def test_keep_map():
# verify that keep_map doesn't modify the original df
func = operations.keep_map("COL_A", "COL_A")
assert df is not func(df)
func = operations.keep_map("COL_B", "OUT_COL")
assert func(bigger_df).equals(
pandas.DataFrame({"OUT_COL": bigger_df["COL_B"]})
)
def test_value_map():
# tests passing allowed and disallowed types
_test_map_allowed_types(
operations.value_map,
{function, callable, dict, str},
in_col="COL_A",
out_col="OUT_COL",
)
# verify that value_map doesn't modify the original df
func = operations.value_map(lambda x: x, "COL_A", "COL_A")
assert df is not func(df)
# mapper function
func = operations.value_map(lambda x: 5, "COL_A", "OUT_COL")
assert func(df).equals(pandas.DataFrame({"OUT_COL": [5, 5, 5]}))
# mapper dict
func = operations.value_map(
{"1": "a", "2": "b", "3": "c"}, "COL_A", "OUT_COL"
)
assert func(df).equals( | pandas.DataFrame({"OUT_COL": ["a", "b", "c"]}) | pandas.DataFrame |
from typing import Union, cast
import warnings
import numpy as np
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.formats.printing import pprint_thing
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
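# Usage sketch (illustration only, not part of the pandas source): with the default
# rtol=1e-5, assert_almost_equal(1.000001, 1.0) passes, while
# assert_almost_equal(1.01, 1.0) raises an AssertionError.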
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in any order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas.testing import assert_index_equal
>>> a = pd.Index([1, 2, 3])
>>> b = pd.Index([1, 2, 3])
>>> assert_index_equal(a, b)
"""
__tracebackhide__ = True
def _check_types(left, right, obj="Index"):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", left, right, obj=obj)
# allow string-like to have different inferred_types
if left.inferred_type in ("string"):
assert right.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", left, right, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
return unique._shallow_copy(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = left.sort_values()
right = right.sort_values()
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = (
np.sum((left._values != right._values).astype(int)) * 100.0 / len(left)
)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
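# Usage sketch (illustration only, not part of the pandas source): assert_attr_equal
# compares one named attribute on two objects and raises a detailed AssertionError on
# mismatch, e.g. assert_attr_equal("dtype", Index([1, 2]), Index([1.0, 2.0])) raises.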
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
kwargs = {}
if left._left.dtype.kind in ["m", "M"]:
# We have a DatetimeArray or TimedeltaArray
kwargs["check_freq"] = False
assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
assert_equal(left._right, right._right, obj=f"{obj}.left", **kwargs)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
Check that 'np.ndarray' is equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for left_arr, right_arr in zip(left, right):
# count up differences
if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
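# A minimal usage sketch (editor's illustration, not part of the original source; it only
# exercises the helper defined above):
#
#   assert_numpy_array_equal(np.array([1, 2, 3]), np.array([1, 2, 3]))   # passes silently
#   assert_numpy_array_equal(np.array([1, 2, 3]), np.array([1, 2, 4]))   # raises AssertionError with a % diff
#   assert_numpy_array_equal(np.array([1.0, 2.0]), np.array([1, 2]))     # raises because the dtypes differ (check_dtype=True)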
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
Examples
--------
>>> from pandas.testing import assert_extension_array_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b, c = a.array, a.array
>>> assert_extension_array_equal(b, c)
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
*,
check_index=True,
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
check_index : bool, default True
Whether to check index equivalence. If False, then compare only values.
.. versionadded:: 1.3.0
Examples
--------
>>> from pandas.testing import assert_series_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b = pd.Series([1, 2, 3, 4])
>>> assert_series_equal(a, b)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
if check_index:
# GH #38183
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and | is_interval_dtype(right.dtype) | pandas.core.dtypes.common.is_interval_dtype |
import os
import pandas as pd
from tqdm import tqdm
import pipelines.p1_orca_by_stop as p1
from utils import constants, data_utils
NAME = 'p2_aggregate_orca'
WRITE_DIR = os.path.join(constants.PIPELINE_OUTPUTS_DIR, NAME)
def load_input():
path = os.path.join(constants.PIPELINE_OUTPUTS_DIR, f'{p1.NAME}.csv')
return | pd.read_csv(path) | pandas.read_csv |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/10/19 9:28
Desc: Sina Finance - A-shares - real-time and historical quote data (including forward- and backward-adjustment factors)
"""
import re
import json
import demjson
from py_mini_racer import py_mini_racer
import pandas as pd
import requests
from tqdm import tqdm
from akshare.stock.cons import (zh_sina_a_stock_payload,
zh_sina_a_stock_url,
zh_sina_a_stock_count_url,
zh_sina_a_stock_hist_url,
hk_js_decode,
zh_sina_a_stock_hfq_url,
zh_sina_a_stock_qfq_url,
zh_sina_a_stock_amount_url)
def _get_zh_a_page_count() -> int:
"""
    Total number of pages covering all A-share stocks
http://vip.stock.finance.sina.com.cn/mkt/#hs_a
    :return: total number of pages of stocks to scrape
:rtype: int
"""
res = requests.get(zh_sina_a_stock_count_url)
page_count = int(re.findall(re.compile(r"\d+"), res.text)[0]) / 80
if isinstance(page_count, int):
return page_count
else:
return int(page_count) + 1
def stock_zh_a_spot() -> pd.DataFrame:
"""
    Sina Finance - real-time quotes for all A-shares; running this function repeatedly may get your IP temporarily blocked by Sina
http://vip.stock.finance.sina.com.cn/mkt/#qbgg_hk
:return: pandas.DataFrame
"""
big_df = pd.DataFrame()
page_count = _get_zh_a_page_count()
zh_sina_stock_payload_copy = zh_sina_a_stock_payload.copy()
for page in tqdm(range(1, page_count+1), desc="Please wait for a moment"):
zh_sina_stock_payload_copy.update({"page": page})
r = requests.get(
zh_sina_a_stock_url,
params=zh_sina_stock_payload_copy)
data_json = demjson.decode(r.text)
big_df = big_df.append(pd.DataFrame(data_json), ignore_index=True)
big_df = big_df.astype({"trade": "float",
"pricechange": "float",
"changepercent": "float",
"buy": "float",
"sell": "float",
"settlement": "float",
"open": "float",
"high": "float",
"low": "float",
"volume": "float",
"amount": "float",
"per": "float",
"pb": "float",
"mktcap": "float",
"nmc": "float",
"turnoverratio": "float",
})
return big_df
def stock_zh_a_daily(symbol: str = "sh600751", adjust: str = "") -> pd.DataFrame:
"""
    Sina Finance - A-shares - historical quotes for a single stock; heavy scraping can easily get your IP blocked
:param symbol: sh600000
:type symbol: str
    :param adjust: empty string (default): unadjusted data; qfq: forward-adjusted data; hfq: backward-adjusted data; hfq-factor: backward-adjustment factors only; qfq-factor: forward-adjustment factors only
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
res = requests.get(zh_sina_a_stock_hist_url.format(symbol))
js_code = py_mini_racer.MiniRacer()
js_code.eval(hk_js_decode)
    dict_list = js_code.call(
        'd', res.text.split("=")[1].split(";")[0].replace(
            '"', ""))  # run the JS decryption routine on the raw response
data_df = pd.DataFrame(dict_list)
data_df.index = pd.to_datetime(data_df["date"])
del data_df["date"]
data_df = data_df.astype("float")
r = requests.get(zh_sina_a_stock_amount_url.format(symbol, symbol))
amount_data_json = demjson.decode(r.text[r.text.find("["): r.text.rfind("]") + 1])
amount_data_df = pd.DataFrame(amount_data_json)
amount_data_df.index = pd.to_datetime(amount_data_df.date)
del amount_data_df["date"]
temp_df = pd.merge(data_df, amount_data_df, left_index=True, right_index=True, how="outer")
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["amount"] = temp_df["amount"] * 10000
temp_df["turnover"] = temp_df["volume"] / temp_df["amount"]
temp_df.columns = ['open', 'high', 'low', 'close', 'volume', 'outstanding_share', 'turnover']
if adjust == "":
return temp_df
if adjust == "hfq":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
temp_df = pd.merge(
temp_df, hfq_factor_df, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] * temp_df["hfq_factor"]
temp_df["high"] = temp_df["high"] * temp_df["hfq_factor"]
temp_df["close"] = temp_df["close"] * temp_df["hfq_factor"]
temp_df["low"] = temp_df["low"] * temp_df["hfq_factor"]
temp_df.dropna(how="any", inplace=True)
return temp_df.iloc[:, :-1]
if adjust == "qfq":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
temp_df = pd.merge(
temp_df, qfq_factor_df, left_index=True, right_index=True, how="outer"
)
temp_df.fillna(method="ffill", inplace=True)
temp_df = temp_df.astype(float)
temp_df["open"] = temp_df["open"] / temp_df["qfq_factor"]
temp_df["high"] = temp_df["high"] / temp_df["qfq_factor"]
temp_df["close"] = temp_df["close"] / temp_df["qfq_factor"]
temp_df["low"] = temp_df["low"] / temp_df["qfq_factor"]
temp_df.dropna(how="any", inplace=True)
return temp_df.iloc[:, :-1]
if adjust == "hfq-factor":
res = requests.get(zh_sina_a_stock_hfq_url.format(symbol))
hfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
hfq_factor_df.columns = ["date", "hfq_factor"]
hfq_factor_df.index = pd.to_datetime(hfq_factor_df.date)
del hfq_factor_df["date"]
return hfq_factor_df
if adjust == "qfq-factor":
res = requests.get(zh_sina_a_stock_qfq_url.format(symbol))
qfq_factor_df = pd.DataFrame(
eval(res.text.split("=")[1].split("\n")[0])['data'])
qfq_factor_df.columns = ["date", "qfq_factor"]
qfq_factor_df.index = pd.to_datetime(qfq_factor_df.date)
del qfq_factor_df["date"]
return qfq_factor_df
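# A minimal usage sketch (editor's illustration; the symbol is arbitrary and live access to
# the Sina endpoints is assumed):
#
#   df_raw = stock_zh_a_daily(symbol="sh600751")                        # unadjusted OHLCV plus turnover
#   df_hfq = stock_zh_a_daily(symbol="sh600751", adjust="hfq")          # backward-adjusted prices
#   factors = stock_zh_a_daily(symbol="sh600751", adjust="qfq-factor")  # forward-adjustment factors only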
def stock_zh_a_minute(symbol: str = 'sh600751', period: str = '5', adjust: str = "") -> pd.DataFrame:
"""
    Historical quote data for stocks and stock indices - minute-level bars
http://finance.sina.com.cn/realstock/company/sh600519/nc.shtml
:param symbol: sh000300
:type symbol: str
    :param period: bar size in minutes: 1, 5, 15, 30 or 60
:type period: str
    :param adjust: empty string (default): unadjusted data; qfq: forward-adjusted data; hfq: backward-adjusted data
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
url = "https://quotes.sina.cn/cn/api/jsonp_v2.php/=/CN_MarketDataService.getKLineData"
params = {
"symbol": symbol,
"scale": period,
"datalen": "1023",
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame(json.loads(r.text.split('=(')[1].split(");")[0])).iloc[:, :6]
try:
stock_zh_a_daily(symbol=symbol, adjust="qfq")
except:
return temp_df
if adjust == "":
return temp_df
if adjust == "qfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
need_df = temp_df[temp_df["time"] == "15:00:00"]
need_df.index = need_df["date"]
stock_zh_a_daily_qfq_df = stock_zh_a_daily(symbol=symbol, adjust="qfq")
result_df = stock_zh_a_daily_qfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
if adjust == "hfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
need_df = temp_df[temp_df["time"] == "15:00:00"]
need_df.index = need_df["date"]
stock_zh_a_daily_qfq_df = stock_zh_a_daily(symbol=symbol, adjust="hfq")
result_df = stock_zh_a_daily_qfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = | pd.to_datetime(temp_df["date"]) | pandas.to_datetime |
#!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
import argparse
from collections import defaultdict
import sys
import intervaltree
from intervaltree import IntervalTree
import pandas
from evalutils import IntervalForest, populate_index_from_simulation, compact
from evalutils import assess_variants_vcf, assess_variants_mvf
from evalutils import subset_variants, subset_vcf, subset_mvf
from evalutils import load_kevlar_vcf, load_triodenovo_vcf, load_gatk_mvf
import kevlar
from kevlar.vcf import VCFReader
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tolerance', type=int, metavar='T', default=10,
help='extend real variants by T nucleotides when '
'querying for overlap with variant calls; default is '
'10')
parser.add_argument('--mode', choices=('Kevlar', 'GATK', 'TrioDenovo'),
default='Kevlar', help='Kevlar|GATK|TrioDenovo')
parser.add_argument('--cov', default='30', help='coverage')
parser.add_argument('--correct', help='print correct variants to file')
parser.add_argument('--missing', help='print missing variants to file')
parser.add_argument('--false', help='print false variants to file')
parser.add_argument('--collisions', help='print calls that match the '
'same variant')
parser.add_argument('--vartype', choices=('SNV', 'INDEL'), default=None)
parser.add_argument('--minlength', type=int, default=None)
parser.add_argument('--maxlength', type=int, default=None)
parser.add_argument('--do-all', action='store_true', help='ignore all '
'other arguments and analyze all data')
parser.add_argument('simvar', help='simulated variants (in custom 3-4 '
'column tabular format)')
parser.add_argument('varcalls', help='VCF file of variant calls')
return parser
def load_index(simvarfile, vartype=None, minlength=None, maxlength=None):
with kevlar.open(simvarfile, 'r') as instream:
if vartype:
instream = subset_variants(
instream, vartype, minlength=minlength, maxlength=maxlength
)
index = populate_index_from_simulation(instream, 'chr17')
return index
def handle_collisions(mapping, outfile):
numcollisions = 0
for variant, calllist in mapping.items():
if len(calllist) > 1:
numcollisions += 1
if numcollisions > 0:
print('WARNING:', numcollisions, 'variants matched by multiple calls',
file=sys.stderr)
if outfile is None:
return
with open(outfile, 'w') as outstream:
for variant, calllist in mapping.items():
if len(calllist) > 1:
print('\n#VARIANT:', variant, file=outstream)
for varcall in calllist:
if args.mvf:
print(' -', varcall, file=outstream)
else:
print(' -', varcall.vcf, file=outstream)
def handle_missing(missing, outfile):
if outfile is None:
return
with kevlar.open(outfile, 'w') as outstream:
for variant in missing:
print(variant.begin, *variant.data.split('<-'), sep='\t',
file=outstream)
def handle_calls(calls, outfile, mvf=False):
if outfile is None:
return
with kevlar.open(outfile, 'w') as outstream:
if mvf:
for varcall in calls:
print(varcall, file=outstream)
else:
writer = kevlar.vcf.VCFWriter(outstream)
for varcall in calls:
writer.write(varcall)
def evaluate(simvarfile, varcalls, mode, vartype=None, minlength=None,
maxlength=None, tolerance=10, coverage='30', correctfile=None,
falsefile=None, missingfile=None, collisionsfile=None):
assert mode in ('Kevlar', 'GATK', 'TrioDenovo')
index = load_index(simvarfile, vartype, minlength, maxlength)
if mode == 'GATK':
variants = load_gatk_mvf(varcalls, vartype, minlength, maxlength)
assess_func = assess_variants_mvf
elif mode == 'Kevlar':
variants = load_kevlar_vcf(
varcalls, index, delta=tolerance, vartype=vartype,
minlength=minlength, maxlength=maxlength
)
assess_func = assess_variants_vcf
elif mode == 'TrioDenovo':
variants = load_triodenovo_vcf(
varcalls, vartype, minlength, maxlength, coverage
)
assess_func = assess_variants_vcf
correct, false, missing, mapping = assess_func(
variants, index, delta=tolerance
)
handle_collisions(mapping, collisionsfile)
handle_missing(missing, missingfile)
handle_calls(correct, correctfile, mvf=(mode == 'GATK'))
handle_calls(false, falsefile, mvf=(mode == 'GATK'))
return len(mapping), len(false), len(missing)
################################################################################
def vartypestr(vartype, minlength, maxlength):
if vartype is None:
return 'All'
assert vartype in ('SNV', 'INDEL')
if vartype == 'SNV':
return 'SNV'
return 'INDEL {}-{}bp'.format(minlength, maxlength)
def main(args):
correct, false, missing = evaluate(
args.simvar, args.varcalls, args.mode, vartype=args.vartype,
minlength=args.minlength, maxlength=args.maxlength,
tolerance=args.tolerance, coverage=args.cov, correctfile=args.correct,
falsefile=args.false, missingfile=args.missing,
collisionsfile=args.collisions
)
vartype = vartypestr(args.vartype, args.minlength, args.maxlength)
colnames = ['Caller', 'Coverage', 'VarType', 'Correct', 'False', 'Missing']
data = [args.mode, args.cov, vartype, correct, false, missing]
row = {c: v for c, v in zip(colnames, data)}
table = | pandas.DataFrame(columns=colnames) | pandas.DataFrame |
import sys,os
import pandas as pd
import numpy as np
from statsmodels.tsa.api import ARIMA, SARIMAX, ExponentialSmoothing, VARMAX
from statsmodels.tsa.arima.model import ARIMA as StateSpaceARIMA
import unittest
from nyoka import ExponentialSmoothingToPMML, StatsmodelsToPmml
class TestMethods(unittest.TestCase):
def getData1(self):
# data with trend and seasonality present
# no of international visitors in Australia
data = [41.7275, 24.0418, 32.3281, 37.3287, 46.2132, 29.3463, 36.4829, 42.9777, 48.9015, 31.1802, 37.7179,
40.4202, 51.2069, 31.8872, 40.9783, 43.7725, 55.5586, 33.8509, 42.0764, 45.6423, 59.7668, 35.1919,
44.3197, 47.9137]
index = pd.date_range(start='2005', end='2010-Q4', freq='QS')
ts_data = pd.Series(data, index)
ts_data.index.name = 'datetime_index'
ts_data.name = 'n_visitors'
ts_data = ts_data.to_frame()
return ts_data
def getData2(self):
# data with trend but no seasonality
# no. of annual passengers of air carriers registered in Australia
data = [17.5534, 21.86, 23.8866, 26.9293, 26.8885, 28.8314, 30.0751, 30.9535, 30.1857, 31.5797, 32.5776,
33.4774, 39.0216, 41.3864, 41.5966]
index = pd.date_range(start='1990', end='2005', freq='A')
ts_data = | pd.Series(data, index) | pandas.Series |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from seir.sampling.model import SamplingNInfectiousModel
import logging
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
logging.info('Loading data')
# read calibration data
actual_hospitalisations = pd.read_excel('data/calibration.xlsx', sheet_name='Hospitalisations')
actual_hospitalisations['Date'] = [pd.to_datetime(x, ).date() for x in actual_hospitalisations['Date']]
# TODO: should check if file is downloaded: if not, download, then use the downloaded file
actual_infections = pd.read_csv(
'https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_provincial_cumulative_timeline_confirmed.csv')
actual_infections.rename(columns={'date': 'Date', 'total': 'Cum. Confirmed'}, inplace=True)
actual_infections.index = pd.to_datetime(actual_infections['Date'], dayfirst=True)
actual_infections = actual_infections.resample('D').mean().ffill().reset_index()
actual_infections['Date'] = [pd.to_datetime(x, dayfirst=True).date() for x in actual_infections['Date']]
# TODO: should check if file is downloaded: if not, download, then use the downloaded file
reported_deaths = pd.read_csv(
'https://raw.githubusercontent.com/dsfsi/covid19za/master/data/covid19za_timeline_deaths.csv')
reported_deaths.rename(columns={'date': 'Date'}, inplace=True)
reported_deaths['Date'] = [pd.to_datetime(x, dayfirst=True).date() for x in reported_deaths['Date']]
actual_deaths = reported_deaths.groupby('Date').report_id.count().reset_index()
actual_deaths.rename(columns={'report_id': 'Daily deaths'}, inplace=True)
actual_deaths.index = pd.to_datetime(actual_deaths['Date'])
actual_deaths = actual_deaths.resample('D').mean().fillna(0).reset_index()
actual_deaths['Cum. Deaths'] = np.cumsum(actual_deaths['Daily deaths'])
df_assa = pd.read_csv('data/scenarios/scenario1_daily_output_asymp_0.75_R0_3.0_imported_scale_2.5_lockdown_0.6'
'_postlockdown_0.75_ICU_0.2133_mort_1.0_asympinf_0.5.csv',
parse_dates=['Day'],
date_parser=lambda t: pd.to_datetime(t, format='%Y-%m-%d'))
assa_detected = df_assa['Cumulative Detected'].to_numpy()
assa_hospitalised = df_assa['Hospitalised'].to_numpy()
assa_icu = df_assa['ICU'].to_numpy()
assa_dead = df_assa['Dead'].to_numpy()
assa_time = (df_assa['Day'] - | pd.to_datetime('2020-03-27') | pandas.to_datetime |
import numpy as np
import pandas as pd
from relation import *
from connection import *
class RuleSet:
#############################
# Methods to build rule set #
#############################
def __init__(self,rules_list):
        ''' @rules_list: list of ordered dictionaries, each representing a rule with its attributes as keys'''
self.set = pd.DataFrame(rules_list)
if len(rules_list) > 0:
self.m = len(rules_list[0]) #number of attributes (considering the recommendation)
else:
self.m = 0
self.n = len(rules_list) #number of rules
self.idm = np.empty(0) #Inter-Difference Matrix
self.pm = np.empty(0) #Product Matrix (holds products of IDC for each pair of rules)
self.attr_names = self.set.columns.tolist()
def build_IDM(self):
if self.n > 1: #needs at least two rules to compare them
self.idm = np.zeros((self.m, self.n, self.n))
#fill in IDC's for all attributes relationships
for k in range(1,self.m):
for i in range(self.n):
for j in range(i+1,self.n):
self.idm[k,i,j] = self._val_IDC(self.set.iloc[i,k],self.set.iloc[j,k])
#fill in IDC's for recommendation relationships
for i in range(self.n):
for j in range(i+1,self.n):
self.idm[0,i,j] = self._rec_IDC(self.set.iloc[i,0],self.set.iloc[j,0])
def build_PM(self):
if(len(self.idm) > 0):
self.pm = np.prod(self.idm,axis=0)
return True
else:
return False
# Helper methods to build rule set
def _val_IDC(self, val1, val2):
#Check for nan values
if pd.isna(val1) and pd.isna(val2):
return Relation.EQUALITY.value #val1 and val2 are both nan
elif pd.isna(val1): # and not pd.isna(val2):
return Relation.INCLUSION_JI.value #val1 is nan and val2 is included in it
elif pd.isna(val2):
return Relation.INCLUSION_IJ.value #val2 is nan and val1 is included in it
#check for type mismatch
elif not self.same_type(val1,val2):
raise TypeError("val1 and val2 should have the same type when neither of them are NaN. val1: "+str(type(val1))+str(val1) + " val2: "+str(type(val2))+str(val2))
#Both values are valid (not nan) and have the same type
elif isinstance(val1,pd.Interval):
return self._intervals_IDC_(val1,val2)
else:
if val1 == val2:
return Relation.EQUALITY.value
else:
return Relation.DIFFERENCE.value
def _intervals_IDC_(self,val1,val2):
INCL_IJ = Relation.INCLUSION_IJ.value
INCL_JI = Relation.INCLUSION_JI.value
if val1.left > val2.left:
val = val1; val1 = val2; val2 = val
INCL_IJ = Relation.INCLUSION_JI.value
INCL_JI = Relation.INCLUSION_IJ.value
#val1.left <= val2.left
if not val1.overlaps(val2):
return Relation.DIFFERENCE.value
else:
if val1.left == val2.left:
if val1.right == val2.right:
if val1.closed == val2.closed:
#print('c')
return Relation.EQUALITY.value # a,b a,b
elif self._isclosed(val1,'left') and self._isclosed(val1,'right'):
#print('d')
return INCL_JI #val1 = [a,b], val2 included
elif self._isclosed(val2,'left') and self._isclosed(val2,'right'):
#print('e')
return INCL_IJ #val2 = [a,b], val1 included
elif not self._isclosed(val1,'left') and not self._isclosed(val1,'right'):
#print('f')
return INCL_IJ #val1 = (a,b), val1 included
elif not self._isclosed(val2,'left') and not self._isclosed(val2,'right'):
#print('g')
return INCL_JI #val2 = (a,b), val2 included
else:
#print('h')
return Relation.OVERLAP.value # (a,b] [a,b)
else:
if not (self._isclosed(val1,'left') == self._isclosed(val2,'left')):
#print('m')
return Relation.OVERLAP.value # (a,b [a,c with c > b
else:
if val1.right < val2.right:
#print('i')
return INCL_IJ #a,b a,c with b < c
else:
#print('j')
return INCL_JI #a,b a,c with b > c
#val1.left < val2.left
elif val1.right >= val2.right:
#print('k')
return INCL_JI
else:
#print('l')
return Relation.OVERLAP.value
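    # Illustrative note (editor's sketch, not original code): with pd.Interval attribute values,
    # _val_IDC maps interval geometry onto Relation codes, for example:
    #   _val_IDC(pd.Interval(0, 5), pd.Interval(0, 5))  -> Relation.EQUALITY.value
    #   _val_IDC(pd.Interval(0, 5), pd.Interval(1, 3))  -> Relation.INCLUSION_JI.value  (second interval inside the first)
    #   _val_IDC(pd.Interval(0, 5), pd.Interval(6, 9))  -> Relation.DIFFERENCE.value
    # The concrete numeric codes depend on the Relation enum defined in relation.py.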
def _isclosed(self,inter,side):
''' Return true if interval 'inter' is closed on side 'side', False if it is open on that side
@inter: an interval pandas.Interval
@side: 'left', 'right', 'both' or 'neither'
'''
if inter.closed == side or inter.closed == 'both':
return True
else:
return False
def _rec_IDC(self, rec1, rec2):
if(rec1 == rec2):
return Relation.SAME_REC.value
else:
return Relation.DIFF_REC.value
#############################################
# Methods to get information about rule set #
#############################################
def get_val(self,rule,attr):
''' returns the value that 'rule' has for attribute 'attr'
@rule: index of the rule (int)
@attr: either index (int) or name (str) of the attribute
'''
if type(rule) is not int or rule < 0 or rule >= self.n:
raise ValueError("'rule' must be an integer in [0,"+str(self.n-1)+"]")
if type(attr) is str:
if attr not in self.attr_names:
raise ValueError("Attribute given doesn't exist. attr="+attr)
return self.set[attr][rule]
elif type(attr) is int and attr >= 0 and attr < self.m:
#print("set before set.iloc:")
#print(self.set)
return self.set.iloc[rule,attr]
else:
raise ValueError("'attr' must be either an integer in [0,"+str(self.m-1)+"] or an existing attribute name")
def connection(self, r1, r2):
        ''' Returns the Connection enum that corresponds to the relation between the rules with indexes 'r1' and 'r2',
            or Connection.ERROR if the PM matrix hasn't been built yet.
            Rule indexes start at 0; r1 and r2 may be given in any order.
'''
if len(self.pm) == 0: #build_PM needs to be called to create PM matrix
return Connection.ERROR
elif r1 >= self.n or r2 >= self.n:
            raise ValueError("indexes given for connections are too high r1:"+str(r1)+" r2:"+str(r2)+" maxVal:"+str(self.n-1))
else:
if r1 == r2:
return Connection.REFERENCE
if r1 > r2:
r = r1; r1 = r2; r2 = r
p = self.pm[r1][r2]
#print("r1: "+str(r1)+" r2: "+str(r2)+" p: "+str(p))
if p == Relation.DIFFERENCE.value: return Connection.DISCONNECTED
elif p == Relation.EQUALITY.value : return Connection.EQUAL_SAME
elif p == -Relation.EQUALITY.value : return Connection.EQUAL_DIFF
elif p % Relation.OVERLAP.value == 0 and p > 0: return Connection.OVERLAP_SAME
elif p % Relation.OVERLAP.value == 0 and p < 0 : return Connection.OVERLAP_DIFF
elif (p % Relation.INCLUSION_IJ.value == 0 or p % Relation.INCLUSION_JI.value == 0) and p*Relation.SAME_REC.value > 0 : return Connection.INCLUSION_SAME
elif (p % Relation.INCLUSION_IJ.value == 0 or p % Relation.INCLUSION_JI.value == 0) and p*Relation.DIFF_REC.value > 0 : return Connection.INCLUSION_DIFF
else:
raise ValueError("pm has illegal value at indices ["+str(r1)+","+str(r2)+']')
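    # Illustrative note (editor's sketch, not original code): connection() is only meaningful
    # once both build_IDM() and build_PM() have been called, e.g.
    #   rs = RuleSet(rules); rs.build_IDM(); rs.build_PM()
    #   rs.connection(0, 0)  # -> Connection.REFERENCE (a rule compared with itself)
    #   rs.connection(0, 1)  # -> one of DISCONNECTED / EQUAL_* / OVERLAP_* / INCLUSION_*
    # Calling it before build_PM() returns Connection.ERROR rather than raising.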
def same_type(self,val1,val2):
''' Redefine same type relationships to ignore difference between numpy and regular types '''
if type(val1) == type(val2):
return True
elif (isinstance(val1,np.bool_) and isinstance(val2,bool)) or (isinstance(val1,bool) and isinstance(val2,np.bool_)):
return True
elif (isinstance(val1,np.float64) and isinstance(val2,float)) or (isinstance(val1,float) and isinstance(val2,np.float64)):
return True
else:
return False
def has_type(self,val,checked_type):
if isinstance(val,checked_type):
return True
elif ((checked_type == bool) and isinstance(val,np.bool_)) or ((checked_type == np.bool_) and isinstance(val,bool)):
return True
elif ((checked_type == float) and isinstance(val,np.float64)) or ((checked_type == np.float64) and isinstance(val,float)):
return True
else:
return False
def __str__(self):
header = "Rules in set:\n"
size = "AttributeNbr: " + str(self.m) + "RulesNbr: " + str(self.n) + "\n"
return header + str(self.set)
###############################
# Methods to modifiy rule set #
###############################
def recompute_m(self):
        ''' recompute the matrices idm and pm if they already exist '''
if len(self.idm) > 0:
self.build_IDM()
if len(self.pm) > 0:
self.build_PM()
def update_idm(self,rule,attr):
if len(self.idm) > 0:
#update idm
for i in range(self.n):
#update of regular value
if attr > 0 and i < rule:
self.idm[attr,i,rule] = self._val_IDC(self.set.iloc[i,attr],self.set.iloc[rule,attr])
elif attr > 0 and i > rule:
self.idm[attr,rule,i] = self._val_IDC(self.set.iloc[rule,attr],self.set.iloc[i,attr])
#update of recommendation
if attr == 0 and i < rule:
self.idm[attr,i,rule] = self._rec_IDC(self.set.iloc[i,attr],self.set.iloc[rule,attr])
elif attr == 0 and i > rule:
#print("update_idm : i="+str(i)+" attr="+str(attr)+"rule="+str(rule)+str(self.n))
#print("-- set in update_idm --")
#print(self.set)
#print("-- ism in update_idm --")
#print(self.idm)
self.idm[attr,rule,i] = self._rec_IDC(self.set.iloc[i,attr],self.set.iloc[rule,attr])
def update_pm(self,rule):
if len(self.pm) > 0:
#update pm
self.pm[rule,:] = np.prod(self.idm[:,rule,:],axis=0)
self.pm[:,rule] = np.prod(self.idm[:,:,rule],axis=0)
def update_val(self, rule, attr, val, update=True):
        ''' Update the value of an attribute by setting position [rule,attr] in the DataFrame of rules to value val
if update = True, recompute self.idm and self.pm, leave them unchanged otherwise
            rule and attr must be int with rule < n and 0 < attr < m
val must either be nan or have the same type as the rest of the values in the column
(No checks are performed on val for performance reasons)
(Method designed for attributes and not for the recommendation)
'''
if attr >= self.m or rule >= self.n:
raise ValueError("Index condition not respected: rule ("+str(rule)+") must be lower than "+str(self.n)+" and attr ("+str(attr)+") must be lower than "+str(self.m))
self.set.iloc[rule,attr] = val
if update:
self.update_idm(rule,attr)
self.update_pm(rule)
def update_attr(self,attr_list):
''' update attr by giving a new attr list
'''
for i in range(len(attr_list)):
if attr_list[i] == '':
raise ValueError("Attribute name cannot be an empty string.")
for j in range(i+1,len(attr_list)):
if attr_list[i] == attr_list[j]:
raise ValueError("Two attributes can't have the same name.")
if attr_list[0] != 'Rec' and attr_list[0] != 'Recommendation':
            raise ValueError("First column must have name 'Rec' or 'Recommendation'")
self.set.columns = attr_list
self.attr_names = attr_list
'''
def update_attr(self,new_attr,position):
if new_attr in self.attr_names and new_attr!=self.attr_names[position]:
raise ValueError("New attribute name can't be the same as an already existing one.")
if position == 0 and (new_attr == 'Rec' or new_attr == 'Recommendation'):
raise ValueError("First column must have name 'Rec' or 'Recommendation")
self.attr_names[position] = new_attr
self.set.columns = self.attr_names
'''
def add_attr(self,attr_name,val_list=None):
''' @val_list: list containing the value of that attribute for each rule
verification of correct type is not guaranteed because is only done if idm > 0
'''
if self.n == 0:
raise ValueError("Cannot add attribute to empty ruleset. Add a rule first.")
if attr_name == '':
raise ValueError("Attribute name cannot be an empty string.")
if attr_name in self.attr_names:
raise ValueError("The new attribute name must not already be used. Error with attr_name="+str(attr_name))
if val_list is None:
self.set[attr_name] = pd.Series(float('nan'),index=range(self.n))
#new idm layer for this attr has 1's for all rules since all values are the same
if len(self.idm) > 0:
new_idm_layer = np.zeros((1,self.n,self.n))
for i in range(1,self.n):
for j in range(i+1,self.n):
new_idm_layer[0,i,j] = 1
self.idm = np.concatenate((self.idm,new_idm_layer))
#No changes to pm needed since all new values are one's
else:
if len(val_list) != self.n:
raise ValueError("Length of list value ("+str(len(val_list))+") must be equal to number of rules ("+str(self.n)+")")
self.set[attr_name] = pd.Series(val_list)
#build new idm layer and add it to idm
if len(self.idm) > 0:
new_idm_layer = np.zeros((1,self.n,self.n))
for i in range(0,self.n):
for j in range(i+1,self.n):
try:
new_idm_layer[0,i,j] = self._val_IDC(self.set[attr_name][i],self.set[attr_name][j])
except TypeError:
raise TypeError("New attribute contains values with different types.")
self.idm = np.concatenate((self.idm,new_idm_layer))
#recompute pm
if len(self.pm) > 0:
self.build_PM()
self.m += 1
self.attr_names = self.set.columns.tolist()
def add_rule(self,rec,val_list=None):
''' @val_list: list containing the values for all attributes of this rule (recommendation excluded)
            verification of correct type is not guaranteed because it is only performed when idm has already been built (len(idm) > 0)
'''
if self.n == 0:
names = ['Recommendation']
values = [rec]
if val_list is not None:
for i in range(len(val_list)):
names += ['Attr '+str(i+1)]
values = [rec] + val_list
rule_dict = {k:v for k,v in zip(names,values)}
self.set = pd.DataFrame([rule_dict])[names]
self.m = len(names)
self.n = 1
self.attr_names = names
else:
old_n = self.n
new_n = self.n+1
self.n += 1 #update needs to be done before call to update_idm()
if val_list == None:
rule = pd.DataFrame(None,index=[old_n])
self.set = | pd.concat([self.set,rule],sort=False) | pandas.concat |
from time import time
from datetime import datetime
import os, sys
import numpy as np
from scipy.stats.mstats import gmean
import scipy.spatial.distance as ssd
import scipy.cluster.hierarchy as hc
import pandas as pd
import pickle
import gensim, data_nl_processing, data_nl_processing_v2
import spacy
import scispacy
from collections import OrderedDict
from sklearn import linear_model
from sklearn.manifold import TSNE
import glob
import re
#plotting tools
import math
import pyLDAvis
import pyLDAvis.gensim
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.ticker as ticker
from matplotlib import transforms
from mpl_toolkits.mplot3d import Axes3D
from wordcloud import WordCloud
from cycler import cycler
import seaborn as sns
TOPIC_NAMES_T40A25O200S524251838_FULL = {
1:"T1:Triage", 2:"T2:CT Imaging", 3:"T3:Case Presentation", 4:"T4:Outcomes", 5:"T5:Pediatrics",
6:"T6:Survey Methodology", 7:"T7:Drugs/Toxicology", 8:"T8:Mental Health/Substance Abuse",
9:"T9:Sedatives", 10:"T10:Infection", 11:"T11:Residency", 12:"T12:Lab/Basic Science Research",
13:"T13:Prehospital Care", 14:"T14:Trauma Care", 15:"T15:Chart Review/Electronic Record",
16:"T16:Study Protocol/Methodology", 17:"T17:Operations/Outcomes", 18:"T18:Public Health/Disaster Response",
19:"T19:Stroke Management", 20:"T20:Statistics-Test Characteristics", 21:"T21:Screening Measures in ED",
22:"T22:Operation Metrics", 23:"T23:Health Care Utilization", 24:"T24:IV Placement", 25:"T25:Wound Care",
26:"T26:Chest Pain", 27:"T27:Study Subject Parameters", 28:"T28:Intubation", 29:"T29:Ultrasound",
30:"T30:Blood Pressure/O2 Saturation", 31:"T31:CPR", 32:"T32:Resident Training", 33:"T33:Cardiac Arrest",
34:"T34:Risk Factors", 35:"T35:Academic Research", 36:"T36:Pain", 37:"T37:Lab Tests",
38:"T38:Statistical Models/Prediction", 39:"T39:Procedures", 40:"T40:Injury"
}
TOPIC_NAMES_T40A25O200S524251838_TRUNC = {
1:"T1:Triage", 2:"T2:CT Imaging", 3:"T3:Case Pres", 4:"T4:Outcomes", 5:"T5:Pediatrics",
6:"T6:Survey Meth", 7:"T7:Drugs/Tox", 8:"T8:Mental H/SA",
9:"T9:Sedatives", 10:"T10:Infection", 11:"T11:Residency", 12:"T12:Lab/B Sci",
13:"T13:Prehosp Ca", 14:"T14:Trauma Ca", 15:"T15:Chart R/EMR",
16:"T16:Study Meth", 17:"T17:Ops/Outcome", 18:"T18:Publ H/Dis",
19:"T19:Stroke Mng", 20:"T20:St-Test Cha", 21:"T21:Screening",
22:"T22:Op Metrics", 23:"T23:Health Util", 24:"T24:IV Place", 25:"T25:Wound Care",
26:"T26:Chest Pain", 27:"T27:Subj Para", 28:"T28:Intubation", 29:"T29:Ultrasound",
30:"T30:BP/O2Sat", 31:"T31:CPR", 32:"T32:Res Train", 33:"T33:Card Arrest",
34:"T34:Risk Fact", 35:"T35:Acad Res", 36:"T36:Pain", 37:"T37:Lab Tests",
38:"T38:St Mod/Pred", 39:"T39:Procedures", 40:"T40:Injury"
}
TOPIC_NAMES_T40A25O200S524251838 = {1:"T1:Triage", 2:"T2:CT Imaging", 3:"T3:Case Reports", 4:"T4:Sepsis", 5:"T5:Peds/Asthma",
6:"T6:Surveys", 7:"T7:Drug Toxicity", 8:"T8:Psychiatry", 9:"T9:Sedatives", 10:"T10:Antibiotics", 11:"T11:Residency",
12:"T12:Animal Exp", 13:"T13:EMS", 14:"T14:Trauma 1", 15:"T15:EMR", 16:"T16:Studies",
17:"T17:Disposition", 18:"T18:Health Sys", 19:"T19:Stroke", 20:"T20:Statistics 1", 21:"T21:Screening",
22:"T22:Length Stay", 23:"T23:Utilization", 24:"T24:IV/A Access", 25:"T25:Wound Care", 26:"T26:Chest Pain",
27:"T27:Vitals", 28:"T28:Intubation", 29:"T29:Ultrasound", 30:"T30:BP Measure", 31:"T31:CPR",
32:"T32:Med Ed", 33:"T33:Card Arrest", 34:"T34:Statistics 2", 35:"T35:Med Research",
36:"T36:Pain", 37:"T37:Blood Tests", 38:"T38:Modeling", 39:"T39:Procedures", 40:"T40:Trauma"}
TOPIC_NAMES_T40A5O200S629740313 = {
1:"T1:Statistical Modeling and Prediction", 2:"T2:Trauma Imaging", 3:"T3:Statistics: Measurement and Agreement",
4:"T4:Case Presentation and Diagnosis", 5:"T5:Chest Pain",
6:"T6:Clinical trial", 7:"T7:Trauma Severity and Outcomes", 8:"T8:Wound Care",
9:"T9:Toxicology", 10:"T10:Intubation and Airway Management", 11:"T11:Medical Publication", 12:"T12:Pediatrics",
13:"T13:Laboratory Tests", 14:"T14:Vitals", 15:"T15:Temperature Management",
16:"T16:Motor Vehicle Collision Related Injury", 17:"T17:Public Health and Disaster Medicine", 18:"T18:Health Utilization",
19:"T19:CPR", 20:"T20:Ultrasound", 21:"T21:Sedation",
22:"T22:Chart Review and Electronic Medical Records", 23:"T23:CT Imaging", 24:"T24:Risk Factor Analysis", 25:"T25:IV Placement",
26:"T26:Disposition", 27:"T27:Medical Education Assessment and Simulation", 28:"T28:Intracranial Hemorrhage and Stroke",
29:"T29:Pain and Pain Management", 30:"T30:Sepsis", 31:"T31:Residency Training", 32:"T32:Operational Metrics",
33:"T33:Academic Research", 34:"T34:Cardiac Arrest", 35:"T35:Survey Methodology", 36:"T36:Prehospital Care",
37:"T37:Mental Health and Substance Abuse", 38:"T38:Lab Research and Basic Science", 39:"T39:Treatment", 40:"T40:Infection"
}
TOPIC_NAMES_T40A5O200S629740313_TRUNC = {
1:"T1:St Mod/Pred", 2:"T2:Fracture", 3:"T3:Statistics", 4:"T4:Case Pres", 5:"T5:ACS",
6:"T6:Clin trial", 7:"T7:Trauma", 8:"T8:Wound Care",
9:"T9:Drugs/Tox", 10:"T10:Intubation", 11:"T11:Med Pub", 12:"T12:Pediatrics",
13:"T13:Lab Tests", 14:"T14:BP/O2Sat", 15:"T15:Body Temp",
16:"T16:Injury", 17:"T17:Publ H/Dis", 18:"T18:Health Util",
19:"T19:CPR", 20:"T20:Ultrasound", 21:"T21:Sedation",
22:"T22:Chart R/EMR", 23:"T23:Radiology", 24:"T24:Risk Fact", 25:"T25:IV Place",
26:"T26:Disposition", 27:"T27:Med Ed", 28:"T28:Stroke/Bleed", 29:"T29:Pain",
30:"T30:Outcomes", 31:"T31:Res Train", 32:"T32:Op Metrics", 33:"T33:Acad Res",
34:"T34:Card Arrest", 35:"T35:Survey Meth", 36:"T36:Prehosp Ca", 37:"T37:Mental H/SA",
38:"T38:Lab/B Sci", 39:"T39:Treatment", 40:"T40:Infection"
}
TOPIC_NAMES_T40A25O200S629740313_TRUNC = {
1:"T1:Adverse Event", 2:"T2:Injury", 3:"T3:Procedures", 4:"T4:Disposition", 5:"T5:ECG",
6:"T6:Clinical Trial", 7:"T7:Mental H/SA", 8:"T8:Outcomes",
9:"T9:Op Metrics", 10:"T10:Chest Pain", 11:"T11:St Mod/Pred", 12:"T12:Intubation",
13:"T13:Survey Meth", 14:"T14:St-Test Cha", 15:"T15:Analgesia",
16:"T16:Infect/Wound", 17:"T17:H/C Costs", 18:"T18:Health Util",
19:"T19:Lab/B Sci", 20:"T20:Lab Tests", 21:"T21:Ultrasound",
22:"T22:Prehosp Care", 23:"T23:Trauma", 24:"T24:Treatment", 25:"T25:Acad Res",
26:"T26:Screening", 27:"T27:Pediatrics", 28:"T28:Case Pres", 29:"T29:Chart R/EMR",
30:"T30:Drugs/Tox", 31:"T31:Res Train", 32:"T32:Residency", 33:"T33:Intervention",
34:"T34:CT Imaging", 35:"T35:Demographics", 36:"T36:Risk Fact", 37:"T37:Disaster Med",
38:"T38:Card Arrest", 39:"T39:CPR", 40:"T40:Vitals"
}
MAIN_TOPICS_V2 = TOPIC_NAMES_T40A5O200S629740313_TRUNC
MAIN_TOPICS_V3 = TOPIC_NAMES_T40A25O200S629740313_TRUNC
MAIN_TOPICS = TOPIC_NAMES_T40A25O200S524251838_FULL
MAIN_TOPICS_TRUNC = TOPIC_NAMES_T40A25O200S524251838_TRUNC
TOPIC_GROUPS = {
"Administration":[26, 30, 32],
"Cards":[5, 34],
"EMS":[36],
"Infection":[40],
"Med Ed":[27, 31],
"Mental Health":[37],
"Pain":[21, 29],
"Peds":[12],
"Public Health":[17, 18],
"Radiology":[20, 23],
"Rescuscitation":[10, 14, 15, 19, 25],
"Stroke":[28],
"Toxicology":[9],
"Trauma":[2, 7, 8, 16],
"Methods":[1, 3, 6, 13, 22, 24, 35, 38, 39],
"Miscellaneous":[4, 11, 33]
}
def plot_model_comparison(paths, x_column, y_columns, x_label, y_label, graph_title, show=True, fig_save_path=None, csv_save_path=None):
# Main variables
data_dict = {}
mean_sd_dict = {}
x_data = None
# Setup data_dict y_column keys
for column in y_columns:
data_dict[column] = {}
# Read each file in paths
for path in paths:
df = pd.read_csv(path)
# Setup data_dict x keys and values if not yet done
if x_data is None:
x_data = df[x_column].tolist()
for column in y_columns:
for x in x_data:
data_dict[column][x] = []
# Add file's data to list in data_dict
for column in y_columns:
data = df[column].tolist()
for x in x_data:
data_dict[column][x].append(data.pop(0))
# Calculate mean and Standard deviation for each y value
for y_column in data_dict:
mean_sd_dict[y_column] = {'X':[], 'MEAN':[], 'STDV':[]}
for x in data_dict[y_column]:
mean_sd_dict[y_column]['X'].append(x)
mean_sd_dict[y_column]['MEAN'].append(np.mean(data_dict[y_column][x]))
mean_sd_dict[y_column]['STDV'].append(np.std(data_dict[y_column][x]))
# Plot graph of x VS y with standard deviation for error bars
plt.figure(figsize=(12, 8))
for y_column in mean_sd_dict:
plt.errorbar(mean_sd_dict[y_column]['X'], mean_sd_dict[y_column]['MEAN'],
yerr=mean_sd_dict[y_column]['STDV'], label=y_column,
marker='o', markersize=5, capsize=5, markeredgewidth=1)
plt.title(graph_title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(title='Models', loc='best')
# Saving figure if fig_save_path is entered
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
# Saving a CSV file of the means and standard deviations if csv_save_path is entered
if csv_save_path is not None:
dataframe_dict= {}
for y_column in y_columns:
dataframe_dict[x_column] = mean_sd_dict[y_column]['X']
dataframe_dict[" ".join([y_column, "MEAN"])] = mean_sd_dict[y_column]['MEAN']
dataframe_dict[" ".join([y_column, "STDV"])] = mean_sd_dict[y_column]['STDV']
data = pd.DataFrame.from_dict(dataframe_dict)
data.to_csv(csv_save_path, index=False)
if show:
plt.show()
plt.close() # Closes and deletes graph to free up memory
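# A minimal usage sketch (editor's illustration; the CSV paths and column names below are
# hypothetical and must match the files produced by the model-comparison runs):
#
#   plot_model_comparison(paths=glob.glob('reports/lda_run_*.csv'),
#                         x_column='Topics', y_columns=['Coherence'],
#                         x_label='Number of topics', y_label='Coherence score',
#                         graph_title='Topic count vs. coherence (mean and stdev across runs)',
#                         fig_save_path='reports/coherence.png',
#                         csv_save_path='reports/coherence_summary.csv')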
def dominant_doc_topic_df(model, nlp_data, num_keywords=10):
topics_df = pd.DataFrame()
for i, row_list in enumerate(model[nlp_data.gensim_lda_input()]):
row = row_list[0] if model.per_word_topics else row_list
row = sorted(row, key=lambda x:(x[1]), reverse=True)
for j, (topic_num, prop_topic) in enumerate(row):
if j==0:
wp = model.show_topic(topic_num, topn=num_keywords)
topic_keywords = ", ".join([word for word, prop in wp])
topics_df = topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
topics_df.columns = ["Dominant Topic", "Contribution", "Topic Keywords"]
contents = pd.Series(nlp_data.get_token_text())
topics_df = pd.concat([topics_df, contents], axis=1)
topics_df = topics_df.reset_index()
topics_df.columns = ["Document", "Dominant Topic", "Contribution", "Topic Keywords", "Document Tokens"]
topics_df["Document"] += 1
topics_df["Dominant Topic"] = 1 + topics_df["Dominant Topic"].astype(int)
return topics_df
def best_doc_for_topic(dom_top_df):
sorted_df = pd.DataFrame()
dom_top_df_grouped = dom_top_df.groupby('Dominant Topic')
for i, grp in dom_top_df_grouped:
sorted_df = pd.concat([sorted_df, grp.sort_values(['Contribution'], ascending=False).head(1)], axis=0)
sorted_df.reset_index(drop=True, inplace=True)
sorted_df.columns = ["Best Document", "Topic Number", "Contribution", "Topic Keywords", "Document Tokens"]
sorted_df = sorted_df[["Topic Number", "Contribution", "Topic Keywords", "Best Document", "Document Tokens"]]
return sorted_df
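# A minimal usage sketch (editor's illustration; `lda_model` and `nlp_data` stand for a trained
# gensim LDA model and the matching processed-corpus object used throughout this module):
#
#   dom_df = dominant_doc_topic_df(lda_model, nlp_data, num_keywords=10)
#   best_df = best_doc_for_topic(dom_df)   # one representative document per topic
#   best_df.to_csv('reports/best_doc_per_topic.csv', index=False)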
def plot_doc_token_counts_old(dom_top_df=None, nlp_data=None, show=True, fig_save_path=None, bins=None):
if dom_top_df is not None:
doc_lens = [len(doc) for doc in dom_top_df["Document Tokens"]]
if nlp_data is not None:
doc_lens = np.array(nlp_data.sklearn_lda_input().sum(axis=1)).flatten()
fig = plt.figure(figsize=(12,7), dpi=160)
plt.hist(doc_lens, bins = 500, color='navy')
# Prints texts on the graph at x=400
x = 400
plt.text(x, 120, "Documents")
text = plt.text(x, 110, "Total Tokens")
plt.text(x, 100, "Mean")
plt.text(x, 90, "Median")
plt.text(x, 80, "Stdev")
plt.text(x, 70, "1%ile")
plt.text(x, 60, "99%ile")
#This is for offsetting the data so it will appear even
canvas = fig.canvas
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
# This prints the statistics
plt.text(x, 120, " : " + str(len(doc_lens)), transform=t)
plt.text(x, 110, " : " + str(np.sum(doc_lens)), transform=t)
plt.text(x, 100, " : " + str(round(np.mean(doc_lens), 1)), transform=t)
plt.text(x, 90, " : " + str(round(np.median(doc_lens), 1)), transform=t)
plt.text(x, 80, " : " + str(round(np.std(doc_lens),1)), transform=t)
plt.text(x, 70, " : " + str(np.quantile(doc_lens, q=0.01)), transform=t)
plt.text(x, 60, " : " + str(np.quantile(doc_lens, q=0.99)), transform=t)
plt.gca().set(xlim=(0, 500), ylabel='Number of Documents', xlabel='Document Token Count')
plt.tick_params(size=16)
plt.xticks(np.linspace(0,500,11))
plt.title('Distribution of Document Token Counts', fontdict=dict(size=22))
plt.tight_layout()
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def plot_doc_token_counts(dom_top_df=None, nlp_data=None, show=True, fig_save_path=None, bins=None):
if dom_top_df is not None:
doc_lens = [len(doc) for doc in dom_top_df["Document Tokens"]]
if nlp_data is not None:
doc_lens = np.array(nlp_data.sklearn_lda_input().sum(axis=1)).flatten()
if bins is None:
bins = 50 * math.ceil(max(doc_lens)/50)
if max(doc_lens) - np.quantile(doc_lens, q=0.99) < bins * 0.2:
bins += 50 * math.ceil((bins*0.25)/50)
bin_list = [i+1 for i in range(bins)]
fig = plt.figure(figsize=(12,7), dpi=160)
plt.hist(doc_lens, bins = bin_list, color='navy', rwidth=None)
# Prints texts on the graph at position x
x = 0.79
t = fig.transFigure
plt.text(x, 0.88, "Documents", transform=t)
text = plt.text(x, 0.85, "Total Tokens", transform=t)
plt.text(x, 0.82, "Mean", transform=t)
plt.text(x, 0.79, "Median", transform=t)
plt.text(x, 0.76, "Stdev", transform=t)
plt.text(x, 0.73, "1%ile", transform=t)
plt.text(x, 0.70, "99%ile", transform=t)
#This is for offsetting the data so it will appear even
canvas = fig.canvas
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
# This prints the statistics
plt.text(x, 0.88, " : " + str(len(doc_lens)), transform=t)
plt.text(x, 0.85, " : " + str(np.sum(doc_lens)), transform=t)
plt.text(x, 0.82, " : " + str(round(np.mean(doc_lens), 1)), transform=t)
plt.text(x, 0.79, " : " + str(round(np.median(doc_lens), 1)), transform=t)
plt.text(x, 0.76, " : " + str(round(np.std(doc_lens),1)), transform=t)
plt.text(x, 0.73, " : " + str(np.quantile(doc_lens, q=0.01)), transform=t)
plt.text(x, 0.70, " : " + str(np.quantile(doc_lens, q=0.99)), transform=t)
plt.gca().set(xlim=(0, bins), ylabel='Number of Documents', xlabel='Document Token Count')
plt.tick_params(size=16)
#plt.xticks(np.linspace(0,500,11))
plt.title('Distribution of Document Token Counts', fontdict=dict(size=22))
plt.tight_layout()
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def create_wordcloud(topic, model, nlp_data, seed=100, num_w=20, fig_dpi=400, topic_names=None,
show=True, fig_save_path=None, colormap='tab10', horizontal_pref=0.8):
cloud = WordCloud(background_color='white', width=1000, height=1000, max_words=num_w, colormap=colormap,
prefer_horizontal=horizontal_pref, random_state=seed)
topics = model.show_topics(num_topics=-1, num_words=num_w, formatted=False)
cloud.generate_from_frequencies(dict(topics[topic-1][1]), max_font_size=300)
plt.figure(figsize=(2,2), dpi=fig_dpi)
plt.imshow(cloud)
if topic_names is None:
        plt.title('Topic {}'.format(topic), fontdict=dict(size=16), pad=10)
else:
        plt.title(topic_names[topic], fontdict=dict(size=16), pad=10)
plt.axis('off')
plt.tight_layout()
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
    if show:  # The on-screen preview renders poorly, but the saved file looks good.
plt.show()
plt.close()
def create_multi_wordclouds(n_topics, n_horiz, model, nlp_data, seed=100, num_w=20, fig_dpi=400, topic_names=None, title_font=15,
show=True, fig_save_path=None, colormap='tab10', horizontal_pref=0.8):
if isinstance(n_topics, int):
topics_list = list(range(n_topics))
else:
topics_list = [i-1 for i in n_topics]
n_topics = len(topics_list)
cloud = WordCloud(background_color='white', width=1000, height=1000, max_words=num_w, colormap=colormap,
prefer_horizontal=horizontal_pref, random_state=seed)
topics = model.show_topics(num_topics=-1, num_words=num_w, formatted=False)
x_len = n_horiz
y_len = math.ceil(n_topics/n_horiz)
fig, axes = plt.subplots(y_len, x_len, figsize=(2*x_len,2*y_len), dpi=fig_dpi,
sharex=True, sharey=True, squeeze=False, constrained_layout=True)
for i, ax in enumerate(axes.flatten()):
if i < n_topics:
fig.add_subplot(ax)
topic_words = dict(topics[topics_list[i]][1])
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
if topic_names is None:
plt.gca().set_title('Topic {}'.format(topics_list[i]+1), fontdict=dict(size=title_font), pad=10)
else:
plt.gca().set_title(topic_names[topics_list[i]+1], fontdict=dict(size=title_font), pad=10)
plt.gca().axis('off')
else:
fig.add_subplot(ax)
plt.gca().axis('off')
#plt.suptitle('Topic Wordclouds', fontdict=dict(size=16))
plt.axis('off')
plt.margins(x=0, y=0)
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def color_doc_topics_old(model, doc, nlp_data, line_word_length=10, dpi=150, show=True, fig_save_path=None, topics=5, min_phi=None,
incl_periods=True, topic_names=None, incl_perc=False): # The output file looks better than show
colors = [color for name, color in mcolors.TABLEAU_COLORS.items()]
if topics > 10:
topics = 10
doc_prep = gensim.utils.simple_preprocess(str(doc), deacc=True, min_len=2, max_len=30)
doc_raw = gensim.utils.simple_preprocess(str(doc), deacc=True, min_len=1, max_len=30)
doc_split = str(doc).split('.')
doc_raw_period = []
for sentence in doc_split:
sentence_tok = gensim.utils.simple_preprocess(str(sentence), deacc=True, min_len=1, max_len=30)
if len(sentence_tok) > 0:
sentence_tok[0] = sentence_tok[0].capitalize()
sentence_tok[-1] += '.'
doc_raw_period += sentence_tok
wordset = set(doc_raw)
doc_index_dict = {}
for word in wordset:
word_indexes = [i for i, w in enumerate(doc_raw) if w == word]
doc_index_dict[word] = word_indexes
token_index_dict = {}
token_list = []
nlp = spacy.load(nlp_data.spacy_lib, disable=['parser','ner'])
allowed_postags = ['NOUN', 'ADJ', 'VERB','ADV']
for word in doc_prep:
if word not in nlp_data.stopwords:
token = nlp(word)[0]
if token.pos_ in allowed_postags and token.lemma_ not in ['-PRON-']:
token_list.append(token.lemma_)
if token.lemma_ in token_index_dict:
token_index_dict[token.lemma_] = list(set(token_index_dict[token.lemma_] + doc_index_dict[word]))
else:
token_index_dict[token.lemma_] = doc_index_dict[word]
for token in token_index_dict:
token_index_dict[token] = sorted(set(token_index_dict[token]))
processed_tokens = nlp_data.process_ngrams_([token_list])[0]
final_token_dict = {}
for token in processed_tokens:
if token not in final_token_dict:
final_token_dict[token] = []
split_tokens = token.split('_')
for split_token in split_tokens:
final_token_dict[token].append(token_index_dict[split_token].pop(0))
topic_perc, wordid_topics, wordid_phivalues = model.get_document_topics(
nlp_data.gensim_lda_input([" ".join(processed_tokens)])[0], per_word_topics=True,
minimum_probability=0.001, minimum_phi_value=min_phi)
topic_perc_sorted = sorted(topic_perc, key=lambda x:(x[1]), reverse=True)
top_topics = [topic[0] for i, topic in enumerate(topic_perc_sorted) if i < topics]
top_topics_color = {top_topics[i]:i for i in range(len(top_topics))}
word_dom_topic = {}
for wd, wd_topics in wordid_topics:
for topic in wd_topics:
if topic in top_topics:
word_dom_topic[model.id2word[wd]] = topic
break
index_color_dict = {}
for token in final_token_dict:
if token in word_dom_topic:
for i in final_token_dict[token]:
index_color_dict[i] = top_topics_color[word_dom_topic[token]]
add_lines = math.ceil(len(top_topics_color)/5)
lines = math.ceil(len(doc_raw) / line_word_length) + add_lines
fig, axes = plt.subplots(lines + 1, 1, figsize=(line_word_length, math.ceil(lines/2)), dpi=dpi,
squeeze=True, constrained_layout=True)
axes[0].axis('off')
plt.axis('off')
n = line_word_length
if len(doc_raw) == len(doc_raw_period) and incl_periods:
doc_raw = doc_raw_period
doc_raw_lines = [doc_raw[i * n:(i + 1) * n] for i in range(lines)]
indent = 0
for i, ax in enumerate(axes):
t = ax.transData
canvas = ax.figure.canvas
if i > add_lines:
x = 0.06
line = i - add_lines - 1
for index in range(len(doc_raw_lines[line])):
word = doc_raw_lines[line][index]
raw_index = index + (line) * n
if raw_index in index_color_dict:
color = colors[index_color_dict[raw_index]]
else:
color = 'black'
text = ax.text(x, 0.5, word+' ', horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
ax.axis('off')
elif i < add_lines:
x = 0.06
if i == 0:
word = "Topics: "
color = 'black'
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
indent = ex.width
else:
color = 'black'
text = ax.text(x, 0.5, "", horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=indent, units='dots')
for num, index in enumerate(range(i*5, len(top_topics))):
if num < 5:
if topic_names is None:
word = "Topic {}, ".format(top_topics[index]+1)
else:
word = topic_names[top_topics[index]+1] + ", "
if incl_perc:
topic_perc_dict = dict(topic_perc_sorted)
word = "{:.1f}% ".format(topic_perc_dict[top_topics[index]]*100) + word
color = colors[top_topics_color[top_topics[index]]]
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
ax.axis('off')
else:
ax.axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.suptitle('Document Colored by Top {} Topics'.format(topics),
fontsize=22, y=0.95, fontweight=700)
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def color_doc_topics(model, doc, nlp_data, max_chars=120, dpi=150, show=True, fig_save_path=None, topics=5, min_phi=None,
topic_names=None, incl_perc=False, highlight=False, highlight_topic_names=False):
# The output file looks better than show
colors = [color for name, color in mcolors.TABLEAU_COLORS.items()]
if topics > 10: # There are only 10 colors so the max is 10. Change above to add more colors for more topics
topics = 10
    # This is for the lemmatization step
doc_prep = gensim.utils.simple_preprocess(str(doc), deacc=True, min_len=2, max_len=30)
    # This is for processing the string while retaining the original characters, since simple_preprocess removes punctuation and accents.
    # It splits the string on whitespace, then individually processes the chunks into tokens and finds their locations in the string.
    # Finally, a list is made of strings that directly translate to tokens while preserving non-token strings.
doc_raw_split = str(doc).split()
doc_raw_word_list = []
raw_token_dict = {}
for string_piece in doc_raw_split:
tokens = gensim.utils.simple_preprocess(str(string_piece), deacc=True, min_len=1, max_len=30)
working_string = gensim.utils.deaccent(string_piece.lower())
output_string = string_piece
for token in tokens:
if token in working_string:
start_index = working_string.find(token)
end_index = start_index + len(token)
front_part = output_string[:start_index]
token_part = output_string[start_index:end_index]
output_string = output_string[end_index:]
working_string = working_string[end_index:]
if len(front_part) > 0:
doc_raw_word_list.append(front_part)
raw_token_dict[front_part] = False
doc_raw_word_list.append(token_part)
raw_token_dict[token_part] = token
if len(output_string) > 0: # This saves strings that do not become tokens, False prevents them from being in the wordset
doc_raw_word_list.append(output_string)
raw_token_dict[output_string] = False
# This is for finding all index locations of the tokens within the original raw string list
wordset = set([raw_token_dict[word] for word in raw_token_dict.keys() if raw_token_dict[word]])
doc_index_dict = {}
for word in wordset:
word_indexes = [i for i, w in enumerate(doc_raw_word_list) if raw_token_dict[w] == word]
doc_index_dict[word] = word_indexes
token_index_dict = {}
token_list = []
    # This is for lemmatization of the text and for linking each lemma to its original token index locations
nlp = spacy.load(nlp_data.spacy_lib, disable=['parser','ner'])
allowed_postags = ['NOUN', 'ADJ', 'VERB','ADV']
for word in doc_prep:
if word not in nlp_data.stopwords:
token = nlp(word)[0]
if token.pos_ in allowed_postags and token.lemma_ not in ['-PRON-']:
token_list.append(token.lemma_)
if token.lemma_ in token_index_dict:
token_index_dict[token.lemma_] = list(set(token_index_dict[token.lemma_] + doc_index_dict[word]))
else:
token_index_dict[token.lemma_] = doc_index_dict[word]
for token in token_index_dict:
token_index_dict[token] = sorted(set(token_index_dict[token]))
# This processes the n-grams based on the model's n-gram settings and combines index locations for the n-gram
processed_tokens = nlp_data.process_ngrams_([token_list])[0]
final_token_dict = {}
for token in processed_tokens:
if token not in final_token_dict:
final_token_dict[token] = []
split_tokens = token.split('_')
for split_token in split_tokens:
final_token_dict[token].append(token_index_dict[split_token].pop(0))
# This is where the text is processed by the model and the top n models are saved
topic_perc, wordid_topics, wordid_phivalues = model.get_document_topics(
nlp_data.gensim_lda_input([" ".join(processed_tokens)])[0], per_word_topics=True,
minimum_probability=0.001, minimum_phi_value=min_phi)
topic_perc_sorted = sorted(topic_perc, key=lambda x:(x[1]), reverse=True)
top_topics = [topic[0] for i, topic in enumerate(topic_perc_sorted) if i < topics]
top_topics_color = {top_topics[i]:i for i in range(len(top_topics))}
word_dom_topic = {}
# This links the individual word lemmas to its best topic within available topics
for wd, wd_topics in wordid_topics:
for topic in wd_topics:
if topic in top_topics:
word_dom_topic[model.id2word[wd]] = topic
break
# Links the index location to a color
index_color_dict = {}
for token in final_token_dict:
if token in word_dom_topic:
for i in final_token_dict[token]:
index_color_dict[i] = top_topics_color[word_dom_topic[token]]
# this is for assembling the individual lines of the graph based on character length and position of punctuation
add_lines = math.ceil(len(top_topics_color)/5)
last_index = len(doc_raw_word_list) - 1
line_len = 0
line_num = 0
doc_raw_lines = [[]]
no_space_list = [".", ",", ")", ":", "'"]
for i, word in enumerate(doc_raw_word_list):
word_len = len(word)
if line_len + word_len < max_chars or (word in no_space_list and line_len <= max_chars):
if word == '(':
if i != last_index:
if (line_len + word_len + len(doc_raw_word_list[i+1]) + 1 >= max_chars
and doc_raw_word_list[i+1] not in no_space_list):
line_num += 1
line_len = 0
doc_raw_lines.append([])
else:
line_num += 1
line_len = 0
doc_raw_lines.append([])
line_len += word_len + 1
doc_raw_lines[line_num].append(i)
line_num += 1
# This creates the figure and subplots
lines = line_num + add_lines
fig, axes = plt.subplots(lines + 1, 1, figsize=(math.ceil(max_chars/8), math.ceil(lines/2)), dpi=dpi,
squeeze=True, constrained_layout=True)
axes[0].axis('off')
plt.axis('off')
indent = 0
# This is the loop for drawing the text
for i, ax in enumerate(axes):
t = ax.transData
canvas = ax.figure.canvas
if i > add_lines:
x = 0.06
line = i - add_lines - 1
for index in doc_raw_lines[line]:
word = doc_raw_word_list[index]
if word[-1] == "(":
pass
elif index != last_index:
if doc_raw_word_list[index+1][0] not in no_space_list:
word = word + " "
if index in index_color_dict:
color = colors[index_color_dict[index]]
else:
color = 'black'
if highlight:
bbox=dict(facecolor=color, edgecolor=[0,0,0,0], pad=0, boxstyle='round')
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color='black',
transform=t, fontweight=700)
if color != 'black':
text.set_bbox(bbox)
else:
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
ax.axis('off')
elif i < add_lines:
x = 0.06
if i == 0:
word = "Topics: "
color = 'black'
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
indent = ex.width
else:
color = 'black'
text = ax.text(x, 0.5, "", horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=indent, units='dots')
for num, index in enumerate(range(i*5, len(top_topics))):
if num < 5:
if topic_names is None:
word = "Topic {}, ".format(top_topics[index]+1)
else:
word = topic_names[top_topics[index]+1] + ", "
if incl_perc:
topic_perc_dict = dict(topic_perc_sorted)
word = "{:.1f}% ".format(topic_perc_dict[top_topics[index]]*100) + word
color = colors[top_topics_color[top_topics[index]]]
if highlight_topic_names:
bbox=dict(facecolor=color, edgecolor=[0,0,0,0], pad=0, boxstyle='round')
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color='black',
transform=t, fontweight=700)
if color != 'black':
text.set_bbox(bbox)
else:
text = ax.text(x, 0.5, word, horizontalalignment='left',
verticalalignment='center', fontsize=16, color=color,
transform=t, fontweight=700)
text.draw(canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text.get_transform(), x=ex.width, units='dots')
ax.axis('off')
else:
ax.axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.suptitle('Document Colored by Top {} Topics'.format(topics),
fontsize=22, y=0.95, fontweight=700)
    # This saves and/or shows the plot. Note: the saved file looks better than the drawn plot
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
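# Example usage of color_doc_topics (a sketch with hypothetical paths; assumes a trained model, its
# nlp_data, and a raw abstract string, as in the __main__ block below):
#   color_doc_topics(mallet_model.model, raw_text[0], mallet_model.nlp_data, topics=5, max_chars=120,
#                    incl_perc=True, highlight=True, fig_save_path='reports/figures/colored_doc.png')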
def docs_per_topic(model, nlp_data=None, doc_list=None, corpus=None):
if corpus is None:
if doc_list is None:
corpus = nlp_data.gensim_lda_input()
else:
corpus = nlp_data.process_new_corpus(doc_list)['gensim']
num_topics = model.num_topics
dominant_topics = []
    topic_percentages = []
for i, corp in enumerate(corpus):
topic_perc, wordid_topics, wordidphvalues = model.get_document_topics(
corp, per_word_topics=True)
dominant_topic = sorted(topic_perc, key = lambda x: x[1], reverse=True)[0][0]
dominant_topics.append((i, dominant_topic))
        topic_percentages.append(topic_perc)
df = pd.DataFrame(dominant_topics, columns=['Document', 'Dominant Topic'])
docs_by_dom_topic = df.groupby('Dominant Topic').size()
df_docs_by_dom_topic = docs_by_dom_topic.to_frame().reset_index()
df_docs_by_dom_topic.columns = ['Dominant Topic', 'Document Count']
present_topics = df_docs_by_dom_topic['Dominant Topic'].tolist()
absent_topics = [i for i in range(num_topics) if i not in present_topics]
add_rows = {'Dominant Topic':absent_topics, 'Document Count':[]}
for t in absent_topics:
add_rows['Document Count'].append(0)
if len(absent_topics) > 0:
df_add_rows = pd.DataFrame(add_rows)
        df_docs_by_dom_topic = pd.concat([df_docs_by_dom_topic, df_add_rows], ignore_index=True)
df_docs_by_dom_topic.sort_values('Dominant Topic', inplace=True)
df_docs_by_dom_topic['Dominant Topic'] += 1
    topic_weight_doc = pd.DataFrame([dict(t) for t in topic_percentages])
df_topic_weight_doc = topic_weight_doc.sum().to_frame().reset_index()
df_topic_weight_doc.columns = ['Topic', 'Document Weight']
present_topics = df_topic_weight_doc['Topic'].tolist()
absent_topics = [i for i in range(num_topics) if i not in present_topics]
add_rows = {'Topic':absent_topics, 'Document Weight':[]}
for t in absent_topics:
add_rows['Document Weight'].append(0.0)
if len(absent_topics) > 0:
df_add_rows = pd.DataFrame(add_rows)
        df_topic_weight_doc = pd.concat([df_topic_weight_doc, df_add_rows], ignore_index=True)
df_topic_weight_doc['Topic'] += 1
df_topic_weight_doc.sort_values('Topic', inplace=True)
df_topic_weight_doc.reset_index(drop=True, inplace=True)
return df_docs_by_dom_topic, df_topic_weight_doc
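# Example usage of docs_per_topic (a sketch; assumes a trained model and fitted nlp_data, and the
# output path is hypothetical):
#   df_doc_counts, df_doc_weights = docs_per_topic(mallet_model.model, nlp_data=mallet_model.nlp_data)
#   df_doc_counts.to_csv('reports/docs_per_topic.csv', index=False)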
def doc_topics_per_time(model, nlp_data, year_res=5, df=None, data_column=None, year_column=None, year_list=None,
year_start=None, year_end=None):
if df is not None:
data = nlp_data.process_new_corpus(df[data_column].tolist())['gensim']
year_list = df[year_column]
elif year_list is not None:
data = nlp_data.gensim_lda_input()
else:
print("No year/data given")
return None
grouped_df = pd.DataFrame(list(zip(data, year_list)), columns=['data', 'year']).groupby('year')
year_doc_dict = {}
for year, group in grouped_df:
if year_start is None:
year_doc_dict[int(year)] = group['data'].tolist()
elif year >= year_start:
year_doc_dict[int(year)] = group['data'].tolist()
years = sorted(year_doc_dict.keys())
final_year_doc_dict = {}
if year_start is None:
year_start = years[0]
if year_end is None:
year_end = years[-1]
all_years = list(range(year_start, year_end+1))
for year in all_years:
if year not in years:
final_year_doc_dict[year] = []
else:
final_year_doc_dict[year] = year_doc_dict[year]
years = sorted(final_year_doc_dict.keys())
intervals = {}
year_range = []
years_label = None
num_years = len(years)
num_intervals = math.ceil(num_years / year_res)
print("Number of years: {} \nNumber of intervals: {}".format(num_years, num_intervals))
n = year_res
for i in range(num_intervals):
index = i*n
year_range = [years[index] + num for num in range(year_res)]
if index + year_res <= num_years:
years_label = str(years[index]) + " to " + str(years[index + n - 1])
else:
years_label = str(years[index]) + " to " + str(years[-1])
intervals[years_label] = []
for year in year_range:
if year in years:
intervals[years_label].extend(final_year_doc_dict[year])
master_dict_tn = {}
master_dict_tw = {}
for key in intervals:
print("Processing {} docs from {}...".format(len(intervals[key]), key))
df_topic_num, df_topic_weights = docs_per_topic(model, corpus=intervals[key])
master_dict_tn['Topic'] = df_topic_num['Dominant Topic'].tolist()
master_dict_tn[key] = df_topic_num['Document Count'].tolist()
master_dict_tw['Topic'] = df_topic_weights['Topic'].tolist()
master_dict_tw[key] = df_topic_weights['Document Weight'].tolist()
df_doc_counts_by_year = pd.DataFrame(master_dict_tn)
df_doc_weights_by_year = pd.DataFrame(master_dict_tw)
return df_doc_counts_by_year, df_doc_weights_by_year
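# Example usage of doc_topics_per_time (a sketch; assumes the source dataframe has a 'year' column,
# as in the __main__ block below):
#   df_counts_by_year, df_weights_by_year = doc_topics_per_time(
#       mallet_model.model, mallet_model.nlp_data, year_list=df['year'].tolist(), year_res=5)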
def plot_doc_topics_per_time(df_data, n_topics, n_horiz=5, fig_dpi=150, ylabel=None, xlabel=None, topic_names=None, show=True,
fig_save_path=None, relative_val=True, x_val=None, xtick_space=None, xmintick_space=None, hide_x_val=True,
df_data2=None, relative_val2=True, ylabel2=None, colors=['tab:blue', 'tab:orange'], linear_reg=False):
    # df_data needs to be one of the dataframes returned by doc_topics_per_time, or any dataframe whose first column is labeled 'Topic' and holds the topic numbers
columns = list(df_data.columns)[1:]
column_totals = df_data.loc[:,columns[0]:].sum(axis=0)
column_totals_list = list(column_totals)
graphs = {}
graphs2 = {}
if isinstance(n_topics, int):
topics_list = list(range(1, n_topics + 1))
else:
        topics_list = sorted(n_topics)  # sorted() returns a new list; list.sort() would return None here
for topic in topics_list:
data = df_data.loc[df_data['Topic'] == topic, columns[0]:]
data2 = None
plot2 = False
if relative_val:
data = data / column_totals_list
data.fillna(0, inplace=True)
graphs[topic] = data.values.flatten().tolist()
else:
graphs[topic] = data.values.flatten().tolist()
if df_data2 is not None:
data2 = df_data2.loc[df_data2['Topic'] == topic, columns[0]:]
plot2 = True
if relative_val2:
data2 = data2 / column_totals_list
graphs2[topic] = data2.values.flatten().tolist()
else:
graphs2[topic] = data2.values.flatten().tolist()
# Plotting
x_len = n_horiz
y_len = math.ceil(len(topics_list)/n_horiz)
if x_val is None:
x_val = list(range(1, len(columns)+1))
diff_axis = False
    if relative_val != relative_val2:
diff_axis = True
ax2_list = []
fig, axes = plt.subplots(y_len, x_len, figsize=(2*x_len, 1.5*y_len), dpi=fig_dpi,
sharex=True, sharey=True, squeeze=False, constrained_layout=True)
for i, ax in enumerate(axes.flatten()):
if i < n_topics:
ax.plot(x_val, graphs[topics_list[i]], color=colors[0])
if plot2 and diff_axis:
ax2 = ax.twinx()
ax2_list.append(ax2)
ax2_list[0].get_shared_y_axes().join(*ax2_list)
ax2.plot(x_val, graphs2[topics_list[i]], color=colors[1])
if (i + 1) % x_len > 0 and (i + 1) != len(topics_list):
ax2.set_yticklabels([])
elif plot2:
ax.plot(x_val, graphs2[topics_list[i]], color=colors[1])
            if topic_names is not None:
                ax.title.set_text(topic_names[topics_list[i]])
            else:
                ax.title.set_text('Topic {}'.format(topics_list[i]))
if xtick_space is not None: ax.xaxis.set_major_locator(ticker.MultipleLocator(xtick_space))
if xmintick_space is not None: ax.xaxis.set_minor_locator(ticker.MultipleLocator(xmintick_space))
if hide_x_val:ax.set_xticklabels([])
for label in ax.get_xticklabels():
label.set_rotation(45)
label.set_ha('right')
else:
ax.axis('off')
#plt.tight_layout()
if xlabel is not None:
fig.text(0.5, 0, xlabel, ha='center', va='top', fontsize=14)
if ylabel is not None:
fig.text(0, 0.5, ylabel, ha='right', va='center', fontsize=14, rotation=90)
if ylabel2 is not None and plot2 and diff_axis:
fig.text(1, 0.5, ylabel2, ha='left', va='center', fontsize=14, rotation=90)
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
if linear_reg:
x = np.array(range(len(columns))).reshape(-1, 1)
lr_dict = {
'Topic':[],
'Coefficient':[],
'R^2':[]
}
for topic in graphs:
lin_reg_mod = linear_model.LinearRegression()
lin_reg_mod.fit(x, graphs[topic])
if topic_names is not None:
lr_dict['Topic'].append(topic_names[topic])
else:
lr_dict['Topic'].append(topic)
lr_dict['Coefficient'].append(lin_reg_mod.coef_[0])
lr_dict['R^2'].append(lin_reg_mod.score(x, graphs[topic]))
df_lr = pd.DataFrame(lr_dict)
return df_lr
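# Example usage of plot_doc_topics_per_time (a sketch with hypothetical paths; df1 is a
# counts-per-interval dataframe returned by doc_topics_per_time above):
#   plot_doc_topics_per_time(df1, 40, 8, ylabel='Proportion of Documents', xlabel='Years',
#                            relative_val=True, show=False,
#                            fig_save_path='reports/figures/doc_plot_time.png')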
def graph(x, y, title=None, x_label=None, y_label=None, show=False, fig_save_path=None):
plt.figure(figsize=(4,3), dpi=300)
plt.plot(x, y, marker='.')
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.xticks(rotation=30, ha='right')
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def graph_multi(x_list, y_list, label_list, legend=None, legend_params={'loc':'best'}, title=None, x_label=None, y_label=None, show=False, fig_save_path=None):
plt.figure(figsize=(4,3), dpi=300)
for i, label in enumerate(label_list):
plt.plot(x_list[i], y_list[i], label=label, marker='.')
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(title=legend, **legend_params)
plt.xticks(rotation=30, ha='right')
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def plot_tsne_doc_cluster_obsolete(model, nlp_data, doc_list=None, corpus=None, min_tw=None, marker_size=1, seed=2020,
show=True, fig_save_path=None):
if corpus is None:
if doc_list is None:
corpus = nlp_data.gensim_lda_input()
else:
corpus = nlp_data.process_new_corpus(doc_list)['gensim']
n_topics = model.num_topics
topic_weights= {}
for i in range(n_topics):
topic_weights[i] = []
for i, row_list in enumerate(model.get_document_topics(corpus)):
temp_dict = {t:w for t, w in row_list}
for topic in range(n_topics):
if topic in temp_dict:
topic_weights[topic].append(temp_dict[topic])
else:
topic_weights[topic].append(0)
print(pd.DataFrame(topic_weights).fillna(0).head())
print(pd.DataFrame(topic_weights).head())
arr = pd.DataFrame(topic_weights).fillna(0).values
if min_tw is not None:
arr = arr[np.amax(arr, axis=1) >= min_tw]
topic_num = np.argmax(arr, axis=1)
tsne_model = TSNE(n_components=2, verbose=1, random_state=seed, angle=0.99, init='pca', n_jobs=-1)
tsne_lda = tsne_model.fit_transform(arr)
colors = np.array([color for name, color in mcolors.XKCD_COLORS.items()])
title = "t-SNE Clustering of {} Topics".format(n_topics)
x = tsne_lda[:,0]
y = tsne_lda[:,1]
color = colors[topic_num]
plt.figure(figsize=(6,6), dpi=300)
plt.scatter(x, y, color=color, marker='.', s=marker_size)
plt.title(title)
plt.xlabel("Component 1")
plt.ylabel("Component 2")
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def plot_tsne_doc_cluster_old(model, nlp_data, doc_list=None, corpus=None, min_tw=None, marker_size=1, seed=2020,
dpi=450, show_topics=False, custom_titles=None, show=True, fig_save_path=None):
if corpus is None:
if doc_list is None:
corpus = nlp_data.gensim_lda_input()
else:
corpus = nlp_data.process_new_corpus(doc_list)['gensim']
n_topics = model.num_topics
topic_weights= {}
for i in range(n_topics):
topic_weights[i] = []
for i, row_list in enumerate(model.get_document_topics(corpus)):
temp_dict = {t:w for t, w in row_list}
for topic in range(n_topics):
if topic in temp_dict:
topic_weights[topic].append(temp_dict[topic])
else:
topic_weights[topic].append(0)
arr = pd.DataFrame(topic_weights).fillna(0).values
if min_tw is not None:
arr = arr[np.amax(arr, axis=1) >= min_tw]
topic_num = np.argmax(arr, axis=1)
tsne_model = TSNE(n_components=2, verbose=1, random_state=seed, angle=0.99, init='pca', n_jobs=-1)
tsne_lda = tsne_model.fit_transform(arr)
    # Place topic titles at the median of each topic's document coordinates
topic_positions = {}
if show_topics:
for topic in range(n_topics):
topic_arr = topic_num == topic
coord_arr = tsne_lda[topic_arr]
topic_loc = np.median(coord_arr, axis=0)
topic_positions[topic] = topic_loc
colors = np.array([color for name, color in mcolors.XKCD_COLORS.items()])
title = "t-SNE Clustering of {} Topics".format(n_topics)
x = tsne_lda[:,0]
y = tsne_lda[:,1]
color = colors[topic_num]
fig = plt.figure(figsize=(6,6), dpi=dpi)
ax = fig.add_subplot(111)
ax.scatter(x, y, color=color, marker='.', s=marker_size)
center_dots_x = []
center_dots_y = []
for topic in topic_positions:
x, y = topic_positions[topic]
center_dots_x.append(x)
center_dots_y.append(y)
if custom_titles is not None:
text = custom_titles[topic+1]
else:
text = "Topic {}".format(topic+1)
bbox=dict(facecolor=[1,1,1,0.5], edgecolor=colors[topic], boxstyle='round')
txt_box = ax.text(x, y, text, horizontalalignment='center', verticalalignment='center', fontsize=5,
)
txt_box.set_bbox(bbox)
fig.suptitle(title)
ax.set_xlabel("Component 1")
ax.set_ylabel("Component 2")
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def plot_tsne_doc_cluster3d(model, nlp_data, doc_list=None, corpus=None, min_tw=None, marker_size=1, seed=2020,
show_topics=False, custom_titles=None, show=True, fig_save_path=None):
if corpus is None:
if doc_list is None:
corpus = nlp_data.gensim_lda_input()
else:
corpus = nlp_data.process_new_corpus(doc_list)['gensim']
n_topics = model.num_topics
topic_weights= {}
for i in range(n_topics):
topic_weights[i] = []
for i, row_list in enumerate(model.get_document_topics(corpus)):
temp_dict = {t:w for t, w in row_list}
for topic in range(n_topics):
if topic in temp_dict:
topic_weights[topic].append(temp_dict[topic])
else:
topic_weights[topic].append(0)
arr = pd.DataFrame(topic_weights).fillna(0).values
if min_tw is not None:
arr = arr[np.amax(arr, axis=1) >= min_tw]
topic_num = np.argmax(arr, axis=1)
tsne_model = TSNE(n_components=3, verbose=1, random_state=seed, angle=0.99, init='pca', n_jobs=-1)
tsne_lda = tsne_model.fit_transform(arr)
    # Place topic titles at the median of each topic's document coordinates
topic_positions = {}
if show_topics:
for topic in range(n_topics):
topic_arr = topic_num == topic
coord_arr = tsne_lda[topic_arr]
topic_loc = np.median(coord_arr, axis=0)
topic_positions[topic] = topic_loc
colors = np.array([color for name, color in mcolors.XKCD_COLORS.items()])
title = "t-SNE Clustering of {} Topics".format(n_topics)
x = tsne_lda[:,0]
y = tsne_lda[:,1]
z = tsne_lda[:,2]
color = colors[topic_num]
fig = plt.figure(figsize=(6,6), dpi=300)
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z, color=color, marker='.', s=marker_size)
center_dots_x = []
center_dots_y = []
for topic in topic_positions:
x, y, z = topic_positions[topic]
center_dots_x.append(x)
center_dots_y.append(y)
if custom_titles is not None:
text = custom_titles[topic+1]
else:
text = "Topic {}".format(topic+1)
bbox=dict(facecolor=[1,1,1,0.5], edgecolor=colors[topic], boxstyle='round')
txt_box = ax.text(x, y, z, text, horizontalalignment='center', verticalalignment='center', fontsize=5,
)
txt_box.set_bbox(bbox)
fig.suptitle(title)
ax.set_xlabel("Component 1")
ax.set_ylabel("Component 2")
ax.set_zlabel("Component 3")
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
def plot_tsne_doc_cluster(model, nlp_data, doc_list=None, corpus=None, min_tw=None, marker_size=1, seed=2020,
dpi=450, show_topics=False, topic_names=None, show=True, show_legend=False,
fig_save_path=None, tp_args=None, **kwargs):
if corpus is None:
if doc_list is None:
corpus = nlp_data.gensim_lda_input()
else:
corpus = nlp_data.process_new_corpus(doc_list)['gensim']
n_topics = model.num_topics
topic_weights= {}
for i in range(n_topics):
topic_weights[i] = []
for i, row_list in enumerate(model.get_document_topics(corpus)):
temp_dict = {t:w for t, w in row_list}
for topic in range(n_topics):
if topic in temp_dict:
topic_weights[topic].append(temp_dict[topic])
else:
topic_weights[topic].append(0)
arr = pd.DataFrame(topic_weights).fillna(0).values
if min_tw is not None:
arr = arr[np.amax(arr, axis=1) >= min_tw]
topic_num = np.argmax(arr, axis=1)
tsne_model = TSNE(n_components=2, verbose=1, random_state=seed, angle=0.99, init='pca', n_jobs=-1)
tsne_lda = tsne_model.fit_transform(arr)
topic_positions = {}
if show_topics:
for topic in range(n_topics):
topic_arr = topic_num == topic
coord_arr = tsne_lda[topic_arr]
topic_loc = np.median(coord_arr, axis=0)
topic_positions[topic] = topic_loc
colors = np.array([color for name, color in mcolors.XKCD_COLORS.items()])
title = "t-SNE Clustering of {} Topics".format(n_topics)
x = tsne_lda[:,0]
y = tsne_lda[:,1]
color = colors[topic_num]
fig = plt.figure(figsize=(6,6), dpi=dpi)
ax = fig.add_subplot(111)
ax.scatter(x, y, color=color, marker='.', s=marker_size)
center_dots_x = []
center_dots_y = []
if tp_args is None:
tp_args = {
'fontsize':8,
'weight':'bold'
}
for topic in topic_positions:
x, y = topic_positions[topic]
center_dots_x.append(x)
center_dots_y.append(y)
text = "T{}".format(topic+1)
bbox=dict(facecolor=[1,1,1,0.6], edgecolor=[0,0,0,0], pad=0, boxstyle='round')
txt_box = ax.text(x, y, text, horizontalalignment='center', verticalalignment='center', **tp_args
)
txt_box.set_bbox(bbox)
legend_list = []
for topic in range(n_topics):
if topic_names is None:
text = "Topic {}".format(topic+1)
legend_list.append(text)
else:
text = topic_names[topic+1]
legend_list.append(text)
if show_legend:
kwargs2 = kwargs.copy()
if 'size' in kwargs2:
kwargs2['size'] += 8
else:
kwargs2['size'] = 'xx-large'
t = ax.transAxes
canvas = ax.figure.canvas
y = 0.985
add_offset = 0.002
for i, topic in enumerate(legend_list):
ax.text(1.025, y, '\u2219', color=colors[i], transform=t, ha='center', va='center', **kwargs2)
txt_box = ax.text(1.05, y, topic, color='black', transform=t, ha='left', va='center', **kwargs)
txt_box.draw(canvas.get_renderer())
ex = txt_box.get_window_extent()
t = transforms.offset_copy(txt_box.get_transform(), y=-ex.height, units='dots')
y -= add_offset
fig.suptitle(title)
ax.set_xlabel("Component 1")
ax.set_ylabel("Component 2")
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
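# Example usage of plot_tsne_doc_cluster (a sketch with hypothetical paths; assumes a trained model
# and fitted nlp_data):
#   plot_tsne_doc_cluster(mallet_model.model, mallet_model.nlp_data, min_tw=0.25, marker_size=1,
#                         show_topics=True, show_legend=True, show=False,
#                         fig_save_path='reports/figures/tsne_clusters.png')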
def rows_per_df_grp(df, grouping_column): # Takes a dataframe and returns a dict of dataframes by the grouping column, and a list of counts
grp_df = df.groupby(grouping_column)
grouped_row_df_dict = {}
row_counts = []
for grp, data in grp_df:
grouped_row_df_dict[grp] = data
row_counts.append((grp, len(data)))
return grouped_row_df_dict, row_counts
def generate_mallet_models(data_path, data_column, model_save_folder, figure_save_folder, topic_num, model_params,
file_name_append=None, seed=None, **kwargs):
with Timing("Loading Data..."):
df = pd.read_csv(data_path)
data = df[data_column].tolist()
with Timing('Processing Data...'):
nlp_params = dict(spacy_lib='en_core_sci_lg', max_df=.25, bigrams=True, trigrams=True, max_tok_len=30)
for key in kwargs:
if key in nlp_params:
nlp_params[key] = kwargs[key]
nlp_data = data_nl_processing_v2.NlpForLdaInput(data, **nlp_params)
nlp_data.start()
with Timing("Building Models..."):
os.makedirs(model_save_folder, exist_ok=True)
if seed is None:
seed = int(time()*100)-158000000000
if file_name_append is None:
append = ''
else:
append = file_name_append
models = model_params
model_list = []
for model in models:
mallet_model = MalletModel(nlp_data, topics=topic_num, seed=seed, model_type='mallet', **model)
mallet_model.start()
save_path = model_save_folder + 'mallet_t{}a{}o{}{}'.format(topic_num, model['alpha'], model['optimize_interval'], append)
mallet_model.save(save_path)
model_list.append((mallet_model, save_path))
with open(model_save_folder + 'mallet_parameters_{}T{}.txt'.format(topic_num, append), 'w') as para_file:
file_string_list = []
file_string_list.append("Model Parameters for {} Topics \n".format(topic_num))
file_string_list.append("\n")
for i in range(len(model_list)):
file_string_list.append("Mallet model t{}a{}o{} Parameters: \n".format(topic_num, models[i]['alpha'], models[i]['optimize_interval']))
file_string_list.append("Model {}/{} generated \n".format(i+1, len(model_list)))
file_string_list.append("File Path: {} \n".format(model_list[i][1]))
file_string_list.append("{} Coherence: {} \n".format(model_list[i][0].coherence, model_list[i][0].model_raw['coherence']))
for key in model_list[i][0].parameters:
file_string_list.append("{}: {} \n".format(key, model_list[i][0].parameters[key]))
file_string_list.append("\n")
para_file.writelines(file_string_list)
with Timing("Creating Figures..."):
os.makedirs(figure_save_folder, exist_ok=True)
for i in range(len(model_list)):
save_path = figure_save_folder + 'mallet_t{}a{}o{}s{}{}.html'.format(
topic_num, models[i]['alpha'], models[i]['optimize_interval'], seed, append)
panel = pyLDAvis.gensim.prepare(model_list[i][0].model, model_list[i][0].nlp_data.gensim_lda_input(), model_list[i][0].nlp_data.get_id2word(),
mds='tsne', sort_topics=False)
pyLDAvis.save_html(panel, save_path)
for i in range(len(model_list)):
save_path = figure_save_folder + 'mallet_wordcloud_t{}a{}o{}s{}{}.png'.format(
topic_num, models[i]['alpha'], models[i]['optimize_interval'], seed, append)
create_multi_wordclouds(topic_num, 8, model_list[i][0].model, model_list[i][0].nlp_data, num_w=20, fig_dpi=400,
show=False, fig_save_path=save_path)
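# Example usage of generate_mallet_models (a sketch with hypothetical paths/parameters; each dict in
# model_params is passed through to MalletModel, so keys such as 'alpha' and 'optimize_interval' are
# expected):
#   generate_mallet_models('data/external/data_cleaned.csv', 'title_abstract', 'models/',
#                          'reports/figures/', topic_num=40,
#                          model_params=[{'alpha': 25, 'optimize_interval': 200}])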
def graph_coherence(data_path_list, title=None, x_label=None, y_label=None, show=False, fig_save_path=None, box_plot=True, **kwargs):
graphs = {}
for data_path in data_path_list:
df = pd.read_csv(data_path)
columns = list(df.columns)[1:]
for column in columns:
if column in graphs:
graphs[column].extend(df[column].tolist())
else:
graphs[column] = df[column].tolist()
labels = list(graphs.keys())
x_values = []
for label in labels:
x_values.append(graphs[label])
# Show graph
plt.figure(figsize=(12, 8))
if box_plot:
plt.boxplot(x_values, labels=labels, **kwargs)
else:
mean_sd_dict = {'X':[], 'MEAN':[], 'STDV':[]}
for label in graphs:
mean_sd_dict['X'].append(label)
mean_sd_dict['MEAN'].append(np.mean(graphs[label]))
mean_sd_dict['STDV'].append(np.std(graphs[label]))
# Plot graph of x VS y with standard deviation for error bars
params = dict(fmt='_', markersize=10, capsize=5, markeredgewidth=1, c='Black')
for key in kwargs:
params[key] = kwargs[key]
plt.errorbar(mean_sd_dict['X'], mean_sd_dict['MEAN'],
yerr=mean_sd_dict['STDV'], **params)
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.xticks(rotation=45, ha='right')
plt.tight_layout()
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()# Closes and deletes graph to free up memory
def plot_topic_groups(df_data, topic_groups, n_horiz=5, fig_dpi=150, ylabel=None, xlabel=None, show=True, merge_graphs=False,
fig_save_path=None, relative_val=True, x_val=None, xtick_space=None, xmintick_space=None, hide_x_val=True,
colors=['tab:blue'], linear_reg=False):
    # df_data needs to be one of the dataframes returned by doc_topics_per_time, or any dataframe whose first column is labeled 'Topic' and holds the topic numbers
topics_list = [topic-1 for group in topic_groups for topic in topic_groups[group]]
group_list = [group for group in topic_groups]
n_groups = len(group_list)
columns = list(df_data.columns)[1:]
column_totals = df_data.loc[topics_list,columns[0]:].sum(axis=0)
column_totals_list = list(column_totals)
graphs = {}
grouped_df = pd.DataFrame()
for group in topic_groups:
data = df_data.loc[df_data['Topic'].isin(topic_groups[group]), columns[0]:].sum(axis=0)
if relative_val:
data = data / column_totals_list
data.fillna(0, inplace=True)
graphs[group] = data.values.flatten().tolist()
else:
graphs[group] = data.values.flatten().tolist()
data = data.to_frame().T
data['Topic Group'] = group
data['Topics'] = str(topic_groups[group])[1:-1]
grouped_df = pd.concat([grouped_df, data])
grouped_df.reset_index(drop=True, inplace=True)
new_columns = ['Topic Group'] + ['Topics'] + columns
grouped_df = grouped_df[new_columns]
# Plotting
x_len = n_horiz
y_len = math.ceil(n_groups/n_horiz)
if x_val is None:
x_val = list(range(1, len(columns)+1))
if merge_graphs:
fig = plt.figure(figsize=(12, 8))
if n_groups > 10:
plt.gca().set_prop_cycle(cycler(color=plt.get_cmap('tab20').colors))
for graph in graphs:
plt.plot(columns, graphs[graph], label=graph)
plt.legend(loc='best')
else:
fig, axes = plt.subplots(y_len, x_len, figsize=(2*x_len, 1.5*y_len), dpi=fig_dpi,
sharex=True, sharey=True, squeeze=False, constrained_layout=True)
for i, ax in enumerate(axes.flatten()):
if i < n_groups:
ax.plot(x_val, graphs[group_list[i]], color=colors[0])
ax.title.set_text(group_list[i])
if xtick_space is not None: ax.xaxis.set_major_locator(ticker.MultipleLocator(xtick_space))
if xmintick_space is not None: ax.xaxis.set_minor_locator(ticker.MultipleLocator(xmintick_space))
if hide_x_val:ax.set_xticklabels([])
for label in ax.get_xticklabels():
label.set_rotation(45)
label.set_ha('right')
else:
ax.axis('off')
#plt.tight_layout()
if xlabel is not None:
fig.text(0.5, 0, xlabel, ha='center', va='top', fontsize=14)
if ylabel is not None:
fig.text(0, 0.5, ylabel, ha='right', va='center', fontsize=14, rotation=90)
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight')
if show:
plt.show()
plt.close()
if linear_reg:
x = np.array(range(len(columns))).reshape(-1, 1)
lr_dict = {
'Topic Group':[],
'Topics':[],
'Coefficient':[],
'R^2':[]
}
for group in topic_groups:
lin_reg_mod = linear_model.LinearRegression()
lin_reg_mod.fit(x, graphs[group])
lr_dict['Topic Group'].append(group)
lr_dict['Topics'].append(str(topic_groups[group])[1:-1])
lr_dict['Coefficient'].append(lin_reg_mod.coef_[0])
lr_dict['R^2'].append(lin_reg_mod.score(x, graphs[group]))
df_lr = pd.DataFrame(lr_dict)
return grouped_df, df_lr
else:
return grouped_df
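# Example usage of plot_topic_groups (a sketch with hypothetical group names and topic numbers; df1
# is a counts-per-interval dataframe from doc_topics_per_time):
#   topic_groups = {'Diagnosis': [1, 4, 7], 'Treatment': [2, 9]}
#   grouped_df, df_lr = plot_topic_groups(df1, topic_groups, n_horiz=3, linear_reg=True, show=False,
#                                         fig_save_path='reports/figures/topic_groups.png')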
def build_summary_df(df_bestdoc, df_nt, df_ty, topic_names=None, rel_val=True):
df_lr = plot_doc_topics_per_time(df_ty, n_topics=len(df_bestdoc["Topic Number"]), show=False, relative_val=rel_val, linear_reg=True)
columns_names = ["Topic", "Keywords", "Document Count", "Coefficient", "R^2"]
topic_kw_counts_dict = {
columns_names[0]:df_bestdoc["Topic Number"].to_list(),
columns_names[1]:df_bestdoc["Topic Keywords"].to_list(),
columns_names[2]:df_nt["Document Count"].tolist(),
columns_names[3]:df_lr["Coefficient"].tolist(),
columns_names[4]:df_lr["R^2"].tolist(),
}
if topic_names is not None:
columns_names.insert(1, "Name")
        p = re.compile(r'T\d+:')  # This removes the topic number prefix used in my topic labels, e.g. 'T1:'
topic_names_list = [
topic_names[i+1][re.match(p, topic_names[i+1]).end():]
if re.match(p, topic_names[i+1]) is not None else topic_names[i+1]
for i in range(len(topic_names))
]
topic_kw_counts_dict[columns_names[1]] = topic_names_list
df_nt_kw_lr = pd.DataFrame(topic_kw_counts_dict)
df_nt_kw_lr = df_nt_kw_lr[columns_names]
return df_nt_kw_lr
def build_cooc_matrix_df(model, nlp_data, doc_list=None, corpus=None, min_tw=None):
    # This creates two co-occurrence matrix dataframes
    # The first returned df weights co-occurrence by the topic weights
    # The second df ignores the weights: a document counts as containing a topic if that topic's weight is at least min_tw (or 0.1 when min_tw is None)
if corpus is None:
if doc_list is None:
corpus = nlp_data.gensim_lda_input()
else:
corpus = nlp_data.process_new_corpus(doc_list)['gensim']
n_topics = model.num_topics
topic_weights= {}
for i in range(1, n_topics+ 1):
topic_weights[i] = []
for i, row_list in enumerate(model.get_document_topics(corpus, minimum_probability=0.001)):
temp_dict = {t+1:w for t, w in row_list}
for topic in range(1, n_topics+1):
if topic in temp_dict:
topic_weights[topic].append(temp_dict[topic])
else:
topic_weights[topic].append(0)
arr = pd.DataFrame(topic_weights).fillna(0)
if min_tw is not None:
arr_n = arr[arr >= min_tw].fillna(0)
else:
arr_n = arr[arr >= 0.1].fillna(0)
arr_n[arr_n > 0] = 1
df_cooc_w = arr.T.dot(arr)
df_cooc_n = arr_n.T.dot(arr_n)
np.fill_diagonal(df_cooc_w.values, 0)
np.fill_diagonal(df_cooc_n.values, 0)
return df_cooc_w, df_cooc_n
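# Example usage of build_cooc_matrix_df (a sketch; assumes a trained model and fitted nlp_data, and
# the resulting matrices can be passed straight to plot_heatmap / plot_clusterheatmap below):
#   df_cooc_w, df_cooc_n = build_cooc_matrix_df(mallet_model.model, mallet_model.nlp_data, min_tw=0.1)
#   plot_heatmap(df_cooc_n, show=False, fig_save_path='reports/figures/topic_cooc_heatmap.png')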
def plot_heatmap(df_matrix, topic_names=None, show=True, fig_save_path=None):
    #plots a heatmap of the passed co-occurrence matrix
#fig, ax = plt.subplots(figsize=(8,8))
#plt.figure(figsize=(8,8), dpi=300, facecolor='w')
sns.set(font_scale=0.8)
sns.heatmap(df_matrix, linewidths = 0.5, square=True, cmap='YlOrRd', xticklabels=True, yticklabels=True)
plt.tight_layout()
    # This saves and/or shows the plot. Note: the saved file looks better than the drawn plot
length = len(df_matrix.index)
plt.ylim(length, 0)
plt.xlim(0, length)
#ax.set_ylim(length, 0)
#ax.set_xlim(0, length)
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight', dpi=300)
if show:
plt.show()
plt.close()
def plot_clusterheatmap(df_matrix, topic_names=None, show=True, fig_save_path=None, **kwargs):
    #plots a cluster heatmap of the passed co-occurrence matrix
df1 = df_matrix.copy()
# This relabels the columns and rows if topic names is passed
if topic_names is not None:
df1.index = [topic_names[topic] for topic in df1.index]
df1.columns = [topic_names[int(topic)] for topic in df1.columns]
    # This is for calculating the linkage manually, outside of the cluster method, because
    # the co-occurrence matrix is an uncondensed distance matrix. To properly calculate linkage,
    # the matrix must be reprocessed so that values closer to 0 indicate stronger association.
    # To accomplish this, the max value in the matrix is used as the new 0 and all other values become
    # max value - matrix value. The diagonal is reassigned to 0, and then the matrix is transformed
    # into a condensed distance matrix as input to the linkage method, and the result
    # is used for the clustering method.
df2 = df_matrix.values.max() - df_matrix
np.fill_diagonal(df2.values, 0)
df3 = hc.linkage(ssd.squareform(df2), method='average')
sns.set(font_scale=0.9)
# This makes the cluster graph and assigns a reference to it as sns_grid
sns_grid = sns.clustermap(df1,
row_linkage = df3,
col_linkage = df3,
**kwargs)
plt.tight_layout()
#sns_grid.savefig("reports/main_a5/testing.png")
# This adjusts the rotation and positions of the x and y tick labels
sns_grid.ax_heatmap.set_yticklabels(sns_grid.ax_heatmap.get_yticklabels(), rotation=0)
if topic_names is not None:
sns_grid.ax_heatmap.set_xticklabels(sns_grid.ax_heatmap.get_xticklabels(), rotation=-60, ha='left')
xd = -10/72
offset = mpl.transforms.ScaledTranslation(xd, 0, sns_grid.fig.dpi_scale_trans)
for label in sns_grid.ax_heatmap.get_xticklabels():
label.set_transform(label.get_transform() + offset)
# This is to ensure that the heatmap is of the appropriate size.
# There were issues where part of the heatmap was cutoff
length = len(df1.index)
sns_grid.ax_heatmap.set_ylim(length, 0)
sns_grid.ax_heatmap.set_xlim(0, length)
# This saves and/or shows the plot.
if fig_save_path is not None:
plt.savefig(fig_save_path, bbox_inches='tight', dpi=300)
if show:
plt.show()
plt.close()
def import_topic_names(file_path_name):
    # Imports topic names from a spreadsheet whose first column contains topic numbers and
    # whose second column contains topic names
if file_path_name[-3:] == 'csv':
df = pd.read_csv(file_path_name)
elif file_path_name[-3:] in ['xls','lsx', 'lsm', 'lsb', 'odf']:
df = pd.read_excel(file_path_name)
else:
raise NameError('Unsupported file format, please provide csv or excel file')
topic_names_dict = dict(zip(df.iloc[:,0].values, df.iloc[:,1].values))
return topic_names_dict
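# Example usage of import_topic_names (a sketch with a hypothetical file; the spreadsheet is expected
# to hold topic numbers in the first column and topic names in the second):
#   topic_names = import_topic_names('reports/topic_names.xlsx')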
class MalletModel:
def __init__(self, nlp_data, topics=20, seed=0, topn=20, coherence='c_v', model_type='mallet',
mallet_path='C:\\mallet\\bin\\mallet', **parameters):
self.nlp_data = nlp_data
self.topics = topics
self.seed = seed
self.mallet_path = mallet_path
self.model = None
self.model_raw = None
self.coherence = coherence
self.topn = topn
self.model_type = model_type
if model_type == 'mallet':
self.parameters = {
'mallet_path':self.mallet_path,
'workers':5,
'random_seed':self.seed,
'alpha':50,
'iterations':1000,
'optimize_interval':0
}
elif model_type == 'gensim':
if self.seed == 0:
self.seed = None
self.parameters = {
'decay':0.5,
'alpha':'asymmetric',
'eta':None,
'workers':None,
'random_state':self.seed,
'chunksize':100,
'passes':10,
'per_word_topics':True
}
else:
raise ValueError("model_type must be either 'mallet' or 'gensim'")
for key in parameters:
self.parameters[key] = parameters[key]
def start(self, verbose=True):
print("Initiating model building...")
t0 = time()
if self.model_type == 'mallet':
self.model_raw = self.mallet_model_(self.topics)
self.model = gensim.models.wrappers.ldamallet.malletmodel2ldamodel(self.model_raw['model'])
elif self.model_type == 'gensim':
self.model_raw = self.gensim_model_(self.topics)
self.model = self.model_raw['model']
print("Model done in {:0.3f}s".format(time()-t0))
def gensim_model_(self, topics):
tm = time()
print("Building Gensim model...")
model = gensim.models.LdaMulticore(corpus=self.nlp_data.gensim_lda_input(),
id2word=self.nlp_data.get_id2word(),
num_topics=topics,
**self.parameters)
model_time = (time() - tm)
print("Done in %0.3fs." % model_time)
tc = time()
print("Running Coherence model for Gensim...")
model_topics_list = self.gensim_topic_words_(model.show_topics(formatted=False, num_words=self.topn, num_topics=-1))
coh_model = gensim.models.CoherenceModel(topics=model_topics_list, texts=self.nlp_data.get_token_text(),
dictionary=self.nlp_data.get_id2word(), window_size=None,
coherence=self.coherence)
model_coherence = coh_model.get_coherence()
print("Done in %0.3fs." % (time() - tc))
print("Gensim coherence for {} topics is: {:0.3f}".format(topics, model_coherence))
return {"coherence":model_coherence, "time":model_time, "topics":model_topics_list, "model":model}
def mallet_model_(self, topics):
tm = time()
print("Building Mallet model...")
model = gensim.models.wrappers.LdaMallet(corpus=self.nlp_data.gensim_lda_input(),
id2word=self.nlp_data.get_id2word(),
num_topics=topics,
**self.parameters)
model_time = (time() - tm)
print("Done in %0.3fs." % model_time)
tc = time()
print("Running Coherence model for Mallet...")
model_topics_list = self.gensim_topic_words_(model.show_topics(formatted=False, num_words=self.topn, num_topics=-1))
coh_model = gensim.models.CoherenceModel(topics=model_topics_list, texts=self.nlp_data.get_token_text(),
dictionary=self.nlp_data.get_id2word(), window_size=None,
coherence=self.coherence)
model_coherence = coh_model.get_coherence()
print("Done in %0.3fs." % (time() - tc))
print("Mallet coherence for {} topics is: {:0.3f}".format(topics, model_coherence))
return {"coherence":model_coherence, "time":model_time, "topics":model_topics_list, "model":model}
def gensim_topic_words_(self, show_topics):
show_topics.sort()
topic_word_list = []
for topic in show_topics:
message = "Topic #%d: " % topic[0]
new_list = list(word[0] for word in topic[1])
message += ", ".join(new_list)
topic_word_list.append(new_list)
print(message)
print()
return topic_word_list
def output_parameters(self, save=False, path=None):
if save:
with open(path, 'w') as file:
file.write(str(self.parameters))
return self.parameters
def save(self, file_path_name):
with open(file_path_name, 'wb') as file:
pickle.dump(self, file)
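# Example usage of MalletModel (a sketch with hypothetical paths; mirrors the demo code in the
# __main__ block below):
#   model = MalletModel(nlp_data, topics=35, seed=0, model_type='mallet')
#   model.start()
#   model.save('models/demo_mallet_model')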
class SilentPrinting:
def __init__(self, verbose=False):
self.verbose=verbose
def __enter__(self):
if not self.verbose:
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.verbose:
sys.stdout.close()
sys.stdout = self._original_stdout
class Timing:
def __init__(self, text=None):
if text is None:
text = "Initiating..."
self.text = text
self.t = None
def __enter__(self):
self.t = time()
print(self.text)
def __exit__(self, exc_type, exc_val, exc_tb):
comp_time = time() - self.t
print("Done in {:.3f}s.".format(comp_time))
if __name__ == "__main__": # Code only runs if this file is run directly. This is for testing purposes
testing_graph = False
if testing_graph:
all_coh_file_list = glob.glob('.\\reports\\t(5_100_5)a3g*coh.csv')
all_time_file_list = glob.glob('.\\reports\\t(5_100_5)a3g*time.csv')
methods_coh_file_list = glob.glob('.\\reports\\t(5_100_5)m3g*coh.csv')
no_methods_coh_file_list = glob.glob('.\\reports\\t(5_100_5)t3g*coh.csv')
plot_model_comparison(all_coh_file_list, 'Number of Topics', ['Gensim', 'Mallet', 'Sklearn'],
"Number of Topics", "C_V Coherence", "Model Coherence Comparison", show = True,
fig_save_path = 'reports/figures/all_coh_means_stdv.png', csv_save_path = 'reports/all_coh_means_stdv.csv')
plot_model_comparison(all_time_file_list, 'Number of Topics', ['Gensim', 'Mallet', 'Sklearn'],
"Number of Topics", "Time (sec)", "Model Time Comparison", show = True,
fig_save_path = 'reports/figures/all_time_means_stdv.png', csv_save_path = 'reports/all_time_means_stdv.csv')
plot_model_comparison(methods_coh_file_list, 'Number of Topics', ['Mallet'],
"Number of Topics", "C_V Coherence", "Model Coherence Comparison", show = True,
fig_save_path = 'reports/figures/met_coh_means_stdv.png', csv_save_path = 'reports/met_coh_means_stdv.csv')
plot_model_comparison(no_methods_coh_file_list, 'Number of Topics', ['Mallet'],
"Number of Topics", "C_V Coherence", "Model Coherence Comparison", show = True,
fig_save_path = 'reports/figures/no_met_coh_means_stdv.png', csv_save_path = 'reports/no_met_coh_means_stdv.csv')
build_model = False
if build_model:
data_path = 'data/external/data_cleaned.csv'
data_column = 'title_abstract'
print("Loading dataset for CompareModels testing...")
t0 = time()
df = pd.read_csv(data_path)
data = df[data_column].tolist()
print("done in %0.3fs." % (time() - t0))
spacy_library = 'en_core_sci_lg'
nlp_data = data_nl_processing.NlpForLdaInput(data, spacy_lib=spacy_library, max_df=.25, bigrams=True, trigrams=True)
nlp_data.start()
model_seed = int(time()*100)-158000000000
model_m = MalletModel(nlp_data, topics=35, seed=model_seed, model_type='mallet')
model_m.start()
model_m.save('models/demo_mallet_model')
model_g = MalletModel(nlp_data, topics=35, seed=model_seed, model_type='gensim')
model_g.start()
model_g.save('models/demo_gensim_model')
panel = pyLDAvis.gensim.prepare(model_m.model, model_m.nlp_data.gensim_lda_input(), model_m.nlp_data.get_id2word(),
mds='tsne', sort_topics=False)
pyLDAvis.save_html(panel, 'reports/figures/pylda_vis_sample_mallet.html')
panel = pyLDAvis.gensim.prepare(model_g.model, model_g.nlp_data.gensim_lda_input(), model_g.nlp_data.get_id2word(),
mds='tsne', sort_topics=False)
pyLDAvis.save_html(panel, 'reports/figures/pylda_vis_sample_gensim.html')
load_models = False
if load_models:
with open('models/demo_mallet_model', 'rb') as model:
mallet_model = pickle.load(model)
with open('models/demo_gensim_model', 'rb') as model:
gensim_model = pickle.load(model)
panel = pyLDAvis.gensim.prepare(gensim_model.model, gensim_model.nlp_data.gensim_lda_input(), gensim_model.nlp_data.get_id2word(),
mds='tsne', sort_topics=False)
pyLDAvis.save_html(panel, 'reports/figures/pylda_vis_sample_gensim.html')
panel = pyLDAvis.gensim.prepare(mallet_model.model, mallet_model.nlp_data.gensim_lda_input(), mallet_model.nlp_data.get_id2word(),
mds='tsne', sort_topics=False)
pyLDAvis.save_html(panel, 'reports/figures/pylda_vis_sample_mallet.html')
if False:
with open('models\\t(5_100_5)a3g278879807mod', 'rb') as model:
model_a278879807 = pickle.load(model)
with open('models\\t(5_100_5)t3g282506889mod', 'rb') as model:
model_t282506889 = pickle.load(model)
with open('models\\t(5_100_5)m3g284431709mod', 'rb') as model:
model_m284431709 = pickle.load(model)
model_a278879807.output_dataframe(save=True, path='reports/t(5_100_5)a3g278879807coh.csv')
model_a278879807.output_dataframe(save=True, path='reports/t(5_100_5)a3g278879807time.csv',data_column="time")
model_t282506889.output_dataframe(save=True, path='reports/t(5_100_5)t3g282506889coh.csv')
model_m284431709.output_dataframe(save=True, path='reports/t(5_100_5)m3g284431709coh.csv')
if False:
with open('models/demo_mallet_model', 'rb') as model:
mallet_model = pickle.load(model)
topic_df = dominant_doc_topic_df(mallet_model.model, mallet_model.nlp_data)
print(topic_df.head(10))
topic_df.to_csv('reports/testing_dom_topic_func.csv')
if False: # Testing dataframe functions and total token counts
with open('models/main_mallet_t40a25o200', 'rb') as model:
mallet_model = pickle.load(model)
topic_df = dominant_doc_topic_df(mallet_model.model, mallet_model.nlp_data)
print(topic_df.head(10))
topic_df.to_csv('reports/testing_dom_topic_func.csv')
best_doc_df = best_doc_for_topic(topic_df)
print(best_doc_df.head(10))
best_doc_df.to_csv('reports/testing_best_doc_func.csv')
data_path = 'data/external/data_cleaned.csv'
data_column = 'title_abstract'
df = pd.read_csv(data_path)
raw_text = df[data_column].tolist()
doc_list = best_doc_df["Best Document"]
new_column = []
for doc in doc_list:
new_column.append(raw_text[int(doc-1)])
best_doc_raw_df = best_doc_df.copy()
best_doc_raw_df["Raw Text"] = pd.Series(new_column).values
best_doc_raw_df.to_csv('reports/testing_best_doc_raw.csv')
plot_doc_token_counts(topic_df,fig_save_path='reports/figures/testing_plotdoctokencounts.png')
#creat_wordcloud(1, mallet_model.model, mallet_model.nlp_data, fig_save_path='reports/figures/testing_createwordsclouds.png')
if False: # Word cloud for a single topic
with open('models/main_mallet_t40a50o0', 'rb') as model:
mallet_model = pickle.load(model)
create_wordcloud(1, mallet_model.model, mallet_model.nlp_data, num_w=20,
fig_save_path='reports/figures/testing_createwordsclouds.png')
if False: # Wordclouds for multiple topics
with open('models/main_mallet_t40a25o200', 'rb') as model:
mallet_model = pickle.load(model)
create_multi_wordclouds([33, 40], 1, mallet_model.model, mallet_model.nlp_data, num_w=20, fig_dpi=400, custom_titles=MAIN_TOPICS_TRUNC,
show=False, fig_save_path='reports/figures/testing_createmultiwordsclouds_t33_40.png', title_font=14)
create_multi_wordclouds(1, 1, mallet_model.model, mallet_model.nlp_data, num_w=20, fig_dpi=400, custom_titles=MAIN_TOPICS_TRUNC,
show=False, fig_save_path='reports/figures/testing_createmultiwordsclouds_1_new.png', title_font=14)
if False: # color doc with topics
with open('models/main_mallet_t40a25o200', 'rb') as model:
mallet_model = pickle.load(model)
data_path = 'data/external/data_cleaned.csv'
data_column = 'title_abstract'
df = pd.read_csv(data_path)
raw_text = df[data_column].tolist()
color_doc_topics(mallet_model.model, raw_text[49], mallet_model.nlp_data, topics=5, max_chars=120, incl_perc=True,
topic_names=MAIN_TOPICS_TRUNC, fig_save_path='reports/figures/testing_colordoctopics_highlightall.png',
highlight=True)
if False: # docs per topic dataframes
with open('models/main_mallet_t40a25o200', 'rb') as model:
mallet_model = pickle.load(model)
data_path = 'data/external/data_cleaned.csv'
data_column = 'title_abstract'
df = pd.read_csv(data_path)
raw_text = df[data_column].tolist()
df1, df2 = docs_per_topic(mallet_model.model, mallet_model.nlp_data)
print(df2.head())
if False: # docs per time dataframe
with open('models/main_mallet_t40a25o200', 'rb') as model:
mallet_model = pickle.load(model)
data_path = 'data/external/data_cleaned.csv'
data_column = 'title_abstract'
df = pd.read_csv(data_path)
raw_text = df[data_column].tolist()
year_list = df['year'].tolist()
df1, df2 = doc_topics_per_time(mallet_model.model, mallet_model.nlp_data, year_list=year_list, year_res=5)
df1.to_csv('reports/test_doc_n_per_year.csv', index=False)
df2.to_csv('reports/test_doc_w_per_year.csv', index=False)
if False: # plot docs per time
data_path1 = 'reports/test_doc_n_per_year.csv'
data_path2 = 'reports/test_doc_w_per_year.csv'
df1 = pd.read_csv(data_path1)
df2 = pd.read_csv(data_path2)
x_val = [1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015]
plot_doc_topics_per_time(df1, 40, 8, ylabel='Proportion of Documents', xlabel='Years', fig_save_path='reports/figures/doc_plot_time_n.png',
x_val=x_val, hide_x_val=True, xtick_space=10, relative_val=True, show=False)
plot_doc_topics_per_time(df1, 40, 8, ylabel='Proportion of Documents', xlabel='Years', fig_save_path='reports/figures/doc_plot_time_n_abs.png',
            x_val=x_val, hide_x_val=False, xtick_space=10, topic_names=MAIN_TOPICS, relative_val=True,
df_data2=df1, relative_val2=False, ylabel2="Absolute Count of Documents", show=False)
plot_doc_topics_per_time(df1, 40, 8, ylabel='Proportion of Documents', xlabel='Years', fig_save_path='reports/figures/doc_plot_time_n_w.png',
            df_data2=df2, topic_names=MAIN_TOPICS, show=False)
if False: # plot docs per time
data_path1 = 'reports/test_doc_n_per_year.csv'
data_path2 = 'reports/test_doc_w_per_year.csv'
df1 = pd.read_csv(data_path1)
        df2 = pd.read_csv(data_path2)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import inspect
import pandas as pd
pd.options.display.colheader_justify = 'right'
pd.set_option('display.unicode.east_asian_width', True)
#This class defines the Scoreboard data structure and its associated methods
import pandas as pd
import os
from dotenv import load_dotenv
import boto3
class Scoreboard:
standard_display = ["Member","Score"]
all_time_display = ["Member","AllTime"]
commits_display = ["Member","Commits"]
#Load up S3 and create the scoreboard as "df"
def __init__(self):
load_dotenv()
self.s3 = boto3.resource(
service_name='s3',
region_name='us-east-2',
aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY')
)
obj = self.s3.Bucket('bdc-scoreboard').Object("Big-Data-Club/scoreboard.csv").get()
        self.df = pd.read_csv(obj["Body"], index_col=0)
# This scripts are possible solutions for the Database Tasks
# Packages that might have to be installed via pip/conda:
# conda install pandas
# conda install MySQL-python
# conda install mysqlclient
# conda install pymongo
# conda install sqlite3
# General Packages
import pandas as pd
# Packages for MySQL
import MySQLdb as mysql
# Packages for MongoDB
import pymongo as pm
# Packages for SQLite
import sqlite3 as sqll
# Basic Methods for reading the content of MySQL, MongoDB and SQL Lite
# Read the content of the MySQL Table and return as Pandas Data Frame
def read_data_mysql():
print("Reading out Data with MYSQL")
conn = mysql.connect(
host ="172.16.31.10",
port=3306,
user="seminar_user",
password="<PASSWORD>",
db="seminar"
)
query = "SELECT * FROM persons"
dataframe = pd.read_sql(query, conn)
return dataframe
# Read the content of the MongoDB Collection and return as Pandas Data Frame
def read_data_mongodb():
print("Reading out Data with MongoDB")
mongo_client = pm.MongoClient('mongodb://seminar_user:[email protected]:27017/seminar')
mongo_db = mongo_client['seminar']
collection = mongo_db['persons']
content = list(collection.find().sort([("age", pm.ASCENDING)]))
return pd.DataFrame(content)
# Read the content of the SQL Lite File and return as Pandas Data Frame
# Please make sure that the "persons.db" file is in the same folder as the Python notebook that you are running
def read_data_sqlite():
print("Reading out Data with SQL Lite")
conn_lite = sqll.connect("persons.db")
query = "SELECT * FROM persons;"
dataframe = pd.read_sql_query(query, conn_lite)
return dataframe
def question_1_a(data_frame):
print("Length of Persons List", data_frame['id'].count())
def question_1_b(data_frame):
print("Length of Persons List", len(data_frame))
def question_1_c():
conn = mysql.connect(
host="172.16.31.10",
port=3306,
user="seminar_user",
password="<PASSWORD>",
db="seminar"
)
query = "SELECT COUNT(*) FROM persons"
dataframe = pd.read_sql(query, conn)
print("Length of Persons List", dataframe['COUNT(*)'][0])
def question_2_a(data_frame):
max_age = data_frame['age'].max()
print("Oldest Person's age:", max_age)
for index, item in data_frame.iterrows():
if item["age"] == max_age:
print(item)
def question_2_b(data_frame):
conn = mysql.connect(
host="172.16.31.10",
port=3306,
user="seminar_user",
password="<PASSWORD>",
db="seminar"
)
max_age = data_frame['age'].max()
query = "SELECT * FROM persons WHERE age=" + str(max_age)
data_frame_max = pd.read_sql(query, conn)
print(data_frame_max)
def question_2_c(data_frame):
max_age = data_frame['age'].max()
mongo_client = pm.MongoClient('mongodb://seminar_user:[email protected]:27017/seminar')
mongo_db = mongo_client['seminar']
collection = mongo_db['persons']
    content = list(collection.find({"age": int(max_age)}))
data_frame_max = pd.DataFrame(content)
print(data_frame_max)
def question_3_a(data_frame):
summed_ages = 0
number_ages = 0
for index, item in data_frame.iterrows():
summed_ages = summed_ages + item['age']
number_ages = number_ages + 1
print("Average age:", summed_ages / number_ages)
def question_3_b(data_frame):
avg_age = data_frame['age'].mean()
print("Average Age:", avg_age)
def question_5_a():
conn = mysql.connect(
host="172.16.31.10",
port=3306,
user="seminar_user",
password="<PASSWORD>",
db="seminar"
)
query = "SELECT * FROM persons ORDER BY firstName LIMIT 3"
    first_name_frame = pd.read_sql(query, conn)
    print(first_name_frame)
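# The variant below is an addition in the spirit of the a/b/c solutions above,
# not part of the original exercise sheet: the same "first three persons
# ordered by first name" query done purely in pandas on an already-loaded
# frame, assuming it has a 'firstName' column like the MySQL table.
def question_5_b(data_frame):
    first_name_frame = data_frame.sort_values('firstName').head(3)
    print(first_name_frame)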
import unittest
import pytest
from pyalink.alink import *
def print_value_and_type(v):
print(type(v), v)
class TestAkStream(unittest.TestCase):
def setUp(self) -> None:
self.lfs = LocalFileSystem()
self.hfs = HadoopFileSystem("2.8.3", "hdfs://xxx:9000")
self.ofs = OssFileSystem("3.4.1", "xxx", "xxx", "xxx", "xxx")
@pytest.mark.skip()
def test_stream(self):
import numpy as np
import pandas as pd
arr = np.array([
[1, 2, 3],
[1, 2, 3],
[3, 4, 5]
])
        df = pd.DataFrame(arr)
import argparse as ap
from itertools import product
from typing import List, Tuple
import bnet.utype as ut
import numpy as np
import yaml
from nptyping import NDArray
from pandas.core.frame import DataFrame
class Config(dict):
def __init__(self, path: str):
f = open(path, "r")
self.__path = path
d: dict = yaml.safe_load(f)
[self.__setitem__(item[0], item[1]) for item in d.items()]
f.close()
@property
def n(self) -> List[int]:
return self["n"]
@property
def ro(self) -> List[float]:
return self["ro"]
@property
def q_operant(self) -> Tuple[float, float, float]:
s = str(self["q-operant"]).lstrip("(").rstrip(")").split(",")
return tuple(map(lambda c: float(c), s))
@property
def q_other(self) -> Tuple[float, float, int]:
s = str(self["q-other"]).lstrip("(").rstrip(")").split(",")
return tuple(map(lambda c: float(c), s))
@property
def amount_training(self) -> List[int]:
return self["amount-training"]
@property
def schedule_value(self) -> List[float]:
return self["schedule-value"]
@property
def loop_per_condition(self) -> int:
return self["loop-per-condition"]
@property
def loop_per_simulation(self) -> int:
return self["loop-per-simulation"]
@property
def filename(self) -> str:
return self["filename"]
class ConfigClap(object):
def __init__(self):
self._parser = ap.ArgumentParser()
self._parser.add_argument("--yaml",
"-y",
help="path to configuration file (`yaml`)",
type=str)
self._args = self._parser.parse_args()
def config(self) -> Config:
yml = self._args.yaml
return Config(yml)
def generate_rewards(operant: ut.RewardValue, others: ut.RewardValue,
n: int) -> ut.RewardValues:
rewards: ut.RewardValues = np.full(n, others)
rewards[0] = operant
return rewards
def free_access(agent: ut.Agent, rewards: ut.RewardValues, n: int, loop: int):
current_action: ut.Node = np.random.choice(n)
number_of_operant = 0
number_of_all_response = 0
for i in range(loop):
next_action = agent.choose_action(rewards)
if next_action == current_action:
continue
paths = agent.find_path(current_action, next_action)
path = paths[np.random.choice(len(paths))][1:]
number_of_operant += int(0 in path)
number_of_all_response += len(path)
current_action = next_action
return number_of_operant, number_of_all_response
def run_baseline_test(agent: ut.Agent, rewards_baseline: ut.RewardValues,
rewards_test: ut.RewardValues, n: int,
loop: int) -> Tuple[int, int, float, float]:
operant_baseline, total_baseline = free_access(agent, rewards_baseline, n,
loop)
operant_test, total_test = free_access(agent, rewards_test, n, loop)
operant_deg = int(agent.network.degree[0])
median_deg = int(np.median(agent.network.degree))
prop_baseline = operant_baseline / total_baseline
prop_test = operant_test / total_test
return operant_deg, median_deg, prop_baseline, prop_test
def format_data1(result: List[Tuple[int, float, float, float, int, int, float,
float]]):
columns = [
"n", "ro", "q-operant", "q-other", "operant-degree", "other-degree",
"baseline", "test"
]
data = DataFrame(result, columns=columns)
return data
def format_data2(result: List[Tuple[float, int, float, int, int, float,
float]]):
columns = [
"schedule-value", "n", "other-reward", "amount-training",
"operant-degree", "other-degree", "baseline", "test"
]
    data = DataFrame(result, columns=columns)
    return data
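# Small illustrative example (added for clarity, not part of the original
# module): generate_rewards places the operant reward at node 0 and the
# "other" value everywhere else, and format_data1 expects one 8-tuple per
# simulated condition in the column order defined above. The numbers are
# made up for the example.
if __name__ == "__main__":
    example_rewards = generate_rewards(1.0, 0.2, n=4)   # array([1. , 0.2, 0.2, 0.2])
    example_frame = format_data1([(4, 0.1, 1.0, 0.2, 3, 2, 0.25, 0.40)])
    print(example_rewards)
    print(example_frame)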
#!/usr/bin/env python3
import gc
import os
import pickle
import fire
import h5py
import matplotlib.pyplot as plt
import seaborn as sns
from hyperopt.fmin import generate_trials_to_calculate
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import precision_recall_curve
from numpy import linalg as LA
import sklearn.metrics as metrics
import json
import lightgbm as lgb
import numpy as np
import pandas as pd
import glob
from sklearn.preprocessing import QuantileTransformer
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from sklearn.metrics import average_precision_score
from early_stopping_avg import early_stopping
from hyperopt import STATUS_OK
from hyperopt import hp
from timeit import default_timer as timer
import numpy as np
from hyperopt import tpe
from hyperopt import Trials
from hyperopt import fmin
def focal_isoform_binary_object(pred, dtrain, alpha=0.5, beta=0.0, gamma=2.0):
# alpha controls weight of positives
# (0,1) less
# >1 more or(0-0.5 less, 0.5-1 more)
# beta controls the shift of loss function
# >0 to left(less weight to well-trained samples)
# gamma controls the steepness of loss function
# >0
label = dtrain.get_label()
x = beta + (2.0 * label - 1) * gamma * pred
p = 1. / (1. + np.exp(-x))
# grad = (1 + (alpha - 1) * label) * (2 * label - 1) * (p - 1)
grad = (1 - label + (label * 2 - 1) * alpha) * (2 * label - 1) * (p - 1)
# hess = (1 + (alpha - 1) * label) * gamma * (1 - p) * p
hess = (1 - label + (label * 2 - 1) * alpha) * gamma * (1 - p) * p
return grad, hess
def lgb_auprc_score(y_hat, data):
y_true = data.get_label()
# TODO try not to round yhat
# y_hat = np.round(y_hat) # scikits f1 doesn't like probabilities
return 'auprc', average_precision_score(y_true, y_hat), True
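# Sketch of how the two callables above can be wired into LightGBM training.
# This block is an illustration added here, not code from the original
# pipeline: it assumes the pre-4.0 lightgbm API in which lgb.train accepts
# `fobj`/`feval`, and the parameter values and boosting round count are
# placeholders.
def train_with_focal_loss(train_set, valid_set, alpha=0.5, beta=0.0, gamma=2.0):
    from functools import partial
    params = {'objective': 'none', 'metric': 'None', 'verbose': -1}
    fobj = partial(focal_isoform_binary_object, alpha=alpha, beta=beta, gamma=gamma)
    return lgb.train(params, train_set,
                     num_boost_round=100,
                     valid_sets=[valid_set],
                     fobj=fobj,
                     feval=lgb_auprc_score)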
class LightGBMModel(object):
def __init__(self, config_file, training_tf_name=None,
cofactor_motif_set_file=None, quantile_transformer_path=None, dnase_feature_path=None,
motif_feature_path=None, selected_motif_feature_path=None, step=120):
with open(config_file, "r") as infile:
config = yaml.load(infile, Loader=Loader)
self.config = config
self.chrom_all = config['chrom_all']
self.region_topic_model_h5 = config['region_topic_model_h5']
self.dic_chrom_length = {}
self.chrom_sets = config['chrom_sets']
self.training_tf_name = training_tf_name
self.dic_chrom_length = {}
with open(config['chrom_size_file'], "r") as infile:
for line in infile:
line = line.strip().split("\t")
if line[0] in self.chrom_all:
self.dic_chrom_length[line[0]] = int(line[1])
# if regions_all_file is not None:
# self.df_all_regions = pd.read_csv(regions_all_file, sep="\t", header=None)
# self.df_all_regions.columns = ['chr', 'start', 'stop']
# else:
# self.df_all_regions = None
if training_tf_name is not None:
self.df_all_regions_label = pd.read_csv(
"%s/%s.%s" % (
config['training_cell_types_regions_label_path'], training_tf_name,
config['training_cell_types_regions_label_name']),
sep="\t", header=0)
else:
self.df_all_regions_label = None
if cofactor_motif_set_file is not None:
with open(cofactor_motif_set_file, "r") as infile:
self.cofactor_motif_set = json.load(infile)
else:
self.cofactor_motif_set = None
if quantile_transformer_path is None:
self.quantile_transformer_path = "./train/quantile_transformer"
else:
self.quantile_transformer_path = quantile_transformer_path
if dnase_feature_path is None:
self.dnase_feature_path = "./hdf5s/DNase"
else:
self.dnase_feature_path = dnase_feature_path
if motif_feature_path is None:
self.motif_feature_path = "./hdf5s/motif"
else:
self.motif_feature_path = motif_feature_path
if selected_motif_feature_path is None:
self.selected_motif_feature_path = "./hdf5s/motif"
else:
self.selected_motif_feature_path = selected_motif_feature_path
self.step = step
def prepare_motif_h5_data(self, chrom):
df_temp = self.df_all_regions_label[self.df_all_regions_label['chr'] == chrom].copy()
df_temp = df_temp.iloc[:, :3]
with h5py.File("%s/%s_motifs_top4_scores.h5" % (self.motif_feature_path, chrom), "r") as infile:
motif_names = infile['motif_names'][...]
motif_names = list(map(lambda x: x.decode('UTF-8'), motif_names))
# if selected_tfs is None:
# selected_tfs = motif_names
# selected_tfs=["EGR","KLF","SPI",'ETV','ZNF','GABP']
# row_indexs = [i for i, v in enumerate(motif_names) if any([tf_name in v for tf_name in selected_tfs])]
# selected_tfs_names = [v for i, v in enumerate(motif_names) if
# any([tf_name in v for tf_name in selected_tfs])]
row_index = [i for i, v in enumerate(motif_names) if v in self.cofactor_motif_set]
selected_motifs = [motif_names[i] for i in row_index]
# print(row_index)
scores = infile["scores"][row_index, :, :]
# for i in [-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13]:
for i in [-7, -5, -3, -1, 0, 1, 3, 5, 7]:
# print("%s %d" % (chrom, i))
region_index = np.array(list(map(lambda x: x / 50 + i, df_temp["start"])))
region_index = np.clip(region_index, 0, scores.shape[1] - 1)
scores_region = scores[:, region_index.astype(int), :]
for ind, j in enumerate(selected_motifs):
# for ind, j in enumerate(self.cofactor_motif_set):
for k in range(4):
df_temp["%s_offset_%d_top_%d" % (j, i, k)] = scores_region[ind, :, k]
with h5py.File('%s/%s_motif_features_lightGBM.h5' % (self.selected_motif_feature_path, chrom), "w") as outfile:
outfile.create_dataset("feature_names", data=np.array(df_temp.iloc[:, 3:].columns, dtype='S'),
shape=(df_temp.shape[1] - 3,),
dtype='S200', compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("starts", data=df_temp['start'].tolist(), shape=(df_temp.shape[0],),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("scores", data=df_temp.iloc[:, 3:].values, dtype=np.float32,
shape=(df_temp.shape[0], df_temp.shape[1] - 3),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=4)
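    # Sketch of reading back the per-chromosome feature file written by
    # prepare_motif_h5_data above; this helper is an illustration added here,
    # not a method of the original class, and the chromosome name is a
    # placeholder.
    def example_load_motif_features(self, chrom='chr19'):
        with h5py.File('%s/%s_motif_features_lightGBM.h5' % (self.selected_motif_feature_path, chrom), 'r') as infile:
            names = [n.decode('UTF-8') for n in infile['feature_names'][...]]
            starts = infile['starts'][...]
            scores = infile['scores'][:, :]
        df = pd.DataFrame(scores, columns=names)
        df.insert(0, 'start', starts)
        return df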
def prepare_dnase_autoencoder_h5_data(self, cell_line, chrom, outfile_path):
df_temp = self.df_all_regions_label[self.df_all_regions_label['chr'] == chrom].copy()
df_temp = df_temp.iloc[:, :3]
# for cell_line in self.config['cell_types']:
with h5py.File(
"%s/DNASE.%s.merge.binSize.1.corrected_sorted_hg19_25bpbin_bwaverage_transformed_%s_scanned_with_autoencoder_v4.hdf5" % (
'/n/scratchlfs/xiaoleliu_lab/cchen/Cistrome_imputation/encode/data/DNase_scanning/scan_result',
cell_line, chrom), "r") as infile:
# print(row_index)
scores = infile["DNase_feature_scanning"][:, :]
# for i in [-13,-11,-9,-7,-5,-3,-1,0,1,3,5,7,9,11,13]:
for i in [-12, -8, -4, 0, 4, 8, 12]:
# print("%s %d" % (chrom, i))
region_index = np.array(list(map(lambda x: x / 50 + i, df_temp["start"])))
region_index = np.clip(region_index, 0, scores.shape[0] - 1)
scores_region = scores[region_index.astype(int), :]
for k in range(32):
df_temp["DNase_autoencoder_offset_%d_%d" % (i, k)] = scores_region[:, k]
with h5py.File('%s/DNASE_autoencoder_lightGBM.%s.%s.h5' % (outfile_path, chrom, cell_line), "w") as outfile:
outfile.create_dataset("feature_names", data=np.array(df_temp.iloc[:, 3:].columns, dtype='S'),
shape=(df_temp.shape[1] - 3,),
dtype='S200', compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("starts", data=df_temp['start'].tolist(), shape=(df_temp.shape[0],),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)
outfile.create_dataset("scores", data=df_temp.iloc[:, 3:].values, dtype=np.float32,
shape=(df_temp.shape[0], df_temp.shape[1] - 3),
compression='gzip', shuffle=True, fletcher32=True, compression_opts=4)
def get_dnase_features(self, cell_line, chrom, dir_dnase_feature_median, selected_bin_index_file=None):
with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_all_cell_types.h5" % (
self.dnase_feature_path, chrom), "r") as infile:
samples = list(infile['samples'][...])
cell_line = str(cell_line)
samples = list(map(lambda x: x.decode('UTF-8'), samples))
cell_line_index = np.where(np.array(samples) == cell_line)[0][0]
if selected_bin_index_file is None:
cell_line_scores = infile[chrom][cell_line_index, :, :]
else:
selected_bin_index = np.load(selected_bin_index_file)
cell_line_scores = infile[chrom][cell_line_index, selected_bin_index, :]
# with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_median.h5" % (
# dir_dnase_feature_median, chrom), "r") as infile:
# if selected_bin_index_file is None:
# median_scores = infile[chrom][:, :]
# else:
# selected_bin_index = np.load(selected_bin_index_file)
# median_scores = infile[chrom][selected_bin_index, :]
# scores = np.hstack((cell_line_scores, median_scores))
# return scores
return cell_line_scores
def get_dnase_features_autoencoder(self, cell_line, chrom, feature_path, selected_bin_index_file=None):
with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_all_cell_types.h5" % (
self.dnase_feature_path, chrom), "r") as infile:
size = infile[chrom].shape[1]
with h5py.File("%s/DNASE_autoencoder_lightGBM.%s.%s.h5" % (feature_path, chrom, cell_line), "r") as infile:
if selected_bin_index_file is None:
cell_line_scores = infile['scores'][:size, :]
else:
selected_bin_index = np.load(selected_bin_index_file)
cell_line_scores = infile['scores'][:size, :]
# with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_median.h5" % (
# dir_dnase_feature_median, chrom), "r") as infile:
# if selected_bin_index_file is None:
# median_scores = infile[chrom][:, :]
# else:
# selected_bin_index = np.load(selected_bin_index_file)
# median_scores = infile[chrom][selected_bin_index, :]
# scores = np.hstack((cell_line_scores, median_scores))
# return scores
return cell_line_scores
def prepare_lightgbm_binary_dnase_feature(self, cell_line, chrom_set_name, dir_dnase_feature_median, dir_out,
reference=None, selected_bin_index_file=None, ATAC_long_short=False):
cell_line = str(cell_line)
chrom = "chr19"
# TODO change to 50bp or 100bp
with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_all_cell_types.h5" % (
self.dnase_feature_path, chrom), "r") as infile:
needed_feature_names = list(infile['feature_names'][...])
needed_feature_names = list(map(lambda x: x.decode('UTF-8'), needed_feature_names))
# with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_median.h5" % (
# # dir_dnase_feature_median, chrom), "r") as infile:
# # median_feature_names = list(infile['feature_names'][...])
# # median_feature_names = list(map(lambda x: x.decode('UTF-8'), median_feature_names))
# # needed_feature_names += median_feature_names
list_scores = []
chrom_set = self.chrom_sets[chrom_set_name]
if not ATAC_long_short:
for chrom in chrom_set:
scores = self.get_dnase_features(cell_line, chrom, dir_dnase_feature_median, selected_bin_index_file)
list_scores.append(scores)
# print(cell_line, chrom, subset_index)
all_score = np.vstack(list_scores)
# TODO change to 50bp or 100bp
with open("%s/%s_variable_bp_quantile_map.pkl" % (self.quantile_transformer_path, cell_line),
'rb') as fin:
qt = pickle.load(fin, encoding='latin1')
_ = qt.transform(all_score)
# _ = qt.transform(all_score[:, :int(all_score.shape[1] / 2)])
# _ = qt.transform(all_score[:, :cell_line_scores.shape[1]])
if reference is not None:
# reference = lgb.Dataset(glob.glob("%s/lightGBM.dnase.*.*.bin" % reference)[0])
reference = lgb.Dataset(reference)
train_data = lgb.Dataset(all_score, feature_name=list(needed_feature_names), reference=reference)
else:
list_scores_short_long = []
for frag_size in ['short','long']:
list_scores = []
for chrom in chrom_set:
scores = self.get_dnase_features("%s_%s" % (cell_line, frag_size), chrom, dir_dnase_feature_median,
selected_bin_index_file)
list_scores.append(scores)
# print(cell_line, chrom, subset_index)
all_score = np.vstack(list_scores)
# TODO change to 50bp or 100bp
with open("%s/%s_variable_bp_quantile_map.pkl" % (self.quantile_transformer_path, "%s_%s" % (cell_line, frag_size)),
'rb') as fin:
qt = pickle.load(fin, encoding='latin1')
_ = qt.transform(all_score)
# _ = qt.transform(all_score[:, :int(all_score.shape[1] / 2)])
# _ = qt.transform(all_score[:, :cell_line_scores.shape[1]])
list_scores_short_long.append(all_score)
all_score_short_long = np.hstack(list_scores_short_long)
if reference is not None:
# reference = lgb.Dataset(glob.glob("%s/lightGBM.dnase.*.*.bin" % reference)[0])
reference = lgb.Dataset(reference)
needed_feature_names_short_long = ['%s_%s' % (feature_name, frag_size) for frag_size in ['short','long']
for feature_name in needed_feature_names
]
train_data = lgb.Dataset(all_score_short_long, feature_name=list(needed_feature_names_short_long), reference=reference)
train_data.save_binary("%s/lightGBM.dnase.%s.%s.bin" % (dir_out, cell_line, chrom_set_name))
def prepare_lightgbm_binary_dnase_feature_autoencoder(self, cell_line, chrom_set_name, feature_path,
dir_out,
reference=None, selected_bin_index_file=None):
list_scores = []
chrom_set = self.chrom_sets[chrom_set_name]
for chrom in chrom_set:
scores = self.get_dnase_features_autoencoder(cell_line, chrom, feature_path,
selected_bin_index_file)
list_scores.append(scores)
# print(cell_line, chrom, subset_index)
all_score = np.vstack(list_scores)
if reference is not None:
reference = lgb.Dataset(glob.glob("%s/lightGBM.autoencoder.dnase.*.*.bin" % reference)[0])
needed_feature_names = []
for i in [-12, -8, -4, 0, 4, 8, 12]:
for k in range(32):
needed_feature_names.append("DNase_autoencoder_offset_%d_%d" % (i, k))
train_data = lgb.Dataset(all_score, feature_name=list(needed_feature_names), reference=reference)
train_data.save_binary("%s/lightGBM.autoencoder.dnase.%s.%s.bin" % (dir_out, cell_line, chrom_set_name))
def prepare_lightgbm_binary_data_motif_feature_subset(self, chrom_set_name, subset_index, dir_out,
selected_bin_index_file=None, reference=None):
chrom = "chr19"
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
all_feature_names = list(infile['feature_names'][...])
all_feature_names = list(map(lambda x: x.decode('UTF-8'), all_feature_names))
needed_feature_names = [all_feature_names[i:i + self.step]
for i in range(0, len(all_feature_names), self.step)][subset_index - 1]
feature_index = [list(range(i, min(i + self.step, len(all_feature_names)))) for i in
range(0, len(all_feature_names), self.step)][subset_index - 1]
# needed_feature_names = list(map(lambda x: x.decode('UTF-8'), needed_feature_names))
list_scores = []
chrom_set = self.chrom_sets[chrom_set_name]
with h5py.File(self.region_topic_model_h5, "r") as region_topic_infile:
for chrom in chrom_set:
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
# feature_names = list(infile['feature_names'][...])
# feature_names = list(map(lambda x: x.decode('UTF-8'), feature_names))
# feature_index = [i for i, v in enumerate(feature_names) if (v in needed_feature_names)]
if selected_bin_index_file is None:
scores = infile['scores'][:, feature_index]
if subset_index == 1:
scores = np.hstack([region_topic_infile[chrom][:, :], scores])
else:
selected_bin_index = np.load(selected_bin_index_file)
scores = infile['scores'][selected_bin_index, feature_index]
if subset_index == 1:
scores = np.hstack([region_topic_infile[chrom][selected_bin_index, :], scores])
list_scores.append(scores)
# print(cell_line, chrom, subset_index)
all_score = np.vstack(list_scores)
if reference is not None:
reference = lgb.Dataset(glob.glob("%s/lightGBM.motif.*.%d.bin" % (reference, subset_index))[0])
if subset_index == 1:
# needed_feature_names = ["topic_%d" % topic_id for topic_id in range(9)] \
# + needed_feature_names
# train_data = lgb.Dataset(all_score, categorical_feature=[8],
# feature_name=list(needed_feature_names), reference=reference)
needed_feature_names = ["topic_%d" % topic_id for topic_id in range(1)] \
+ needed_feature_names
train_data = lgb.Dataset(all_score[:, 8:],
categorical_feature=[0],
feature_name=list(needed_feature_names), reference=reference)
else:
train_data = lgb.Dataset(all_score, feature_name=list(needed_feature_names), reference=reference)
train_data.save_binary("%s/lightGBM.motif.%s.%d.bin" % (dir_out, chrom_set_name, subset_index))
# def merge_lightgbm_binary_data(self, cell_line, chrom_set_name, dir_out):
# all_feature_names = []
# chrom = "chr22"
# # TODO change to 50bp or 100bp
# # with h5py.File("%s/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_%s_all_cell_types.h5" % (
# # self.dnase_feature_path, chrom), "r") as infile:
# # all_feature_names += list(infile['feature_names'][...])
# # chrom = "chr22"
# with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
# "r") as infile:
# all_feature_names += list(infile['feature_names'][...])
# all_feature_names = list(map(lambda x: x.decode('UTF-8'), all_feature_names))
# # for cell_line in self.df_all_regions_label.columns.tolist()[3:]:
# for cell_line in [cell_line]:
# train_data_all = None
# for subset_index in range(int(np.ceil(len(all_feature_names) / self.step) + 1)):
# train_data = lgb.Dataset("%s/lightGBM.%s.%s.%d.bin" %
# (dir_out, cell_line, chrom_set_name, subset_index)).construct()
# if train_data_all is None:
# train_data_all = train_data
# else:
# # train_data_all=train_data_all.add_features_from(train_data)
# train_data_all.add_features_from(train_data)
# # print(subset_index)
# train_data_all.save_binary("%s/lightGBM_all.%s.%s.bin" % (dir_out, cell_line, chrom_set_name))
# print(cell_line, chrom_set_name)
def merge_lightgbm_binary_data(self, cell_line, chrom_set_name, dir_out=None, lightgbm_dnase_binary_files_path=None,
lightgbm_motif_binary_files_path=None):
if dir_out is None:
dir_out = "./train/%s/binary_files" % self.training_tf_name
if lightgbm_motif_binary_files_path is None:
lightgbm_motif_binary_files_path = "./train/%s/binary_files" % self.training_tf_name
if lightgbm_dnase_binary_files_path is None:
lightgbm_dnase_binary_files_path = "./train/data/dnase_feature_binary_files"
cell_line = str(cell_line)
all_feature_names = []
chrom = "chr19"
# TODO change to 50bp or 100bp
with h5py.File("%s/%s_motif_features_lightGBM.h5" % (self.selected_motif_feature_path, chrom),
"r") as infile:
all_feature_names += list(infile['feature_names'][...])
all_feature_names = list(map(lambda x: x.decode('UTF-8'), all_feature_names))
train_data_all = lgb.Dataset("%s/lightGBM.dnase.%s.%s.bin" %
(lightgbm_dnase_binary_files_path, cell_line, chrom_set_name)).construct()
for subset_index in range(int(np.ceil(len(all_feature_names) / self.step))):
train_data = lgb.Dataset("%s/lightGBM.motif.%s.%d.bin" %
(lightgbm_motif_binary_files_path, chrom_set_name, subset_index + 1)).construct()
train_data_all.add_features_from(train_data)
temp = []
chrom_set = self.chrom_sets[chrom_set_name]
for chrom in chrom_set:
df_temp = self.df_all_regions_label.loc[self.df_all_regions_label['chr'] == chrom, :]
temp.append(df_temp)
        df_all_temp = pd.concat(temp, ignore_index=True)
"""
<NAME>017
Variational Autoencoder - Pan Cancer
scripts/vae_pancancer.py
Usage:
Run in command line with required command arguments:
python scripts/vae_pancancer.py --learning_rate
--batch_size
--epochs
--kappa
--depth
--output_filename
--num_components
--scale
--subset_mad_genes
--dataset
Typically, arguments to this script are compiled automatically.
See `scripts/num_components_paramsweep.py` for more details
Output:
Loss and validation loss for the specific model trained
"""
import os
import sys
import argparse
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import tensorflow as tf
from keras.layers import Input, Dense, Lambda, Layer, Activation
from keras.layers.normalization import BatchNormalization
from keras.models import Model, Sequential
from keras import backend as K
from keras import metrics, optimizers
from keras.callbacks import Callback
def run_vae(rnaseq_file, learning_rate, batch_size, epochs, kappa, depth, first_layer, output_filename, latent_dim, scale, subset_mad_genes, data_basename):
# Random seed
seed = int(np.random.randint(low=0, high=10000, size=1))
np.random.seed(seed)
# Load Data
#file = 'train_{}_expression_matrix_processed.tsv.gz'.format(dataset.lower())
#rnaseq_file = os.path.join('..', '0.expression-download', 'data', file)
    rnaseq_df = pd.read_table(rnaseq_file, index_col=0)
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv(StringIO(self.data1), nrows=3.0)
tm.assert_frame_equal(df, expected)
msg = r"'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=1.2)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), nrows=-1)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# with invalid chunksize value:
msg = r"'chunksize' must be an integer >=1"
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=1.3)
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize='foo')
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(self.data1), chunksize=0)
def test_read_chunksize_and_nrows(self):
# gh-15755
# With nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=2, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# chunksize > nrows
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(pd.concat(reader), df)
# with changing "size":
reader = self.read_csv(StringIO(self.data1), index_col=0,
chunksize=8, nrows=5)
df = self.read_csv(StringIO(self.data1), index_col=0, nrows=5)
tm.assert_frame_equal(reader.get_chunk(size=2), df.iloc[:2])
tm.assert_frame_equal(reader.get_chunk(size=4), df.iloc[2:5])
with pytest.raises(StopIteration):
reader.get_chunk(size=3)
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
assert len(piece) == 2
def test_read_chunksize_generated_index(self):
# GH 12185
reader = self.read_csv(StringIO(self.data1), chunksize=2)
df = self.read_csv(StringIO(self.data1))
tm.assert_frame_equal(pd.concat(reader), df)
reader = self.read_csv(StringIO(self.data1), chunksize=2, index_col=0)
df = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(pd.concat(reader), df)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# See gh-6607
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
assert isinstance(treader, TextFileReader)
# gh-3967: stopping iteration when chunksize is specified
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
assert len(result) == 3
tm.assert_frame_equal(pd.concat(result), expected)
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# test bad parameter (skipfooter)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skipfooter=1)
pytest.raises(ValueError, reader.read, 3)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_blank_df(self):
# GH 14545
data = """a,b
"""
df = self.read_csv(StringIO(data), header=[0])
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(df, expected)
round_trip = self.read_csv(StringIO(
expected.to_csv(index=False)), header=[0])
tm.assert_frame_equal(round_trip, expected)
data_multiline = """a,b
c,d
"""
df2 = self.read_csv(StringIO(data_multiline), header=[0, 1])
cols = MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
expected2 = DataFrame(columns=cols)
tm.assert_frame_equal(df2, expected2)
round_trip = self.read_csv(StringIO(
expected2.to_csv(index=False)), header=[0, 1])
tm.assert_frame_equal(round_trip, expected2)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
assert df.index.name is None
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = self.read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/parser/data/salaries.csv')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
df.to_csv,
lambda p: self.read_csv(p, index_col=0))
tm.assert_frame_equal(df, result)
def test_nonexistent_path(self):
# gh-2428: pls no segfault
# gh-14086: raise more helpful FileNotFoundError
path = '%s.csv' % tm.rands(10)
pytest.raises(compat.FileNotFoundError, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
assert result['D'].isna()[1:].all()
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
        sfile = StringIO(s)
import streamlit as st
from alphapept.gui.utils import (
check_process,
init_process,
start_process,
escape_markdown,
)
from alphapept.paths import PROCESSED_PATH, PROCESS_FILE, QUEUE_PATH, FAILED_PATH
from alphapept.settings import load_settings_as_template, save_settings
import os
import psutil
import datetime
import pandas as pd
import time
import yaml
import alphapept.interface
def queue_watcher():
"""
Start the queue_watcher.
"""
# This is in pool and should be reporting.
print(f"{datetime.datetime.now()} Started queue_watcher")
init_process(PROCESS_FILE)
while True:
queue_files = [_ for _ in os.listdir(QUEUE_PATH) if _.endswith(".yaml")]
# print(f'{datetime.datetime.now()} queue_watcher running. {len(queue_files)} experiments to process.')
if len(queue_files) > 0:
created = [
time.ctime(os.path.getctime(os.path.join(QUEUE_PATH, _)))
for _ in queue_files
]
queue_df = pd.DataFrame(queue_files, columns=["File"])
queue_df["Created"] = created
file_to_process = queue_df.sort_values("Created")["File"].iloc[0]
file_path = os.path.join(QUEUE_PATH, file_to_process)
settings = load_settings_as_template(file_path)
current_file = {}
current_file["started"] = datetime.datetime.now()
current_file["file"] = file_to_process
current_file_path = os.path.join(QUEUE_PATH, "current_file")
with open(current_file_path, "w") as file:
yaml.dump(current_file, file, sort_keys=False)
logfile = os.path.join(
PROCESSED_PATH, os.path.splitext(file_to_process)[0] + ".log"
)
try:
settings_ = alphapept.interface.run_complete_workflow(
settings, progress=True, logfile=logfile
)
save_settings(settings_, os.path.join(PROCESSED_PATH, file_to_process))
except Exception as e:
print(f"Run {file_path} failed with {e}")
settings_ = settings.copy()
settings_["error"] = f"{e}"
save_settings(settings_, os.path.join(FAILED_PATH, file_to_process))
if os.path.isfile(current_file_path):
os.remove(current_file_path)
os.remove(file_path)
else:
time.sleep(15)
def terminate_process():
with st.spinner("Terminating processes.."):
running, last_pid, p_name, status, queue_watcher_state = check_process(
PROCESS_FILE
)
parent = psutil.Process(last_pid)
procs = parent.children(recursive=True)
for p in procs:
p.terminate()
gone, alive = psutil.wait_procs(procs, timeout=3)
for p in alive:
p.kill()
parent.terminate()
parent.kill()
st.success(f"Terminated {last_pid}")
current_file = os.path.join(QUEUE_PATH, "current_file")
with open(current_file, "r") as file:
cf_ = yaml.load(file, Loader=yaml.FullLoader)
cf = cf_["file"]
file_in_process = os.path.join(QUEUE_PATH, cf)
target_file = os.path.join(FAILED_PATH, cf)
os.rename(file_in_process, target_file)
st.success(
f"Moved {escape_markdown(file_in_process)} to {escape_markdown(target_file)}"
)
if os.path.isfile(current_file):
os.remove(current_file)
st.success(f"Cleaned up {escape_markdown(current_file)}")
time.sleep(3)
raise st.script_runner.RerunException(st.script_request_queue.RerunData(None))
def status():
st.write("# Status")
st.text(
f"This page shows the status of the current analysis.\nSwitch to `New experiment` to define a new experiment.\nSwitch to `Results` to see previous results."
)
status_msg = st.empty()
failed_msg = st.empty()
current_log = ""
log_txt = []
st.write("## Progress")
overall_txt = st.empty()
overall_txt.text("Overall: 0%")
overall = st.progress(0)
task = st.empty()
task.text("Current task: None")
current_p = st.empty()
current_p.text("Current progess: 0%")
current = st.progress(0)
last_log = st.empty()
st.write("## Hardware utilization")
c1,c2 = st.columns(2)
c1.text("Ram")
ram = c1.progress(0)
c2.text("CPU")
cpu = c2.progress(0)
running, last_pid, p_name, status, queue_watcher_state = check_process(PROCESS_FILE)
if not running:
start_process(target=queue_watcher, process_file=PROCESS_FILE, verbose=False)
st.warning(
"Initializing Alphapept and waiting for process to start. Please refresh page in a couple of seconds."
)
if not queue_watcher_state:
with st.spinner('Waiting for AlphaPept process to start.'):
while not queue_watcher_state:
running, last_pid, p_name, status, queue_watcher_state = check_process(PROCESS_FILE)
time.sleep(1)
raise st.script_runner.RerunException(st.script_request_queue.RerunData(None))
current_file = os.path.join(QUEUE_PATH, "current_file")
with st.expander(f"Full log "):
log_ = st.empty()
with st.expander(f"Queue"):
queue_table = st.empty()
with st.expander(f"Failed"):
failed_table = st.empty()
if st.checkbox("Terminate process"):
st.error(
f"This will abort the current run and move it to failed. Please confirm."
)
if st.button("Confirm"):
terminate_process()
while True:
ram.progress(
1 - psutil.virtual_memory().available / psutil.virtual_memory().total
)
cpu.progress(psutil.cpu_percent() / 100)
queue_files = [_ for _ in os.listdir(QUEUE_PATH) if _.endswith(".yaml")]
failed_files = [_ for _ in os.listdir(FAILED_PATH) if _.endswith(".yaml")]
n_failed = len(failed_files)
n_queue = len(queue_files)
if n_queue == 0:
status_msg.success(
f'{datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S")} No files to process. Please add new experiments.'
)
current.progress(0)
overall.progress(0)
overall_txt.text("Overall: 0%")
task.text("None")
last_log.code("")
queue_table.table(pd.DataFrame())
else:
if os.path.isfile(current_file):
with open(current_file, "r") as file:
cf_ = yaml.load(file, Loader=yaml.FullLoader)
cf = cf_["file"]
cf_start = cf_["started"]
now = datetime.datetime.now()
delta = f"{now-cf_start}".split('.')[0]
status_msg.success(
f'{now.strftime("%d.%m.%Y %H:%M:%S")} Processing {escape_markdown(cf)}. Time elapsed {delta}'
)
logfile = os.path.join(PROCESSED_PATH, os.path.splitext(cf)[0] + ".log")
if current_log != logfile:
current_log = logfile
log_txt = []
            with open(logfile, "r") as f:
                lines = f.readlines()[-200:]  # Limit to 200 lines
for line in lines:
if "__progress_current" in line:
current_p_ = float(line.split("__progress_current ")[1][:5])
current.progress(current_p_)
current_p.text(f"Current progress: {current_p_*100:.2f}%")
elif "__progress_overall" in line:
overall_p = float(line.split("__progress_overall ")[1][:5])
overall.progress(overall_p)
overall_txt.text(f"Overall: {overall_p*100:.2f}%")
elif "__current_task" in line:
task_ = line.strip("\n").split("__current_task ")[1]
task.text(f"Current task: {task_}")
else:
log_txt.append(line)
last_log.code("".join(log_txt[-3:]))
log_.code("".join(log_txt))
created = [
time.ctime(os.path.getctime(os.path.join(QUEUE_PATH, _)))
for _ in queue_files
]
queue_df = pd.DataFrame(queue_files, columns=["File"])
queue_df["Created"] = created
queue_table.table(queue_df)
if n_failed == 1:
failed_msg.error(f"{n_failed} run failed. Please check {FAILED_PATH}.")
elif n_failed > 1:
failed_msg.error(f"{n_failed} runs failed. Please check {FAILED_PATH}.")
            failed_table.table(pd.DataFrame(failed_files))
"""
Helper functions for loading, converting, reshaping data
"""
import pandas as pd
import json
from pandas.api.types import CategoricalDtype
FILLNA_VALUE_CAT = 'NaN'
CATEGORICAL = "categorical"
CONTINUOUS = "continuous"
ORDINAL = "ordinal"
COLUMN_CATEGORICAL = 'categorical_columns'
COLUMN_CONTINUOUS = 'continuous_columns'
COLUMN_ORDINAL = 'ordinal_columns'
def load_local_data_as_df(filename):
with open(f'{filename}.json') as f:
metadata = json.load(f)
dtypes = {cd['name']:_get_dtype(cd) for cd in metadata['columns']}
df = pd.read_csv(f'{filename}.csv', dtype=dtypes)
metadata[COLUMN_CATEGORICAL], metadata[COLUMN_ORDINAL], metadata[COLUMN_CONTINUOUS] = _get_columns(metadata)
return df, metadata
def load_local_data_as_array(filename):
df = pd.read_csv(f'{filename}.csv')
with open(f'{filename}.json') as f:
metadata = json.load(f)
metadata[COLUMN_CATEGORICAL], metadata[COLUMN_ORDINAL], metadata[COLUMN_CONTINUOUS] = _get_columns(metadata)
data = convert_df_to_array(df, metadata)
return data, metadata
def _get_dtype(cd):
if cd['type'] == 'continuous':
return float
else:
return str
def _get_columns(metadata):
categorical_columns = list()
ordinal_columns = list()
continuous_columns = list()
for column_idx, column in enumerate(metadata['columns']):
if column['type'] == CATEGORICAL:
categorical_columns.append(column_idx)
elif column['type'] == ORDINAL:
ordinal_columns.append(column_idx)
elif column['type'] == CONTINUOUS:
continuous_columns.append(column_idx)
return categorical_columns, ordinal_columns, continuous_columns
def _load_json(path):
with open(path) as json_file:
return json.load(json_file)
def convert_array_to_df(data, metadata):
df = pd.DataFrame(data)
column_names = []
for i, col in enumerate(metadata['columns']):
column_names.append(col['name'])
if col['type'] in [CATEGORICAL, ORDINAL]:
df.iloc[:, i] = df.iloc[:, i].astype('object')
df.iloc[:, i] = df.iloc[:, i].map(pd.Series(col['i2s']))
df.columns = column_names
return df
def convert_df_to_array(df, metadata):
dfcopy = df.copy()
for col in metadata['columns']:
if col['name'] in list(dfcopy):
col_data = dfcopy[col['name']]
if col['type'] in [CATEGORICAL, ORDINAL]:
if len(col_data) > len(col_data.dropna()):
col_data = col_data.fillna(FILLNA_VALUE_CAT)
if FILLNA_VALUE_CAT not in col['i2s']:
col['i2s'].append(FILLNA_VALUE_CAT)
col['size'] += 1
                cat = CategoricalDtype(categories=col['i2s'], ordered=True)
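# Illustrative example (added here; not part of the original module) of the
# metadata JSON that load_local_data_as_df / convert_df_to_array expect next
# to the CSV: one entry per column, where categorical/ordinal columns carry an
# 'i2s' list mapping integer codes to labels and a 'size' category count.
# Column names and values are invented.
EXAMPLE_METADATA = {
    'columns': [
        {'name': 'age', 'type': CONTINUOUS},
        {'name': 'sex', 'type': CATEGORICAL, 'size': 2, 'i2s': ['female', 'male']},
        {'name': 'education', 'type': ORDINAL, 'size': 3,
         'i2s': ['primary', 'secondary', 'tertiary']},
    ]
}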
from numbers import Number
from collections.abc import Iterable
import re
import pandas as pd
from pandas.io.stata import StataReader
import numpy as np
pd.set_option('expand_frame_repr', False)
class hhkit(object):
def __init__(self, *args, **kwargs):
# if input data frame is specified as a stata data file or text file
if len(args) > 0:
if isinstance(args[0], pd.DataFrame):
self.from_dict(args[0])
else:
compiled_pattern = re.compile(r'\.(?P<extension>.{3})$')
p = re.search(compiled_pattern,str(args[0]))
if p is not None:
if (p.group('extension').lower() == "dta"):
self.read_stata(*args, **kwargs)
elif (p.group('extension').lower() == "csv" or p.group('extension').lower() == "txt"):
                        self.df = pd.read_csv(*args, **kwargs)
self._initialize_variable_labels()
else:
pass
# print('Unrecognized file type: %s' % p.group('extension'))
else:
                    # p is None here, so no file extension could be parsed from the input
                    print('Unrecognized file type: %s' % str(args[0]))
def _is_numeric(self, obj):
for element in obj:
try:
0+element
except TypeError:
return False
return True
def _create_key(self, dfon):
new_list = []
for mytuple in zip(*dfon):
temp_new_list_item = ''
for item in mytuple:
temp_new_list_item += str(item)
new_list += [temp_new_list_item]
return new_list
def _initialize_variable_labels(self):
# make sure variable_labels exists
try: self.variable_labels
except: self.variable_labels = {}
# make sure each column has a variable label
for var in self.df.columns.values:
# check if var is already in the list of variable labels
if var not in self.variable_labels:
self.variable_labels[var] = ''
return self.variable_labels
def _make_include_exclude_series(self, df, include, exclude):
using_excl = False
if (include is None) and (exclude is None):
# Make an array or data series same length as df with all entries true - all rows are included
include = pd.Series([True]*df.shape[0])
elif (include is not None) and (exclude is not None):
# raise an error saying that can only specify one
raise Exception("Specify either include or exclude, but not both")
elif (include is not None):
# check that dimensions and content are correct
pass
elif (exclude is not None):
# check that dimensions and content are correct
using_excl = True
include = exclude
# include = np.invert(exclude)
# Lets make sure we work with boolean include arrays/series. Convert numeric arrays to boolean
if (self._is_numeric(include)):
# Make this a boolean
include = [x!=0 for x in include]
elif (include.dtype is not np.dtype('bool')):
raise Exception('The include and exclude series or arrays must be either numeric or boolean.')
if (using_excl):
include = np.invert(include)
return include
def set_variable_labels(self, varlabeldict={}):
self._initialize_variable_labels()
for var in varlabeldict:
self.variable_labels[var] = varlabeldict[var]
return self.variable_labels
    # Generic 'egen' method; e.g. operation='count' grouped by a household id gives household size
def egen(self, operation, groupby, column, obj=None, column_label='', include=None, exclude=None, varlabel='',
replacenanwith=None):
if obj is None:
df = self.df
else:
df=obj.df
include = self._make_include_exclude_series(df, include, exclude)
if column_label == '':
column_label = '('+operation+') '+column+' by '+groupby
result = df[include].groupby(groupby)[column].agg([operation])
result.rename(columns={operation:column_label}, inplace=True)
merged = pd.merge(df, result, left_on=groupby, right_index=True, how='left')
if replacenanwith is not None:
merged[column_label][merged[column_label].isnull()]=replacenanwith
self.df = merged
self.set_variable_labels(varlabeldict={column_label:varlabel,})
return merged
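    # Hedged usage sketch (added for illustration, not part of the original class), left as a
    # comment so it is never executed; the column names below are hypothetical:
    #   demo = hhkit(pd.DataFrame({'hh_id': [1, 1, 2], 'person_id': [1, 2, 1]}))
    #   demo.egen(operation='count', groupby='hh_id', column='person_id',
    #             column_label='hh_size', varlabel='household size')
    # This attaches an 'hh_size' column holding the number of members per hh_id.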
def read_stata(self, *args, **kwargs):
reader = StataReader(*args, **kwargs)
self.df = reader.data()
self.variable_labels = reader.variable_labels()
self._initialize_variable_labels()
self.value_labels = reader.value_labels()
# self.data_label = reader.data_label()
return self.df
def sdesc(self, varlist=None, varnamewidth=20, vartypewidth=10, varlabelwidth=70, borderwidthinchars=100):
if varlist is None:
list_of_vars = self.df.columns.values
else:
list_of_vars = varlist
print('-'*borderwidthinchars)
print('obs: %d' % self.df.shape[0])
print('vars: %d' % len(list_of_vars))
print('-'*borderwidthinchars)
# print('--------'.ljust(varnamewidth), '---------'.ljust(vartypewidth), ' ', '--------------'.ljust(varlabelwidth), end='\n')
print('Variable'.ljust(varnamewidth), 'Data Type'.ljust(vartypewidth), ' ', 'Variable Label'.ljust(varlabelwidth), end='\n')
print('-'*borderwidthinchars)
# print('--------'.ljust(varnamewidth), '---------'.ljust(vartypewidth), ' ', '--------------'.ljust(varlabelwidth), end='\n')
for x in list_of_vars:
print(repr(x).ljust(varnamewidth), str(self.df[x].dtype).ljust(vartypewidth), ' ', self.variable_labels[x].ljust(varlabelwidth), end='\n')
return True
def from_dict(self, *args, **kwargs):
self.df = pd.DataFrame(*args, **kwargs)
self.variable_labels = {}
self.value_labels = {}
return self.df
def statamerge(self, obj, on, how='outer', mergevarname='_m', replacelabels=True):
df_using_right = obj.df
# create a unique key based on the 'on' list
dfon_left_master = [self.df[x] for x in on]
dfon_left_master2 = []
for dfx in dfon_left_master:
if dfx.dtype is not np.dtype('object'): # We want to allow string keys
dfon_left_master2 += [dfx.astype(float)] # We want 1 and 1.0 to be considered equal when converted
# to a string, so make them 1.0 and 1.0 respectively
else:
dfon_left_master2 += [dfx]
dfon_right_using = [df_using_right[x] for x in on]
dfon_right_using2 = []
for dfx in dfon_right_using:
if dfx.dtype is not np.dtype('object'):
dfon_right_using2 += [dfx.astype(float)]
else:
                dfon_right_using2 += [dfx]
left_master_on_key = self._create_key(dfon_left_master2)
right_using_on_key = self._create_key(dfon_right_using2)
# create a new column in each dataset with the combined key
self.df['_left_merge_key'] = pd.Series(left_master_on_key)
df_using_right['_right_merge_key'] = | pd.Series(right_using_on_key) | pandas.Series |
#IMPORTING LIBRARIES
import numpy as np
import pandas as pd
import os
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from statistics import mean
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from sklearn.utils import resample
from sklearn.preprocessing import LabelEncoder
from scipy import stats
import random
from matplotlib import rcParams
import re
import tensorflow as tf
import tensorflowjs as tfjs
from tensorflow.keras import models, regularizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from main_module import get_acc,model,split #IMPORTING FROM main_module.py
import warnings
#######################CONFIG_ONLY########################################
#SETTING UP SOME CONFIG
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
#CHECKING TF VERSIONS
print("tf version : {}".format(tf.__version__)) #IN MY CASE ITS 2.3+
print("tfjs version : {}".format(tfjs.__version__)) #IN MY CASE ITS 2.7.0
#SEEDING EVERYTHING
def seed_everything(seed):
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
os.environ['TF_KERAS'] = '1'
SEED = 42
seed_everything(SEED)
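# Fixing the numpy, TensorFlow and hash seeds above is intended to make the runs below reproducible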
#######################CONFIG_ONLY########################################
#IMPORT DATA
df3 = | pd.read_csv('../input/indian-liver-patient-records/indian_liver_patient.csv') | pandas.read_csv |
from typing import Tuple
import warnings
warnings.simplefilter("ignore", UserWarning)
from functools import partial
from multiprocessing.pool import Pool
import pandas as pd
import numpy as np
import numpy_groupies as npg
from cellphonedb.src.core.core_logger import core_logger
from cellphonedb.src.core.models.complex import complex_helper
def get_significant_means(real_mean_analysis: pd.DataFrame,
result_percent: pd.DataFrame,
min_significant_mean: float = None) -> pd.DataFrame:
"""
Get the significant means for gene1_gene2|cluster1_cluster2.
    For statistical_analysis, `min_significant_mean` needs to be provided:
    if `result_percent > min_significant_mean` the value is set to
    NaN, otherwise the mean is used.
    For the basic and DEGs analyses `min_significant_mean` is NOT provided,
    and `result_percent == 0` determines which values are set to NaN; otherwise the mean is used.
Parameters
----------
real_mean_analysis : pd.DataFrame
Mean results for each gene|cluster combination
result_percent : pd.DataFrame
Percent results for each gene|cluster combination
    min_significant_mean : float, optional
        Filter value for result_percent; it is used for statistical_analysis
        but should be 0 for the non-statistical and DEGs analyses.
Example
-------
INPUT:
real mean
cluster1 cluster2 cluster
ensembl1 0.1 1.0 2.0
ensembl2 2.0 0.1 0.2
ensembl3 0.3 0.0 0.5
result percent
cluster1 cluster2 cluster
ensembl1 0.0 1.0 1.0
ensembl2 0.04 0.03 0.62
ensembl3 0.3 0.55 0.02
min_significant_mean = 0.05
RESULT:
cluster1 cluster2 cluster
ensembl1 0.1 NaN NaN
ensembl2 2.0 0.1 NaN
ensembl3 NaN NaN 0.5
Returns
-------
pd.DataFrame
Significant means data frame. Columns are cluster interactions (cluster1|cluster2)
and rows are NaN if there is no significant interaction or the mean value of the
interaction if it is a relevant interaction.
"""
significant_means = real_mean_analysis.values.copy()
if min_significant_mean:
mask = result_percent > min_significant_mean
else:
mask = result_percent == 0
significant_means[mask] = np.nan
return pd.DataFrame(significant_means,
index=real_mean_analysis.index,
columns=real_mean_analysis.columns)
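# Hedged usage sketch (added for illustration, not part of the original module): reproduces the
# docstring example above with two rows and hypothetical cluster-pair columns.
def _example_get_significant_means():
    columns = ['cluster1|cluster1', 'cluster1|cluster2']
    means = pd.DataFrame([[0.1, 1.0], [2.0, 0.1]], index=['ensembl1', 'ensembl2'], columns=columns)
    percents = pd.DataFrame([[0.0, 1.0], [0.04, 0.03]], index=means.index, columns=columns)
    # with min_significant_mean=0.05, entries whose percent exceeds 0.05 are replaced by NaN
    return get_significant_means(means, percents, min_significant_mean=0.05)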
def shuffle_meta(meta: pd.DataFrame) -> pd.DataFrame:
"""
    Randomly permutes the meta values, generating a new shuffled meta
Parameters
----------
meta: pd.DataFrame
Meta data
Returns
-------
pd.DataFrame
A shuffled copy of the input values.
"""
meta_copy = meta.copy()
np.random.shuffle(meta_copy['cell_type'].values)
return meta_copy
def build_clusters(meta: pd.DataFrame,
counts: pd.DataFrame,
complex_composition: pd.DataFrame,
skip_percent: bool) -> dict:
"""
Builds a cluster structure and calculates the means values.
This method builds the means and percent values for each cluster and stores
the results in a dictionary with the following keys: 'names', 'means' and
'percents'.
Parameters
----------
meta: pd.DataFrame
Meta data.
counts: pd.DataFrame
Counts data
complex_composition: pd.DataFrame
Complex data.
skip_percent: bool
        Whether to skip computing the percent-expressed values per cluster:
- True for statistical analysis
- False for non-statistical and DEGs analysis
Returns
-------
dict: Dictionary containing the following:
- names: cluster names
- means: cluster means
- percents: cluster percents
"""
CELL_TYPE = 'cell_type'
COMPLEX_ID = 'complex_multidata_id'
PROTEIN_ID = 'protein_multidata_id'
meta[CELL_TYPE] = meta[CELL_TYPE].astype('category')
cluster_names = meta[CELL_TYPE].cat.categories
# Simple genes cluster counts
cluster_means = pd.DataFrame(
npg.aggregate(meta[CELL_TYPE].cat.codes, counts.values, func='mean', axis=1),
index=counts.index,
columns=cluster_names.to_list()
)
if not skip_percent:
cluster_pcts = pd.DataFrame(
npg.aggregate(meta[CELL_TYPE].cat.codes, (counts > 0).astype(int).values, func='mean', axis=1),
index=counts.index,
columns=cluster_names.to_list()
)
else:
cluster_pcts = pd.DataFrame(index=counts.index, columns=cluster_names.to_list())
# Complex genes cluster counts
if not complex_composition.empty:
complexes = complex_composition.groupby(COMPLEX_ID).apply(lambda x: x[PROTEIN_ID].values).to_dict()
complex_cluster_means = pd.DataFrame(
{complex_id: cluster_means.loc[protein_ids].min(axis=0).values
for complex_id, protein_ids in complexes.items()},
index=cluster_means.columns
).T
cluster_means = cluster_means.append(complex_cluster_means)
if not skip_percent:
complex_cluster_pcts = pd.DataFrame(
{complex_id: cluster_pcts.loc[protein_ids].min(axis=0).values
for complex_id, protein_ids in complexes.items()},
index=cluster_pcts.columns
).T
cluster_pcts = cluster_pcts.append(complex_cluster_pcts)
return {'names': cluster_names, 'means': cluster_means, 'percents': cluster_pcts}
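# Hedged usage sketch (added for illustration, not part of the original module): a toy meta/counts
# pair with made-up cell and gene names and no complexes, so only the simple-gene branch runs.
def _example_build_clusters():
    meta = pd.DataFrame({'cell_type': ['A', 'A', 'B']}, index=['c1', 'c2', 'c3'])
    counts = pd.DataFrame([[1.0, 3.0, 0.0], [0.0, 0.0, 2.0]],
                          index=['gene1', 'gene2'], columns=['c1', 'c2', 'c3'])
    clusters = build_clusters(meta, counts, complex_composition=pd.DataFrame(), skip_percent=False)
    # clusters['means'] holds per-cluster mean expression, clusters['percents'] the fraction of
    # expressing cells per cluster
    return clusters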
def filter_counts_by_interactions(counts: pd.DataFrame,
interactions: pd.DataFrame) -> pd.DataFrame:
"""
Removes counts if is not defined in interactions components
"""
multidata_genes_ids = interactions['multidata_1_id'].append(
interactions['multidata_2_id']).drop_duplicates().tolist()
counts_filtered = counts.filter(multidata_genes_ids, axis=0)
return counts_filtered
def filter_empty_cluster_counts(counts: pd.DataFrame) -> pd.DataFrame:
"""
    Remove counts whose values are all zero
"""
if counts.empty:
return counts
filtered_counts = counts[counts.apply(lambda row: row.sum() > 0, axis=1)]
return filtered_counts
def mean_pvalue_result_build(real_mean_analysis: pd.DataFrame, result_percent: pd.DataFrame,
interactions_data_result: pd.DataFrame) -> pd.DataFrame:
"""
Merges the pvalues and means in one table
"""
mean_pvalue_result = pd.DataFrame(index=real_mean_analysis.index)
for interaction_cluster in real_mean_analysis.columns.values:
mean_pvalue_result[interaction_cluster] = real_mean_analysis[interaction_cluster].astype(str).str.cat(
result_percent[interaction_cluster].astype(str), sep=' | ')
mean_pvalue_result = | pd.concat([interactions_data_result, mean_pvalue_result], axis=1, join='inner', sort=False) | pandas.concat |
"""
Active Fairness Run through questions
"""
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.calibration import _SigmoidCalibration
from sklearn.isotonic import IsotonicRegression
from joblib import Parallel, delayed
import pathos.multiprocessing as multiprocessing
from sklearn.model_selection import train_test_split
from numpy import genfromtxt
import numpy as np
from collections import Counter
import pandas as pd
import time
import random
from copy import deepcopy
class TreeNode:
'''
A node in the "featured tree"
'''
def __init__(self, threshold, dummy = False):
'''
threshold: The threshold of this node
dummy: whether it's a fake node or not (The fake node can only be the root node of the tree)
'''
self.children_left = [] # nodes in its left (and of lower level in original tree)
self.children_right = [] # nodes in its right (and of lower level in original tree)
self.threshold = threshold
self.node_set = [set(), set()] # set of leaf nodes in its left and right,
# self.node_set[0] are the nodes in the left
# self.node_set[1] are the nodes in the right
self.dummy = dummy
class TreeProcess:
def __init__(self, tree, all_features):
'''
tree: the tree trained by random forest
all_features: all possible features in this tree
'''
rootNode = 0
node_trace = []
self.children_left = tree.children_left
self.children_right = tree.children_right
child_left_dict = {}
child_right_dict = {}
for i in range(len(self.children_left)):
child_left_dict[i] = self.children_left[i]
for i in range(len(self.children_right)):
child_right_dict[i] = self.children_right[i]
self.threshold = tree.threshold
self.feature = tree.feature
self.values = tree.value
self.weighted_samples = tree.weighted_n_node_samples
        # children_left, children_right, threshold, feature, values and weighted_samples are arrays indexed by node id: given a node index they return that node's attribute in the original tree.
self.total_leaf_id = set() # ids of all leaves in this tree
self.feature2nodes = {} # dict, key is the name of features, value is the TreeNode object of the root for that 'feature tree'
self.nodeid2TreeNode = {} # dict, key is the id of nodes in original tree, value is the TreeNode object corresponds to that node
self.feature2threshold_list = {} # dict, key is name of features, value is a list of all thresholds for that feature
self.featureAndthreshold2delete_set = {} # dict, key is name of features, value is another dict, with key as threshold value, and value as a set of leaf node ids to be delted
        self.tree_single_value_shape = np.shape(self.values[0]) # shape of a single node's value array in 'self.values'
        self.unique_feature = set() # features used for at least one split in this tree (unlike self.feature, which lists the split feature of every node)
if self.feature[rootNode] == -2:
assert False, "The root of a tree is a leaf, please verify"
for feature in all_features:
# construct feature tree for all features
queue = [rootNode]
if feature == self.feature[rootNode]:
# if the root node of original tree is of this feature, there is no need for a dummy
queue = []
self.nodeid2TreeNode[rootNode] = TreeNode(self.threshold[rootNode])
self.feature2nodes[feature] = self.nodeid2TreeNode[rootNode]
result_list = []
left_node = self.children_left[rootNode]
self.node_traverse(result_list, left_node, feature) # get all non-leaf nodes of this feature in the left sub-tree
self.nodeid2TreeNode[rootNode].children_left = result_list
result_list = []
right_node = self.children_right[rootNode]
self.node_traverse(result_list, right_node, feature) # get all non-leaf nodes of this feature in the right sub-tree
self.nodeid2TreeNode[rootNode].children_right = result_list
result_set = set()
self.node_traverse_leaf(result_set, left_node) # get all leaf nodes it can reach in the left sub-tree
self.nodeid2TreeNode[rootNode].node_set[0] = result_set
result_set = set()
self.node_traverse_leaf(result_set, right_node) # get all leaf nodes it can reach in the right sub-tree
self.nodeid2TreeNode[rootNode].node_set[1] = result_set
queue.append(left_node)
queue.append(right_node)
else:
# if the root node of original tree is not of this feature, we need to have a dummy root for this feature tree
self.feature2nodes[feature] = TreeNode(-1, True) # add a dummy root
result_list = []
left_node = self.children_left[rootNode]
self.node_traverse(result_list, left_node, feature) # get all non-leaf nodes of this feature in the left sub-tree
self.feature2nodes[feature].children_left = result_list
result_list = []
right_node = self.children_right[rootNode]
self.node_traverse(result_list, right_node, feature)# get all non-leaf nodes of this feature in the right sub-tree
self.feature2nodes[feature].children_right = result_list
while queue:
current_node = queue.pop(0)
if feature == self.feature[current_node]:
# find a node of given feature
self.nodeid2TreeNode[current_node] = TreeNode(self.threshold[current_node])
result_list = []
left_node = self.children_left[current_node]
self.node_traverse(result_list, left_node, feature) # get all non-leaf nodes of this feature in the left sub-tree
self.nodeid2TreeNode[current_node].children_left = result_list
result_list = []
right_node = self.children_right[current_node]
self.node_traverse(result_list, right_node, feature) # get all non-leaf nodes of this feature in the right sub-tree
self.nodeid2TreeNode[current_node].children_right = result_list
result_set = set()
self.node_traverse_leaf(result_set, left_node)
self.nodeid2TreeNode[current_node].node_set[0] = result_set # get all leaf nodes it can reach in the left sub-tree
result_set = set()
self.node_traverse_leaf(result_set, right_node)
self.nodeid2TreeNode[current_node].node_set[1] = result_set # get all leaf nodes it can reach in the right sub-tree
if self.feature[current_node] != -2:
# if not the leaf
queue.append(self.children_left[current_node])
queue.append(self.children_right[current_node])
for feature in all_features:
threshold_set = set()
queue = [self.feature2nodes[feature]] # get the root in feature tree
while queue:
currentNode = queue.pop(0)
if currentNode.dummy != True:
threshold_set.add(currentNode.threshold)
for node in currentNode.children_left:
queue.append(self.nodeid2TreeNode[node])
for node in currentNode.children_right:
queue.append(self.nodeid2TreeNode[node])
threshold_list = sorted(list(threshold_set)) # rank the list in increasing threshold
self.feature2threshold_list[feature] = threshold_list
self.featureAndthreshold2delete_set[feature] = {}
for feature in self.feature2threshold_list.keys():
l = len(self.feature2threshold_list[feature])
if l == 0:
continue
for i in range(l):
threshold = self.feature2threshold_list[feature][i]
delete_set_equal_or_less = set() # the nodes to be deleted if equal or less than the threshold
queue = [self.feature2nodes[feature]] # the root of feature tree
while queue:
currentTreeNode = queue.pop(0)
if currentTreeNode.dummy == True:
for node in currentTreeNode.children_left:
queue.append(self.nodeid2TreeNode[node])
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
else:
if threshold <= currentTreeNode.threshold:
# current value (threshold) is equal or less than threshold for this node, go to the left sub-tree for this node
for node in currentTreeNode.children_left:
queue.append(self.nodeid2TreeNode[node])
delete_set_equal_or_less |= currentTreeNode.node_set[1] # delete all leaf-nodes can be reached in the right sub-tree
else:
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
delete_set_equal_or_less |= currentTreeNode.node_set[0]
self.featureAndthreshold2delete_set[feature][threshold] = delete_set_equal_or_less
delete_set_larger = set() # the nodes to be deleted if larger than the threshold
queue = [self.feature2nodes[feature]] # the root of feature tree
while queue:
currentTreeNode = queue.pop(0)
if currentTreeNode.dummy == True:
for node in currentTreeNode.children_left:
queue.append(self.nodeid2TreeNode[node])
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
else:
for node in currentTreeNode.children_right:
queue.append(self.nodeid2TreeNode[node])
delete_set_larger |= currentTreeNode.node_set[0]
self.featureAndthreshold2delete_set[feature][np.inf] = delete_set_larger
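        # At this point featureAndthreshold2delete_set[feature][t] lists every leaf that becomes
        # unreachable once we know feature <= t; the np.inf key covers values above all thresholds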
for feature in self.feature2threshold_list.keys():
if len(self.feature2threshold_list[feature]) > 0:
self.unique_feature.add(feature)
def node_traverse_leaf(self,
result_set,
currentNode):
# get all leaf nodes which can be reached starting from one node
nodeFeature = self.feature[currentNode]
if nodeFeature == -2:
result_set.add(currentNode)
self.total_leaf_id.add(currentNode)
return
self.node_traverse_leaf(result_set, self.children_left[currentNode])
self.node_traverse_leaf(result_set, self.children_right[currentNode])
def node_traverse(self,
result_list,
currentNode,
feature_target):
nodeFeature = self.feature[currentNode]
if nodeFeature == feature_target:
result_list.append(currentNode)
return
if nodeFeature == -2:
return
self.node_traverse(result_list, self.children_left[currentNode], feature_target)
self.node_traverse(result_list, self.children_right[currentNode], feature_target)
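# Hedged usage sketch (added for illustration, not part of the original module): wraps a tiny
# sklearn tree; the synthetic data below is made up purely to exercise the pruning tables.
def _example_tree_process():
    X = np.array([[0.0, 5.0], [0.0, 6.0], [1.0, 5.0], [1.0, 6.0]] * 5)
    y = np.array([0, 0, 1, 1] * 5)
    clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
    tp = TreeProcess(clf.tree_, all_features=[0, 1])
    # tp.feature2threshold_list[f] lists the split thresholds seen for feature f, and
    # tp.featureAndthreshold2delete_set[f][t] the leaves ruled out once feature f <= t is known
    return tp.feature2threshold_list, tp.featureAndthreshold2delete_set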
class ActiveFairness(object):
def __init__(self,
dataset_train, dataset_test,
clf,
sensitive_features = [],
target_label = []):
'''
dataset_train: training dataset, type: MexicoDataset()
dataset_test: testing dataset, type: MexicoDataset()
clf: trained randomforest classifier
sensitive_features: a list of sensitive features which should be removed when doing prediction
target_label: a list of features whose values are to be predicted
'''
        assert len(target_label) == 1, "Error in ActiveFairness: target_label must contain exactly one feature"
train = dataset_train.features
complete_data = dataset_train.metadata['previous'][0]
self.feature2columnmap = {}
test = dataset_test.features
feature_name = pd.DataFrame(complete_data.feature_names)
y_column_index = ~(feature_name.isin(sensitive_features + target_label).iloc[:, 0])
y_column_index_inverse = (feature_name.isin(sensitive_features + target_label).iloc[:, 0])
index = 0
for i in range(len(y_column_index_inverse)):
if y_column_index_inverse.iloc[i] == True:
self.feature2columnmap[complete_data.feature_names[i]] = index
index += 1
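        # feature2columnmap now maps each sensitive/target feature name to its column position
        # within the reduced (sensitive + target only) matrices built below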
self.target_label = target_label
self.sensitive_features = sensitive_features
self.dataset_train = dataset_train
self.dataset_test = dataset_test
        self.X_tr_sensitiveAtarget = pd.DataFrame(train[:, y_column_index_inverse]) # the dataframe of all samples in the training dataset which keeps only the sensitive and target features
self.X_tr = pd.DataFrame(train[:, y_column_index])
self.y_tr = pd.DataFrame(self.dataset_train.labels[:, 0]).iloc[:, 0]
        self.X_te_sensitiveAtarget = pd.DataFrame(test[:, y_column_index_inverse]) # the dataframe of all samples in the testing dataset which keeps only the sensitive and target features
self.X_te = pd.DataFrame(test[:, y_column_index])
self.y_te = | pd.DataFrame(self.dataset_test.labels[:, 0]) | pandas.DataFrame |
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
__author__ = ["<NAME>"]
__all__ = [
"TEST_YS",
"TEST_SPS",
"TEST_ALPHAS",
"TEST_FHS",
"TEST_STEP_LENGTHS_INT",
"TEST_STEP_LENGTHS",
"TEST_INS_FHS",
"TEST_OOS_FHS",
"TEST_WINDOW_LENGTHS_INT",
"TEST_WINDOW_LENGTHS",
"TEST_INITIAL_WINDOW_INT",
"TEST_INITIAL_WINDOW",
"VALID_INDEX_FH_COMBINATIONS",
"INDEX_TYPE_LOOKUP",
"TEST_RANDOM_SEEDS",
"TEST_N_ITERS",
]
import numpy as np
import pandas as pd
from sktime.utils._testing.series import _make_series
# We here define the parameter values for unit testing.
TEST_CUTOFFS_INT = [np.array([21, 22]), np.array([3, 7, 10])]
# The following timestamps correspond
# to the above integers for `_make_series(all_positive=True)`
TEST_CUTOFFS_TIMESTAMP = [
pd.to_datetime(["2000-01-22", "2000-01-23"]),
pd.to_datetime(["2000-01-04", "2000-01-08", "2000-01-11"]),
]
TEST_CUTOFFS = [*TEST_CUTOFFS_INT, *TEST_CUTOFFS_TIMESTAMP]
TEST_WINDOW_LENGTHS_INT = [1, 5]
TEST_WINDOW_LENGTHS_TIMEDELTA = [pd.Timedelta(1, unit="D"), pd.Timedelta(5, unit="D")]
TEST_WINDOW_LENGTHS_DATEOFFSET = [pd.offsets.Day(1), pd.offsets.Day(5)]
TEST_WINDOW_LENGTHS = [
*TEST_WINDOW_LENGTHS_INT,
*TEST_WINDOW_LENGTHS_TIMEDELTA,
*TEST_WINDOW_LENGTHS_DATEOFFSET,
]
TEST_INITIAL_WINDOW_INT = [7, 10]
TEST_INITIAL_WINDOW_TIMEDELTA = [pd.Timedelta(7, unit="D"), pd.Timedelta(10, unit="D")]
TEST_INITIAL_WINDOW_DATEOFFSET = [pd.offsets.Day(7), pd.offsets.Day(10)]
TEST_INITIAL_WINDOW = [
*TEST_INITIAL_WINDOW_INT,
*TEST_INITIAL_WINDOW_TIMEDELTA,
*TEST_INITIAL_WINDOW_DATEOFFSET,
]
TEST_STEP_LENGTHS_INT = [1, 5]
TEST_STEP_LENGTHS_TIMEDELTA = [pd.Timedelta(1, unit="D"), pd.Timedelta(5, unit="D")]
TEST_STEP_LENGTHS_DATEOFFSET = [pd.offsets.Day(1), pd.offsets.Day(5)]
TEST_STEP_LENGTHS = [
*TEST_STEP_LENGTHS_INT,
*TEST_STEP_LENGTHS_TIMEDELTA,
*TEST_STEP_LENGTHS_DATEOFFSET,
]
TEST_OOS_FHS = [1, np.array([2, 5], dtype="int64")] # out-of-sample
TEST_INS_FHS = [
-3, # single in-sample
np.array([-2, -5], dtype="int64"), # multiple in-sample
0, # last training point
np.array([-3, 2], dtype="int64"), # mixed in-sample and out-of-sample
]
TEST_FHS = [*TEST_OOS_FHS, *TEST_INS_FHS]
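# Combined horizons: positive steps are out-of-sample, zero/negative steps are in-sample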
TEST_OOS_FHS_TIMEDELTA = [
[pd.Timedelta(1, unit="D")],
[pd.Timedelta(2, unit="D"), pd.Timedelta(5, unit="D")],
] # out-of-sample
TEST_INS_FHS_TIMEDELTA = [
| pd.Timedelta(-3, unit="D") | pandas.Timedelta |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
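    # Each helper below runs the same operation on a Modin frame and on an equivalent pandas
    # frame, then checks that both either return equal results or raise the same exception type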
def inter_df_math_helper(self, modin_df, pandas_df, op):
        # Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
            # Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused but there so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated but easier than using list in parameterize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
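    # test_apply_metadata checks that a column computed row-wise with
    # apply(axis=1) can be assigned back onto the Modin frame and still match pandas.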
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
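    # test_as_matrix mirrors the deprecated DataFrame.as_matrix API: whole-frame
    # conversion, column selection, mixed dtypes, complex values, and the
    # single-block corner case are each compared against pandas.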
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
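    # test_astype_category checks categorical casts both per-column (via a dtype
    # dict) and frame-wide, and compares the resulting dtypes explicitly.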
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
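    # clip/clip_lower/clip_upper are exercised with scalar bounds and with
    # per-label list bounds; e.g. (illustrative) clipping [1, 5, 9] to the
    # range [2, 8] yields [2, 5, 8].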
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
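    # The cummax/cummin/cumprod/cumsum tests below share one template: run the
    # pandas operation first and, if it raises, require Modin to raise the same
    # exception type; otherwise compare the results (and again on the transpose).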
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
        # pandas exhibits inconsistent behavior for this case.
        # Remove this special-casing once we can pull the error messages from the backend.
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
            # We have to do this because we pick the most frequent value ("top")
            # slightly differently than pandas when counts are tied, so there is no
            # guarantee which value comes first. If the frames don't match, at least
            # make sure that `count`, `unique`, and `freq` agree.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
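    # test_describe_dtypes mixes three object columns with one integer column so
    # that describe() has to choose which columns to summarize by default.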
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
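    # test_drop_api_equivalence verifies that the labels/axis spelling and the
    # index/columns keyword spelling of drop() produce identical frames, and
    # that mixing the two raises ValueError.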
def test_drop_api_equivalence(self):
        # equivalence of the labels/axis and index/columns APIs
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
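    # Regression-style check: drop_duplicates after sort_values on a frame whose
    # index labels have gaps (non-contiguous values) must still match pandas.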
def test_drop_duplicates_with_missing_index_values(self):
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
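    # The dropna tests below cover how="any"/"all", thresh, subset, multiple
    # axes, and inplace variants; e.g. (illustrative) dropna(thresh=2) keeps
    # only rows that have at least two non-NA values.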
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
        # pandas_df is unused here; it is built anyway so the test data handling
        # stays out of the pytest.mark.parametrize (no list-comprehension tricks there).
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
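    # duplicated() is checked with every `keep` option and with a random column
    # subset; e.g. (illustrative) keep="first" marks all but the first occurrence
    # of each duplicated row as True.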
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
        # We are not testing when axis is over columns (axis=1) until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
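    # The fillna tests below cover scalar values, method="pad"/"ffill"/"bfill",
    # dict and Series values, limits, downcasting, inplace behavior, and filling
    # from another DataFrame (only shared labels get filled).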
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
        assert modin_df.first_valid_index() == pandas_df.first_valid_index()
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
            # Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
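    # test_insert covers inserting a Series, a duplicate column, a scalar, and a
    # single-column DataFrame, plus the error cases: wrong length, re-using an
    # existing column name, an out-of-bounds loc, and a multi-column frame.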
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
# Scalar
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [
True if i % 3 == 0 else False for i in range(len(modin_df.index))
]
columns = [
True if i % 5 == 0 else False for i in range(len(modin_df.columns))
]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
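# Standard reduction pattern: if pandas raises for this argument combination, Modin is expected to raise too; otherwise the results must match.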
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# An unnamed Series cannot be merged; a named Series is promoted to a single-column DataFrame
s = pd.Series(frame_data2.get("col1"))
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mode(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)
except Exception:
with pytest.raises(TypeError):
modin_df.mode(axis=axis, numeric_only=numeric_only)
else:
modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.ndim == pandas_df.ndim
def test_nlargest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nlargest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notna(), pandas_df.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notnull(), pandas_df.notnull())
def test_nsmallest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nsmallest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"dropna", bool_arg_values, ids=arg_keys("dropna", bool_arg_keys)
)
def test_nunique(self, data, axis, dropna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
def test_pct_change(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
n = len(modin_df.index)
a, b, c = 2 % n, 0, 3 % n
col = modin_df.columns[3 % len(modin_df.columns)]
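# h drops one column, g appends the frame to itself arg1 times, and f drops two rows; chaining them via pipe must equal the directly nested calls.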
def h(x):
return x.drop(columns=[col])
def g(x, arg1=0):
for _ in range(arg1):
x = x.append(x)
return x
def f(x, arg2=0, arg3=0):
return x.drop([arg2, arg3])
df_equals(
f(g(h(modin_df), arg1=a), arg2=b, arg3=c),
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
df_equals(
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
(pandas_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
def test_pivot(self):
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df.pivot(index="foo", columns="bar", values="baz")
def test_pivot_table(self):
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df.pivot_table(values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to test this way because equality in plots means same object.
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for l, r in zipped_plot_lines:
if isinstance(l.get_xdata(), np.ma.core.MaskedArray) and isinstance(
r.get_xdata(), np.ma.core.MaskedArray
):
assert all((l.get_xdata() == r.get_xdata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
if isinstance(l.get_ydata(), np.ma.core.MaskedArray) and isinstance(
r.get_ydata(), np.ma.core.MaskedArray
):
assert all((l.get_ydata() == r.get_ydata()).data)
else:
assert np.array_equal(l.get_ydata(), r.get_ydata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_product(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.product(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(self, request, data, q):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.quantile(q), pandas_df.quantile(q))
df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))
try:
pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.quantile(q)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))
df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))
try:
pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.T.quantile(q)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("funcs", query_func_values, ids=query_func_keys)
def test_query(self, data, funcs):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
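# An empty expression is invalid, and local-variable references via @ are not yet implemented in Modin.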
with pytest.raises(ValueError):
modin_df.query("")
with pytest.raises(NotImplementedError):
x = 2 # noqa F841
modin_df.query("col1 < @x")
try:
pandas_result = pandas_df.query(funcs)
except Exception as e:
with pytest.raises(type(e)):
modin_df.query(funcs)
else:
modin_result = modin_df.query(funcs)
df_equals(modin_result, pandas_result)
def test_query_after_insert(self):
modin_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
modin_df["z"] = modin_df.eval("x / y")
modin_df = modin_df.query("z >= 0")
modin_result = modin_df.reset_index(drop=True)
modin_result.columns = ["a", "b", "c"]
pandas_df = pandas.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
pandas_df["z"] = pandas_df.eval("x / y")
pandas_df = pandas_df.query("z >= 0")
pandas_result = pandas_df.reset_index(drop=True)
pandas_result.columns = ["a", "b", "c"]
df_equals(modin_result, pandas_result)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(self, data, axis, numeric_only, na_option):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.rank(axis=axis, numeric_only=numeric_only, na_option=na_option)
else:
modin_result = modin_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
df_equals(modin_result, pandas_result)
def test_reindex(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
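# Reindexing with labels that do not exist introduces NaN-filled rows/columns; results must match pandas.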
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(
index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]
),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like(self):
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity(self):
test_data = TestData()
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
modin_df = pd.DataFrame(test_data.frame)
df_equals(
modin_df.rename(columns=mapping), test_data.frame.rename(columns=mapping)
)
renamed2 = test_data.frame.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(
modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper)
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetically
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
tm.assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
tm.assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = test_data.frame.rename(columns={"C": "foo", "D": "bar"})
modin_df = pd.DataFrame(test_data.frame)
tm.assert_index_equal(
modin_df.rename(columns={"C": "foo", "D": "bar"}).index,
test_data.frame.rename(columns={"C": "foo", "D": "bar"}).index,
)
# TODO: Uncomment when transpose works
# other axis
# renamed = test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
# tm.assert_index_equal(
# test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'}).index,
# modin_df.T.rename(index={'C': 'foo', 'D': 'bar'}).index)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.skip(reason="Pandas does not pass this test")
def test_rename_nocopy(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
modin_renamed = modin_df.rename(columns={"C": "foo"}, copy=False)
modin_renamed["foo"] = 1
assert (modin_df["C"] == 1).all()
def test_rename_inplace(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
df_equals(
modin_df.rename(columns={"C": "foo"}),
test_data.rename(columns={"C": "foo"}),
)
frame = test_data.copy()
modin_frame = modin_df.copy()
frame.rename(columns={"C": "foo"}, inplace=True)
modin_frame.rename(columns={"C": "foo"}, inplace=True)
df_equals(modin_frame, frame)
def test_rename_bug(self):
# Regression test: rename used to set ref_locs, and set_index was not resetting them
frame_data = {0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# df = df.set_index(['a', 'b'])
# df.columns = ['2001-01-01']
modin_df = modin_df.rename(columns={0: "a"})
modin_df = modin_df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# modin_df = modin_df.set_index(['a', 'b'])
# modin_df.columns = ['2001-01-01']
df_equals(modin_df, df)
def test_rename_axis(self):
data = {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}
index = ["dog", "cat", "monkey"]
modin_df = pd.DataFrame(data, index)
pandas_df = pandas.DataFrame(data, index)
df_equals(modin_df.rename_axis("animal"), pandas_df.rename_axis("animal"))
df_equals(
modin_df.rename_axis("limbs", axis="columns"),
pandas_df.rename_axis("limbs", axis="columns"),
)
modin_df.rename_axis("limbs", axis="columns", inplace=True)
pandas_df.rename_axis("limbs", axis="columns", inplace=True)
df_equals(modin_df, pandas_df)
new_index = pd.MultiIndex.from_product(
[["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
)
modin_df.index = new_index
pandas_df.index = new_index
df_equals(
modin_df.rename_axis(index={"type": "class"}),
pandas_df.rename_axis(index={"type": "class"}),
)
df_equals(
modin_df.rename_axis(columns=str.upper),
pandas_df.rename_axis(columns=str.upper),
)
df_equals(
modin_df.rename_axis(
columns=[str.upper(o) for o in modin_df.columns.names]
),
pandas_df.rename_axis(
columns=[str.upper(o) for o in pandas_df.columns.names]
),
)
with pytest.raises(ValueError):
df_equals(
modin_df.rename_axis(str.upper, axis=1),
pandas_df.rename_axis(str.upper, axis=1),
)
def test_rename_axis_inplace(self):
test_frame = TestData().frame
modin_df = pd.DataFrame(test_frame)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("foo", inplace=True)
modin_no_return = modin_result.rename_axis("foo", inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
modin_no_return = modin_result.rename_axis("bar", axis=1, inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
def test_reorder_levels(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.reorder_levels(["Letter", "Color", "Number"])
def test_replace(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).replace()
def test_resample(self):
d = dict(
{
"price": [10, 11, 9, 13, 14, 18, 17, 19],
"volume": [50, 60, 40, 100, 50, 100, 40, 50],
}
)
df = pd.DataFrame(d)
df["week_starting"] = pd.date_range("01/01/2018", periods=8, freq="W")
with pytest.warns(UserWarning):
df.resample("M", on="week_starting")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reset_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.reset_index(inplace=False)
pandas_result = pandas_df.reset_index(inplace=False)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pd_df_cp = pandas_df.copy()
modin_df_cp.reset_index(inplace=True)
pd_df_cp.reset_index(inplace=True)
df_equals(modin_df_cp, pd_df_cp)
def test_rolling(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.rolling(2, win_type="triang")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.round(), pandas_df.round())
df_equals(modin_df.round(1), pandas_df.round(1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_sample(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
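# Invalid combinations first: n together with frac, a nonexistent weight column, weights of the wrong length, negative n, and empty weights all raise.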
with pytest.raises(ValueError):
modin_df.sample(n=3, frac=0.4, axis=axis)
with pytest.raises(KeyError):
modin_df.sample(frac=0.5, weights="CoLuMn_No_ExIsT", axis=0)
with pytest.raises(ValueError):
modin_df.sample(frac=0.5, weights=modin_df.columns[0], axis=1)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5, weights=[0.5 for _ in range(len(modin_df.index[:-1]))], axis=0
)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5,
weights=[0.5 for _ in range(len(modin_df.columns[:-1]))],
axis=1,
)
with pytest.raises(ValueError):
modin_df.sample(n=-3, axis=axis)
with pytest.raises(ValueError):
modin_df.sample(frac=0.2, weights=pandas.Series(), axis=axis)
if isinstance(axis, str):
num_axis = pandas.DataFrame()._get_axis_number(axis)
else:
num_axis = axis
# weights that sum to 1
sums = sum(i % 2 for i in range(len(modin_df.axes[num_axis])))
weights = [i % 2 / sums for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
# weights that don't sum to 1
weights = [i % 2 for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=0, axis=axis)
pandas_result = pandas_df.sample(n=0, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(frac=0.5, random_state=42, axis=axis)
pandas_result = pandas_df.sample(frac=0.5, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=2, random_state=42, axis=axis)
pandas_result = pandas_df.sample(n=2, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
def test_select_dtypes(self):
frame_data = {
"test1": list("abc"),
"test2": np.arange(3, 6).astype("u1"),
"test3": np.arange(8.0, 11.0, dtype="float64"),
"test4": [True, False, True],
"test5": pandas.date_range("now", periods=3).values,
"test6": list(range(5, 8)),
}
df = pandas.DataFrame(frame_data)
rd = pd.DataFrame(frame_data)
include = (float, "integer")
exclude = (np.bool_,)
r = rd.select_dtypes(include=include, exclude=exclude)
e = df[["test2", "test3", "test6"]]
df_equals(r, e)
r = rd.select_dtypes(include=np.bool_)
e = df[["test4"]]
df_equals(r, e)
r = rd.select_dtypes(exclude=np.bool_)
e = df[["test1", "test2", "test3", "test5", "test6"]]
df_equals(r, e)
# select_dtypes with neither include nor exclude must raise
with pytest.raises(ValueError):
    pd.DataFrame().select_dtypes()
def test_sem(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).sem()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_set_axis(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
x = pandas.DataFrame()._get_axis_number(axis)
index = modin_df.columns if x else modin_df.index
labels = ["{0}_{1}".format(index[i], i) for i in range(modin_df.shape[x])]
modin_result = modin_df.set_axis(labels, axis=axis, inplace=False)
pandas_result = pandas_df.set_axis(labels, axis=axis, inplace=False)
df_equals(modin_result, pandas_result)
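# The old positional signature with axis before labels is deprecated and should emit a FutureWarning.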
with pytest.warns(FutureWarning):
modin_df.set_axis(axis, labels, inplace=False)
modin_df_copy = modin_df.copy()
modin_df.set_axis(labels, axis=axis, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_axis(labels, axis=axis, inplace=True)
df_equals(modin_df, pandas_df)
with pytest.warns(FutureWarning):
modin_df.set_axis(labels, axis=axis, inplace=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"drop", bool_arg_values, ids=arg_keys("drop", bool_arg_keys)
)
@pytest.mark.parametrize(
"append", bool_arg_values, ids=arg_keys("append", bool_arg_keys)
)
def test_set_index(self, request, data, drop, append):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.set_index(
key, drop=drop, append=append, inplace=False
)
pandas_result = pandas_df.set_index(
key, drop=drop, append=append, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_copy = modin_df.copy()
modin_df.set_index(key, drop=drop, append=append, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_index(key, drop=drop, append=append, inplace=True)
df_equals(modin_df, pandas_df)
def test_set_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).set_value(0, 0, 0)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.shape == pandas_df.shape
def test_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.size == pandas_df.size
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_skew(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
def test_slice_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).slice_shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
@pytest.mark.parametrize(
"sort_remaining", bool_arg_values, ids=arg_keys("sort_remaining", bool_arg_keys)
)
def test_sort_index(self, data, axis, ascending, na_position, sort_remaining):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# Change index value so sorting will actually make a difference
if axis == "rows" or axis == 0:
length = len(modin_df.index)
modin_df.index = [(i - length / 2) % length for i in range(length)]
pandas_df.index = [(i - length / 2) % length for i in range(length)]
# Add NaNs to sorted index
if axis == "rows" or axis == 0:
length = len(modin_df.index)
modin_df.index = [
np.nan if i % 2 == 0 else modin_df.index[i] for i in range(length)
]
pandas_df.index = [
np.nan if i % 2 == 0 else pandas_df.index[i] for i in range(length)
]
else:
length = len(modin_df.columns)
modin_df.columns = [
np.nan if i % 2 == 0 else modin_df.columns[i] for i in range(length)
]
pandas_df.columns = [
np.nan if i % 2 == 0 else pandas_df.columns[i] for i in range(length)
]
modin_result = modin_df.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=False
)
pandas_result = pandas_df.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=True
)
pandas_df_cp.sort_index(
axis=axis, ascending=ascending, na_position=na_position, inplace=True
)
df_equals(modin_df_cp, pandas_df_cp)
# MultiIndex
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pd.MultiIndex.from_tuples(
[(i // 10, i // 5, i) for i in range(len(modin_df))]
)
pandas_df.index = pandas.MultiIndex.from_tuples(
[(i // 10, i // 5, i) for i in range(len(pandas_df))]
)
with pytest.warns(UserWarning):
df_equals(modin_df.sort_index(level=0), pandas_df.sort_index(level=0))
with pytest.warns(UserWarning):
df_equals(modin_df.sort_index(axis=0), pandas_df.sort_index(axis=0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
def test_sort_values(self, request, data, axis, ascending, na_position):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name and (
(axis == 0 or axis == "rows")
or name_contains(request.node.name, numeric_dfs)
):
index = (
modin_df.index if axis == 1 or axis == "columns" else modin_df.columns
)
key = index[0]
modin_result = modin_df.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
pandas_result = pandas_df.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
pandas_df_cp.sort_values(
key,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
df_equals(modin_df_cp, pandas_df_cp)
keys = [key, index[-1]]
modin_result = modin_df.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
pandas_result = pandas_df.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=False,
)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
pandas_df_cp.sort_values(
keys,
axis=axis,
ascending=ascending,
na_position=na_position,
inplace=True,
)
df_equals(modin_df_cp, pandas_df_cp)
def test_squeeze(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
frame_data_2 = {"col1": [0, 1, 2, 3]}
frame_data_3 = {
"col1": [0],
"col2": [4],
"col3": [8],
"col4": [12],
"col5": [0],
}
frame_data_4 = {"col1": [2]}
frame_data_5 = {"col1": ["string"]}
# Different data for different cases
pandas_df = pandas.DataFrame(frame_data).squeeze()
modin_df = pd.DataFrame(frame_data).squeeze()
df_equals(modin_df, pandas_df)
pandas_df_2 = pandas.DataFrame(frame_data_2).squeeze()
modin_df_2 = pd.DataFrame(frame_data_2).squeeze()
df_equals(modin_df_2, pandas_df_2)
pandas_df_3 = pandas.DataFrame(frame_data_3).squeeze()
modin_df_3 = pd.DataFrame(frame_data_3).squeeze()
df_equals(modin_df_3, pandas_df_3)
pandas_df_4 = pandas.DataFrame(frame_data_4).squeeze()
modin_df_4 = pd.DataFrame(frame_data_4).squeeze()
df_equals(modin_df_4, pandas_df_4)
pandas_df_5 = pandas.DataFrame(frame_data_5).squeeze()
modin_df_5 = pd.DataFrame(frame_data_5).squeeze()
df_equals(modin_df_5, pandas_df_5)
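# Timestamp-valued frame: after transposing and setting the date column as the index, selecting a single row must still match pandas.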
data = [
[
pd.Timestamp("2019-01-02"),
pd.Timestamp("2019-01-03"),
pd.Timestamp("2019-01-04"),
pd.Timestamp("2019-01-05"),
],
[1, 1, 1, 2],
]
df = pd.DataFrame(data, index=["date", "value"]).T
pf = pandas.DataFrame(data, index=["date", "value"]).T
df.set_index("date", inplace=True)
pf.set_index("date", inplace=True)
df_equals(df.iloc[0], pf.iloc[0])
def test_stack(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).stack()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_std(self, request, data, axis, skipna, numeric_only, ddof):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.T.std(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
def test_style(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).style
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_sum(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.sum(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.sum(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.sum(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sum_single_column(self, data):
modin_df = pd.DataFrame(data).iloc[:, [0]]
pandas_df = pandas.DataFrame(data).iloc[:, [0]]
df_equals(modin_df.sum(), pandas_df.sum())
df_equals(modin_df.sum(axis=1), pandas_df.sum(axis=1))
def test_swapaxes(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).swapaxes(0, 1)
def test_swaplevel(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.swaplevel("Number", "Color")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_tail(self, data, n):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.tail(n), pandas_df.tail(n))
df_equals(modin_df.tail(len(modin_df)), pandas_df.tail(len(pandas_df)))
def test_take(self):
df = pd.DataFrame(
[
("falcon", "bird", 389.0),
("parrot", "bird", 24.0),
("lion", "mammal", 80.5),
("monkey", "mammal", np.nan),
],
columns=["name", "class", "max_speed"],
index=[0, 2, 3, 1],
)
with pytest.warns(UserWarning):
df.take([0, 3])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_records(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# Skips nan because only difference is nan instead of NaN
if not name_contains(request.node.name, ["nan"]):
assert np.array_equal(modin_df.to_records(), pandas_df.to_records())
def test_to_sparse(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).to_sparse()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_string(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# Skips nan because only difference is nan instead of NaN
if not name_contains(request.node.name, ["nan"]):
assert modin_df.to_string() == to_pandas(modin_df).to_string()
def test_to_timestamp(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.to_period().to_timestamp()
def test_to_xarray(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).to_xarray()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_transform(self, request, data, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.transform(func)
except Exception as e:
with pytest.raises(type(e)):
modin_df.transform(func)
else:
modin_result = modin_df.transform(func)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_transform_numeric(self, request, data, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.transform(func)
except Exception as e:
with pytest.raises(type(e)):
modin_df.transform(func)
else:
modin_result = modin_df.transform(func)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.T, pandas_df.T)
df_equals(modin_df.transpose(), pandas_df.transpose())
# Uncomment below once #165 is merged
# Test for map across full axis for select indices
# df_equals(modin_df.T.dropna(), pandas_df.T.dropna())
# Test for map across full axis
# df_equals(modin_df.T.nunique(), pandas_df.T.nunique())
# Test for map across blocks
# df_equals(modin_df.T.notna(), pandas_df.T.notna())
def test_truncate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).truncate()
def test_tshift(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.to_period().tshift()
def test_tz_convert(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.tz_localize("America/Los_Angeles").tz_convert("America/Los_Angeles")
def test_tz_localize(self):
idx = pd.date_range("1/1/2012", periods=5, freq="M")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(idx), 4)), index=idx)
with pytest.warns(UserWarning):
df.tz_localize("America/Los_Angeles")
def test_unstack(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).unstack()
def test_update(self):
df = pd.DataFrame(
[[1.5, np.nan, 3.0], [1.5, np.nan, 3.0], [1.5, np.nan, 3], [1.5, np.nan, 3]]
)
other = pd.DataFrame([[3.6, 2.0, np.nan], [np.nan, np.nan, 7]], index=[1, 3])
df.update(other)
expected = pd.DataFrame(
[[1.5, np.nan, 3], [3.6, 2, 3], [1.5, np.nan, 3], [1.5, np.nan, 7.0]]
)
df_equals(df, expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_values(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
np.testing.assert_equal(modin_df.values, pandas_df.values)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_var(self, request, data, axis, skipna, numeric_only, ddof):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception:
with pytest.raises(TypeError):
modin_df.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
else:
modin_result = modin_df.T.var(
axis=axis, skipna=skipna, numeric_only=numeric_only, ddof=ddof
)
df_equals(modin_result, pandas_result)
def test_where(self):
frame_data = random_state.randn(100, 10)
pandas_df = pandas.DataFrame(frame_data, columns=list("abcdefghij"))
modin_df = pd.DataFrame(frame_data, columns=list("abcdefghij"))
pandas_cond_df = pandas_df % 5 < 2
modin_cond_df = modin_df % 5 < 2
pandas_result = pandas_df.where(pandas_cond_df, -pandas_df)
modin_result = modin_df.where(modin_cond_df, -modin_df)
assert all((to_pandas(modin_result) == pandas_result).all())
other = pandas_df.loc[3]
pandas_result = pandas_df.where(pandas_cond_df, other, axis=1)
modin_result = modin_df.where(modin_cond_df, other, axis=1)
assert all((to_pandas(modin_result) == pandas_result).all())
other = pandas_df["e"]
pandas_result = pandas_df.where(pandas_cond_df, other, axis=0)
modin_result = modin_df.where(modin_cond_df, other, axis=0)
assert all((to_pandas(modin_result) == pandas_result).all())
pandas_result = pandas_df.where(pandas_df < 2, True)
modin_result = modin_df.where(modin_df < 2, True)
assert all((to_pandas(modin_result) == pandas_result).all())
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
df = pd.DataFrame(data=d)
df = df.set_index(["class", "animal", "locomotion"])
with pytest.warns(UserWarning):
df.xs("mammal")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___getitem__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key = modin_df.columns[0]
modin_col = modin_df.__getitem__(key)
assert isinstance(modin_col, pd.Series)
pd_col = pandas_df[key]
df_equals(pd_col, modin_col)
slices = [
(None, -1),
(-1, None),
(1, 2),
(1, None),
(None, 1),
(1, -1),
(-3, -1),
(1, -1, 2),
]
# slice test
for slice_param in slices:
s = slice(*slice_param)
df_equals(modin_df[s], pandas_df[s])
# Test empty
df_equals(pd.DataFrame([])[:10], | pandas.DataFrame([]) | pandas.DataFrame |
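# A standalone sketch of the parity-testing pattern used throughout the tests above:
# pandas is treated as ground truth, and modin must either reproduce the result or
# raise the same exception type. The helper below is illustrative only and is not
# part of the modin test suite.
def assert_parity(pandas_op, modin_op, compare):
    try:
        expected = pandas_op()
    except Exception as err:
        try:
            modin_op()
        except type(err):
            return
        raise AssertionError("modin did not raise %s" % type(err).__name__)
    else:
        compare(modin_op(), expected)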
import os
import pandas as pd
import numpy as np
import pickle
import json
para = {
"window_size": 1,
"step_size": 0.5,
"structured_file": "BGL.log_structured.csv",
"BGL_sequence_train": "BGL_sequence_train.csv",
"BGL_sequence_test": "BGL_sequence_test.csv"
}
def load_BGL():
structured_file = para["structured_file"]
# load data
bgl_structured = pd.read_csv(structured_file)
    # get the label for each log ("-" marks a normal entry; anything else is an abnormal label)
bgl_structured['Label'] = (bgl_structured['Label'] != '-').astype(int)
return bgl_structured
def bgl_sampling(bgl_structured, phase="train"):
label_data, time_data, event_mapping_data = bgl_structured['Label'].values, bgl_structured[
'Timestamp'].values, bgl_structured['EventId'].values
log_size = len(label_data)
# split into sliding window
start_time = time_data[0]
start_index = 0
end_index = 0
start_end_index_list = []
# get the first start, end index, end time
for cur_time in time_data:
if cur_time < start_time + para["window_size"]*3600:
end_index += 1
end_time = cur_time
else:
start_end_pair = tuple((start_index, end_index))
start_end_index_list.append(start_end_pair)
break
while end_index < log_size:
start_time = start_time + para["step_size"]*3600
end_time = end_time + para["step_size"]*3600
for i in range(start_index, end_index):
if time_data[i] < start_time:
i += 1
else:
break
for j in range(end_index, log_size):
if time_data[j] < end_time:
j += 1
else:
break
start_index = i
end_index = j
start_end_pair = tuple((start_index, end_index))
start_end_index_list.append(start_end_pair)
    # start_end_index_list holds the windows produced by window_size and step_size:
    # each tuple stores the index of the first log in the window and
    # the index of the last log in the window
inst_number = len(start_end_index_list)
print('there are %d instances (sliding windows) in this dataset' % inst_number)
    # get all the log indexes in each time window by ranging from start_index to end_index
expanded_indexes_list = [[] for i in range(inst_number)]
expanded_event_list = [[] for i in range(inst_number)]
for i in range(inst_number):
start_index = start_end_index_list[i][0]
end_index = start_end_index_list[i][1]
if start_index > end_index:
continue
for l in range(start_index, end_index):
expanded_indexes_list[i].append(l)
expanded_event_list[i].append(event_mapping_data[l])
#=============get labels and event count of each sliding window =========#
labels = []
for j in range(inst_number):
        label = 0  # 0 represents success, 1 represents failure
for k in expanded_indexes_list[j]:
            # If any log in the window is abnormal (1), the whole sequence is marked as abnormal
if label_data[k]:
label = 1
continue
labels.append(label)
assert inst_number == len(labels)
print("Among all instances, %d are anomalies" % sum(labels))
BGL_sequence = | pd.DataFrame(columns=['sequence', 'label']) | pandas.DataFrame |
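# Illustration of the sliding-window split performed above, reduced to its core:
# partition sorted UNIX timestamps into (start_index, end_index) pairs for windows of
# `window` seconds taken every `step` seconds. The function name and toy timestamps
# are invented for this sketch and are not part of the BGL script.
import numpy as np

def sliding_windows(times, window=3600, step=1800):
    times = np.asarray(times)
    bounds = []
    start = times[0]
    while start <= times[-1]:
        lo = int(np.searchsorted(times, start, side="left"))
        hi = int(np.searchsorted(times, start + window, side="left"))
        bounds.append((lo, hi))
        start += step
    return bounds

print(sliding_windows([0, 600, 1900, 3700, 5400]))  # [(0, 3), (2, 4), (3, 5), (4, 5)]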
"""
Unit and regression tests for the kissim.comparison.utils module.
"""
import pytest
import numpy as np
import pandas as pd
from kissim.comparison.utils import (
format_weights,
scaled_euclidean_distance,
scaled_cityblock_distance,
)
@pytest.mark.parametrize(
"feature_weights, feature_weights_formatted",
[
(None, np.array([0.0667] * 15)),
(
[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
),
],
)
def test_format_weights(feature_weights, feature_weights_formatted):
"""
    Test if feature weights are formatted correctly into an array of length 15.
Parameters
----------
feature_weights : None or list of float
Feature weights.
feature_weights_formatted : list of float
Formatted feature weights of length 15.
"""
feature_weights_formatted_calculated = format_weights(feature_weights)
assert np.isclose(
np.std(feature_weights_formatted),
np.std(feature_weights_formatted_calculated),
rtol=1e-04,
)
@pytest.mark.parametrize("feature_weights", [{"a": 0}, "bla"])
def test_format_weights_typeerror(feature_weights):
"""
Test if wrong data type of input feature weights raises TypeError.
"""
with pytest.raises(TypeError):
format_weights(feature_weights)
@pytest.mark.parametrize(
"feature_weights",
[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0]],
)
def test_format_weights_valueerror(feature_weights):
"""
    Test if feature weights of the wrong length raise ValueError.
"""
with pytest.raises(ValueError):
format_weights(feature_weights)
@pytest.mark.parametrize(
"vector1, vector2, distance",
[
([], [], np.nan),
([0, 0], [4, 3], 2.5),
(np.array([0, 0]), np.array([4, 3]), 2.5),
(pd.Series([0, 0]), pd.Series([4, 3]), 2.5),
],
)
def test_scaled_euclidean_distance(vector1, vector2, distance):
"""
Test Euclidean distance calculation.
Parameters
----------
vector1 : np.ndarray or list of pd.Series
Value list (same length as vector2).
vector2 : np.ndarray or list of pd.Series
Value list (same length as vector1).
distance : float
Euclidean distance between two value lists.
"""
score_calculated = scaled_euclidean_distance(vector1, vector2)
if not np.isnan(distance):
assert np.isclose(score_calculated, distance, rtol=1e-04)
@pytest.mark.parametrize(
"vector1, vector2",
[
([0, 0], [4, 3, 3]),
],
)
def test_scaled_euclidean_distance_raises(vector1, vector2):
"""
Test if Euclidean distance calculation raises error if input values are of different
length.
"""
with pytest.raises(ValueError):
scaled_euclidean_distance(vector1, vector2)
@pytest.mark.parametrize(
"vector1, vector2, distance",
[
([], [], np.nan),
([0, 0], [4, 3], 3.5),
(np.array([0, 0]), np.array([4, 3]), 3.5),
(pd.Series([0, 0]), | pd.Series([4, 3]) | pandas.Series |
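# A sketch of the distance helpers exercised by the tests above. The real
# implementations live in kissim.comparison.utils; here "scaled" is assumed to mean
# the plain Euclidean/cityblock distance divided by the vector length, which matches
# the expected values in the parametrized cases (2.5 and 3.5 for [0, 0] vs [4, 3]).
import numpy as np

def scaled_euclidean_distance_sketch(v1, v2):
    v1, v2 = np.asarray(v1, dtype=float), np.asarray(v2, dtype=float)
    if v1.shape != v2.shape:
        raise ValueError("vectors must have the same length")
    if v1.size == 0:
        return np.nan
    return float(np.linalg.norm(v1 - v2)) / v1.size

def scaled_cityblock_distance_sketch(v1, v2):
    v1, v2 = np.asarray(v1, dtype=float), np.asarray(v2, dtype=float)
    if v1.shape != v2.shape:
        raise ValueError("vectors must have the same length")
    if v1.size == 0:
        return np.nan
    return float(np.abs(v1 - v2).sum()) / v1.size

print(scaled_euclidean_distance_sketch([0, 0], [4, 3]))  # 2.5
print(scaled_cityblock_distance_sketch([0, 0], [4, 3]))  # 3.5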
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import json
import datetime
import os
from os import listdir
from os.path import isfile, join
from shutil import copyfile
import logging
import pandas as pd
logger = logging.getLogger("root")
logging.basicConfig(
format="\033[1;36m%(levelname)s: %(filename)s (def %(funcName)s %(lineno)s): \033[1;37m %(message)s",
level=logging.DEBUG
)
class BuildLatestData(object):
"""
    This script processes county-level JSON from the CDC into a CSV file of the latest data, `latest-cdc-weekly-county-data.csv`
"""
timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H%M%S")
dir_current = os.path.dirname(os.path.realpath(__file__))
dir_data = "daily-cdc-county-transmission-data"
path = os.path.join(dir_current, dir_data)
def handle(self):
latest_csv = "latest-daily-cdc-county-transmission.csv"
latest_json = "latest-daily-cdc-county-transmission.json"
files = [os.path.join(self.path, f) for f in listdir(
self.path) if isfile(join(self.path, f))]
target = max(files, key=os.path.getctime)
file_saved = os.path.join(self.dir_current, self.dir_data, latest_json)
copyfile(target, file_saved)
# with open(target, encoding='utf-8') as f:
# raw_json = json.load(f)
# for item in raw_json['integrated_county_latest_external_data']:
# item['acquired_datestamp'] = os.path.basename(target)[:10]
# latest_output.append(item)
# self.update_csv(latest_csv, latest_output)
# self.create_json(latest_csv, latest_json, latest_output)
def update_csv(self, file, data):
file_saved = os.path.join(self.dir_current, self.dir_data, file)
csv_data = pd.DataFrame(data)
csv_data.to_csv(file_saved, mode='a', header=False,
encoding='utf-8', index=False)
logger.debug('Data appended to {0}'.format(file_saved))
def create_json(self, csv, json, data):
target = os.path.join(self.dir_current, self.dir_data, csv)
file_saved = os.path.join(self.dir_current, self.dir_data, json)
data = | pd.read_csv(target) | pandas.read_csv |
###############################################################################
# Copyright (c) 2021, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by <NAME> <<EMAIL>>
#
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
import numpy as np
import pandas as pd
from design import Method
class IndependentMarginals(Method):
"""
Concrete class implementing Independent Marginals.
Attributes:
name = string to be used as reference for the method
"""
def __init__(self, name='IndependentMarginals'):
"""
Validate input hyper-parameters and initialize class.
"""
# initialize parameters
super().__init__(name)
self.min_prob = 1e-5
self.output_directory = ''
def fit(self, data):
""" Estimate marginals distributions from the data. """
self.column_names = data.columns
self.marginal_dist = dict()
self.logger.info('Estimating variables distribution.')
for var in data.columns:
freq_col = data[var].value_counts(normalize=True, sort=False)
            # make sure there is a minimum chance of any element being
            # picked
vals = np.maximum(self.min_prob, freq_col.values)
vals = vals/vals.sum()
self.logger.info(var)
self.logger.info(vals)
self.marginal_dist[var] = {'values': freq_col.index.tolist(),
'p': vals}
def generate_samples(self, nb_samples):
"""Generate samples from the independently estimated marginal dist. """
synth_data = np.zeros((nb_samples, len(self.column_names)), dtype=int)
for i, var in enumerate(self.column_names):
arr = self.marginal_dist[var]['values']
pbs = self.marginal_dist[var]['p']
synth_data[:, i] = np.random.choice(arr, size=nb_samples, p=pbs)
samples = | pd.DataFrame(data=synth_data, columns=self.column_names) | pandas.DataFrame |
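# A minimal, standalone restatement of the idea implemented by IndependentMarginals:
# estimate each column's marginal frequencies independently, then sample every column
# independently from those frequencies. The column names and values below are invented
# purely for illustration.
import numpy as np
import pandas as pd

toy = pd.DataFrame({"color": ["red", "red", "blue", "green"],
                    "size": [1, 1, 2, 2]})
marginals = {col: toy[col].value_counts(normalize=True) for col in toy.columns}
synthetic = pd.DataFrame({
    col: np.random.choice(freq.index, size=10, p=freq.values)
    for col, freq in marginals.items()
})
print(synthetic.head())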
"""Run unit tests.
Use this to run tests and understand how tasks.py works.
Setup::
mkdir -p test-data/input
mkdir -p test-data/output
mysql -u root -p
CREATE DATABASE testdb;
CREATE USER 'testusr'@'localhost' IDENTIFIED BY 'test<PASSWORD>';
GRANT ALL PRIVILEGES ON testdb.* TO 'testusr'@'%';
Run tests::
pytest test_combine.py -s
Notes:
* this will create sample csv, xls and xlsx files
* test_combine_() test the main combine function
"""
from d6tstack.combine_csv import *
from d6tstack.sniffer import CSVSniffer
import d6tstack.utils
import math
import pandas as pd
# import pyarrow as pa
# import pyarrow.parquet as pq
import ntpath
import shutil
import dask.dataframe as dd
import sqlalchemy
import pytest
cfg_fname_base_in = 'test-data/input/test-data-'
cfg_fname_base_out_dir = 'test-data/output'
cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-'
cnxn_string = 'sqlite:///test-data/db/{}.db'
#************************************************************
# fixtures
#************************************************************
class DebugLogger(object):
def __init__(self, event):
pass
def send_log(self, msg, status):
pass
def send(self, data):
pass
logger = DebugLogger('combiner')
# sample data
def create_files_df_clean():
# create sample data
df1=pd.DataFrame({'date':pd.date_range('1/1/2011', periods=10), 'sales': 100, 'cost':-80, 'profit':20})
df2=pd.DataFrame({'date':pd.date_range('2/1/2011', periods=10), 'sales': 200, 'cost':-90, 'profit':200-90})
df3=pd.DataFrame({'date':pd.date_range('3/1/2011', periods=10), 'sales': 300, 'cost':-100, 'profit':300-100})
# cfg_col = [ 'date', 'sales','cost','profit']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
return df1, df2, df3
def create_files_df_clean_combine():
df1,df2,df3 = create_files_df_clean()
df_all = pd.concat([df1,df2,df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_clean_combine_with_filename(fname_list):
df1, df2, df3 = create_files_df_clean()
df1['filename'] = os.path.basename(fname_list[0])
df2['filename'] = os.path.basename(fname_list[1])
df3['filename'] = os.path.basename(fname_list[2])
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine(cfg_col_common,allstr=True):
df1, df2, df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
if allstr:
df_all = df_all[df_all.columns].astype(str)
return df_all
def check_df_colmismatch_combine(dfg,is_common=False, convert_date=True):
dfg = dfg.drop(['filepath','filename'],1).sort_values('date').reset_index(drop=True)
if convert_date:
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
dfchk = create_files_df_colmismatch_combine(is_common,False).reset_index(drop=True)[dfg.columns]
assert dfg.equals(dfchk)
return True
def create_files_df_colmismatch_combine2(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
# csv standard
@pytest.fixture(scope="module")
def create_files_csv():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-csv-clean-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch():
df1,df2,df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch2():
df1,df2,df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch2-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colreorder():
df1,df2,df3 = create_files_df_clean()
cfg_col = [ 'date', 'sales','cost','profit']
cfg_col2 = [ 'date', 'sales','profit','cost']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
# save files
cfg_fname = cfg_fname_base_in+'input-csv-reorder-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan',index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb',index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_noheader():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-noheader-csv-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False, header=False)
df2.to_csv(cfg_fname % 'feb',index=False, header=False)
df3.to_csv(cfg_fname % 'mar',index=False, header=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_col_renamed():
df1, df2, df3 = create_files_df_clean()
df3 = df3.rename(columns={'sales':'revenue'})
cfg_col = ['date', 'sales', 'profit', 'cost']
cfg_col2 = ['date', 'revenue', 'profit', 'cost']
cfg_fname = cfg_fname_base_in + 'input-csv-renamed-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan', index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb', index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar', index=False)
return [cfg_fname % 'jan', cfg_fname % 'feb', cfg_fname % 'mar']
def create_files_csv_dirty(cfg_sep=",", cfg_header=True):
df1,df2,df3 = create_files_df_clean()
df1.to_csv(cfg_fname_base_in+'debug.csv',index=False, sep=cfg_sep, header=cfg_header)
return cfg_fname_base_in+'debug.csv'
# excel single-tab
def create_files_xls_single_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
df1.to_excel(cfg_fname % 'jan',index=False)
df2.to_excel(cfg_fname % 'feb',index=False)
df3.to_excel(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xlsx')
def write_file_xls(dfg, fname, startrow=0,startcol=0):
writer = pd.ExcelWriter(fname)
dfg.to_excel(writer, 'Sheet1', index=False,startrow=startrow,startcol=startcol)
dfg.to_excel(writer, 'Sheet2', index=False,startrow=startrow,startcol=startcol)
writer.save()
# excel multi-tab
def create_files_xls_multiple_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
write_file_xls(df1,cfg_fname % 'jan')
write_file_xls(df2,cfg_fname % 'feb')
write_file_xls(df3,cfg_fname % 'mar')
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xlsx')
#************************************************************
# tests - helpers
#************************************************************
def test_file_extensions_get():
fname_list = ['a.csv','b.csv']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.csv','.csv']
fname_list = ['a.xls','b.xls']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.xls','.xls']
def test_file_extensions_all_equal():
ext_list = ['.csv']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.xls']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.csv','.xls']
assert not file_extensions_all_equal(ext_list)
def test_file_extensions_valid():
ext_list = ['.csv']*2
assert file_extensions_valid(ext_list)
ext_list = ['.xls']*2
assert file_extensions_valid(ext_list)
ext_list = ['.exe','.xls']
assert not file_extensions_valid(ext_list)
#************************************************************
#************************************************************
# scan header
#************************************************************
#************************************************************
def test_csv_sniff(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
with pytest.raises(ValueError) as e:
c = CombinerCSV([])
# clean
combiner = CombinerCSV(fname_list=create_files_csv)
combiner.sniff_columns()
assert combiner.is_all_equal()
assert combiner.is_column_present().all().all()
assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit']
assert combiner.sniff_results['columns_common'] == combiner.sniff_results['columns_all']
assert combiner.sniff_results['columns_unique'] == []
# extra column
combiner = CombinerCSV(fname_list=create_files_csv_colmismatch)
combiner.sniff_columns()
assert not combiner.is_all_equal()
assert not combiner.is_column_present().all().all()
assert combiner.is_column_present().all().values.tolist()==[True, True, True, True, False]
assert combiner.sniff_results['columns_all'] == ['date', 'sales', 'cost', 'profit', 'profit2']
assert combiner.sniff_results['columns_common'] == ['date', 'sales', 'cost', 'profit']
assert combiner.is_column_present_common().columns.tolist() == ['date', 'sales', 'cost', 'profit']
assert combiner.sniff_results['columns_unique'] == ['profit2']
assert combiner.is_column_present_unique().columns.tolist() == ['profit2']
# mixed order
combiner = CombinerCSV(fname_list=create_files_csv_colreorder)
combiner.sniff_columns()
assert not combiner.is_all_equal()
assert combiner.sniff_results['df_columns_order']['profit'].values.tolist() == [3, 3, 2]
def test_csv_selectrename(create_files_csv, create_files_csv_colmismatch):
# rename
df = CombinerCSV(fname_list=create_files_csv).preview_rename()
assert df.empty
df = CombinerCSV(fname_list=create_files_csv, columns_rename={'notthere':'nan'}).preview_rename()
assert df.empty
df = CombinerCSV(fname_list=create_files_csv, columns_rename={'cost':'cost2'}).preview_rename()
assert df.columns.tolist()==['cost']
assert df['cost'].unique().tolist()==['cost2']
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_rename={'profit2':'profit3'}).preview_rename()
assert df.columns.tolist()==['profit2']
assert df['profit2'].unique().tolist()==[np.nan, 'profit3']
# select
l = CombinerCSV(fname_list=create_files_csv).preview_select()
assert l == ['date', 'sales', 'cost', 'profit']
l2 = CombinerCSV(fname_list=create_files_csv, columns_select_common=True).preview_select()
assert l2==l
l = CombinerCSV(fname_list=create_files_csv, columns_select=['date', 'sales', 'cost']).preview_select()
assert l == ['date', 'sales', 'cost']
l = CombinerCSV(fname_list=create_files_csv_colmismatch).preview_select()
assert l == ['date', 'sales', 'cost', 'profit', 'profit2']
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).preview_select()
assert l == ['date', 'sales', 'cost', 'profit']
# rename+select
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}).preview_select()
assert l==['date', 'profit3']
l = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}).preview_select()
assert l==['date', 'profit3']
def test_to_pandas(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
df = CombinerCSV(fname_list=create_files_csv).to_pandas()
assert df.shape == (30, 6)
df = CombinerCSV(fname_list=create_files_csv_colmismatch).to_pandas()
assert df.shape == (30, 6+1)
assert df['profit2'].isnull().unique().tolist() == [True, False]
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_pandas()
assert df.shape == (30, 6)
assert 'profit2' not in df.columns
# rename+select
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit2'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas()
assert df.shape == (30, 2)
assert 'profit3' in df.columns and not 'profit2' in df.columns
df = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select=['date','profit3'], columns_rename={'profit2':'profit3'}, add_filename=False).to_pandas()
assert df.shape == (30, 2)
assert 'profit3' in df.columns and not 'profit2' in df.columns
def test_combinepreview(create_files_csv_colmismatch):
df = CombinerCSV(fname_list=create_files_csv_colmismatch).combine_preview()
assert df.shape == (9, 6+1)
assert df.dtypes.tolist() == [np.dtype('O'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')]
def apply(dfg):
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
return dfg
df = CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).combine_preview()
assert df.shape == (9, 6+1)
assert df.dtypes.tolist() == [np.dtype('<M8[ns]'), np.dtype('int64'), np.dtype('int64'), np.dtype('int64'), np.dtype('float64'), np.dtype('O'), np.dtype('O')]
def test_tocsv(create_files_csv_colmismatch):
fname = 'test-data/output/combined.csv'
fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_combine(filename=fname)
assert fname == fnameout
df = pd.read_csv(fname)
dfchk = df.copy()
assert df.shape == (30, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
assert check_df_colmismatch_combine(df)
fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch, columns_select_common=True).to_csv_combine(filename=fname)
df = pd.read_csv(fname)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'filepath', 'filename']
assert check_df_colmismatch_combine(df,is_common=True)
def helper(fdir):
fnamesout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_align(output_dir=fdir)
for fname in fnamesout:
df = pd.read_csv(fname)
assert df.shape == (10, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
helper('test-data/output')
helper('test-data/output/')
df = dd.read_csv('test-data/output/d6tstack-test-data-input-csv-colmismatch-*.csv')
df = df.compute()
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
assert df.reset_index(drop=True).equals(dfchk)
assert check_df_colmismatch_combine(df)
# check creates directory
try:
shutil.rmtree('test-data/output-tmp')
except:
pass
_ = CombinerCSV(fname_list=create_files_csv_colmismatch).to_csv_align(output_dir='test-data/output-tmp')
try:
shutil.rmtree('test-data/output-tmp')
except:
pass
def test_topq(create_files_csv_colmismatch):
fname = 'test-data/output/combined.pq'
fnameout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_parquet_combine(filename=fname)
assert fname == fnameout
df = pd.read_parquet(fname, engine='fastparquet')
assert df.shape == (30, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
df2 = pd.read_parquet(fname, engine='pyarrow')
assert df2.equals(df)
assert check_df_colmismatch_combine(df)
df = dd.read_parquet(fname)
df = df.compute()
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
df2 = pd.read_parquet(fname, engine='fastparquet')
assert df2.equals(df)
df3 = pd.read_parquet(fname, engine='pyarrow')
assert df3.equals(df)
assert check_df_colmismatch_combine(df)
def helper(fdir):
fnamesout = CombinerCSV(fname_list=create_files_csv_colmismatch).to_parquet_align(output_dir=fdir)
for fname in fnamesout:
df = pd.read_parquet(fname, engine='fastparquet')
assert df.shape == (10, 4+1+2)
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
helper('test-data/output')
df = dd.read_parquet('test-data/output/d6tstack-test-data-input-csv-colmismatch-*.pq')
df = df.compute()
assert df.columns.tolist() == ['date', 'sales', 'cost', 'profit', 'profit2', 'filepath', 'filename']
assert check_df_colmismatch_combine(df)
    # todo: write tests that compare against a concatenated df so the shape/column checks aren't repeated each time
def test_tosql(create_files_csv_colmismatch):
tblname = 'testd6tstack'
def apply(dfg):
dfg['date'] = pd.to_datetime(dfg['date'], format='%Y-%m-%d')
return dfg
def helper(uri):
sql_engine = sqlalchemy.create_engine(uri)
CombinerCSV(fname_list=create_files_csv_colmismatch).to_sql_combine(uri, tblname, 'replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df)
# with date convert
CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).to_sql_combine(uri, tblname, 'replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df, convert_date=False)
uri = 'postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest'
helper(uri)
uri = 'mysql+pymysql://testusr:testpwd@localhost/testdb'
helper(uri)
uri = 'postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest'
sql_engine = sqlalchemy.create_engine(uri)
CombinerCSV(fname_list=create_files_csv_colmismatch).to_psql_combine(uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert df.shape == (30, 4+1+2)
assert check_df_colmismatch_combine(df)
CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).to_psql_combine(uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df, convert_date=False)
uri = 'mysql+mysqlconnector://testusr:testpwd@localhost/testdb'
sql_engine = sqlalchemy.create_engine(uri)
CombinerCSV(fname_list=create_files_csv_colmismatch).to_mysql_combine(uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert df.shape == (30, 4+1+2)
assert check_df_colmismatch_combine(df)
# todo: mysql import makes NaNs 0s
CombinerCSV(fname_list=create_files_csv_colmismatch, apply_after_read=apply).to_mysql_combine(uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert check_df_colmismatch_combine(df, convert_date=False)
def test_tosql_util(create_files_csv_colmismatch):
tblname = 'testd6tstack'
uri = 'postgresql+psycopg2://psqlusr:psqlpwdpsqlpwd@localhost/psqltest'
sql_engine = sqlalchemy.create_engine(uri)
dfc = CombinerCSV(fname_list=create_files_csv_colmismatch).to_pandas()
# psql
d6tstack.utils.pd_to_psql(dfc, uri, tblname, if_exists='replace')
df = pd.read_sql_table(tblname, sql_engine)
assert df.equals(dfc)
uri = 'mysql+mysqlconnector://testusr:testpwd@localhost/testdb'
sql_engine = sqlalchemy.create_engine(uri)
d6tstack.utils.pd_to_mysql(dfc, uri, tblname, if_exists='replace')
df = | pd.read_sql_table(tblname, sql_engine) | pandas.read_sql_table |
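# What "combining CSVs with a column mismatch" amounts to in plain pandas terms:
# concat aligns on the union of columns and fills missing ones with NaN, while
# join='inner' keeps only the common columns. This is a conceptual illustration,
# not part of the d6tstack test suite.
import pandas as pd

df_jan = pd.DataFrame({"date": ["2011-01-01"], "sales": [100], "cost": [-80], "profit": [20]})
df_mar = pd.DataFrame({"date": ["2011-03-01"], "sales": [300], "cost": [-100], "profit": [200], "profit2": [400]})
print(pd.concat([df_jan, df_mar], ignore_index=True))                # union of columns, NaN where absent
print(pd.concat([df_jan, df_mar], join="inner", ignore_index=True))  # common columns only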
"""
utilities that are helpful in general model building
"""
import clickhouse_driver
from muti import chu
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.io as pio
import scipy.stats as stats
import math
import os
def r_square(yh, y):
"""
find the r-square for the model implied by yh
    :param yh: model output -- either pd.Series or np.ndarray
:param y: actual values, same type as yh
:return: r-squared
:rtype float
"""
res_full = y - yh
res_reduced = y - y.mean()
r2 = 100.0 * (1.0 - np.square(res_full).sum() / np.square(res_reduced).sum())
return float(r2)
def get_unique_levels(feature: str, client: clickhouse_driver.Client, table: str, cnt_min=None):
"""
Retrieves the unique levels of the column 'feature' in the table 'table' of database db.
At most 1000 are returned.
:param feature: column name in db.table to get unique levels
:param client: clickhouse client connector
:param table: table name (with db)
:param cnt_min: minimum count for a level to be returned
:return: list of unique levels and the most frequent level
:rtype list, <value>
"""
qry = 'SELECT {0} AS grp, count(*) as nl FROM {1} GROUP BY grp'.format(feature, table)
if cnt_min is not None:
qry += ' HAVING nl > ' + str(cnt_min)
qry += ' ORDER BY nl DESC LIMIT 1000'
df = chu.run_query(qry, client, return_df=True)
most_freq_level = df.iloc[0]['grp']
df.sort_values('grp')
u = list(df['grp'])
return u, most_freq_level
def get_closest(ul: list, field: str, target: str, table: str,
client: clickhouse_driver.Client, print_details=True):
"""
This function is designed to select the out-of-list default value for an embedding. It selects this value
as the in-list value which has target mean closest to the average value of all out-of-list values
:param ul: in-list values
:param field: name of field we're working on
:param target: target field used for assessing 'close'
:param table: table to use
:param client: clickhouse client
:param print_details: if True, prints info about the outcome
:return: value of in-list elements with average closest to out-of-list averages
"""
qry = """
/*
we have a feature that has lots of levels. Some levels are part of the embedding. For those that aren't
we want to find the default value -- that level which has the closest mean of a target to them.
TTTT list of values called out in embedding
XXXX field we're working with
YYYY target variable
ZZZZ db.table to query
*/
SELECT
XXXX AS grp,
avg(YYYY) AS in_avg,
(SELECT
avg(YYYY)
FROM
ZZZZ
WHERE
XXXX not in (TTTT)) AS out_avg,
abs(in_avg - out_avg) AS mad
FROM
ZZZZ
WHERE
grp in (TTTT)
GROUP BY grp
ORDER BY mad
LIMIT 1
"""
repl = ''
for j, u in enumerate(ul):
if j != 0:
repl += ', '
repl += "'" + u + "'"
df = chu.run_query(qry, client, return_df=True,
replace_source=['TTTT', 'XXXX', 'YYYY', 'ZZZZ'],
replace_dest=[repl, field, target, table])
if print_details:
print('Out-of-list element selection for field {0} using target {1}'.format(field, target))
print(df)
print('\n')
return df.iloc[0]['grp']
def cont_hist(yh, y, title='2D Contour Histogram', xlab='Model Output', ylab='Y', subtitle=None, plot_dir=None,
in_browser=False):
"""
Make a 2D contour histogram plot of y vs yh.
The plot is produced in the browser and optionally written to a file.
:param yh: Model outputs -- nd.array or pd.Series
:param y: Target value, same type as yh
:param title: Title for plot
:param xlab: x-axis label
:param ylab: y-axis label
:param subtitle: optional subtitle
:param plot_dir: optional file to write graph to
:param in_browser: if True plot to browser
:return:
"""
fig = [go.Histogram2dContour(x=yh, y=y)]
min_value = min([yh.min(), y.quantile(.01)])
max_value = max([yh.max(), y.quantile(.99)])
fig += [go.Scatter(x=[min_value, max_value], y=[min_value, max_value],
mode='lines', line=dict(color='red'))]
if subtitle is not None:
        title += '<br>' + subtitle
layout = go.Layout(title=dict(text=title, x=0.5),
height=800, width=800,
xaxis=dict(title=xlab),
yaxis=dict(title=ylab))
figx = go.Figure(fig, layout=layout)
if in_browser:
figx.show()
if plot_dir is not None:
figx.write_image(plot_dir + 'png/model_fit.png')
figx.write_html(plot_dir + 'html/model_fit.html')
def ks_calculate(score_variable: pd.Series, binary_variable: pd.Series, plot=False, xlab='Score', ylab='CDF',
title='KS Plot', subtitle=None, plot_dir=None, out_file=None, in_browser=False):
"""
Calculates the KS (Kolmogorov Smirnov) distance between two cdfs. The KS statistic is 100 times the
maximum vertical difference between the two cdfs
The single input score_variable contains values from the two populations. The two populations are distinguished
    by the value of binary_variable (0 means population A, 1 means population B).
Optionally, the plot of the CDF of score variable for the two values of binary_variable may be plotted.
:param score_variable: continuous variable from the logistic regression
:param binary_variable: binary outcome (dependent) variable from the logistic regression
:param plot: creates a graph if True
:param xlab: label for the x-axis (score variable), optional
:param ylab: label for the y-axis (binary variable), optional
:param title: title for the plot, optional
:param subtitle: subtitle for the plot, optional (default=None)
:param plot_dir: directory to write plot to
    :param out_file: file name for writing out the plot
:param in_browser: if True, plots to browser
:return: KS statistic (0 to 100),
:rtype: float
"""
if isinstance(score_variable, np.ndarray):
score_variable = pd.Series(score_variable)
if isinstance(binary_variable, np.ndarray):
binary_variable = | pd.Series(binary_variable) | pandas.Series |
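# A self-contained sketch of the KS distance described in the docstring above:
# 100 times the maximum vertical gap between the empirical CDFs of the scores in the
# two populations. The function name and sample values are made up for illustration.
import numpy as np

def ks_distance_sketch(scores, labels):
    scores = np.asarray(scores, dtype=float)
    labels = np.asarray(labels)
    grid = np.sort(np.unique(scores))
    s0 = np.sort(scores[labels == 0])
    s1 = np.sort(scores[labels == 1])
    cdf0 = np.searchsorted(s0, grid, side="right") / s0.size
    cdf1 = np.searchsorted(s1, grid, side="right") / s1.size
    return 100.0 * float(np.max(np.abs(cdf0 - cdf1)))

print(ks_distance_sketch([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]))  # 50.0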
"""
Tests that apply specifically to the Python parser. Unless specifically
stated as a Python-specific issue, the goal is to eventually move as many of
these tests out of this module as soon as the C parser can accept further
arguments when parsing.
"""
import csv
from io import BytesIO, StringIO
import pytest
from pandas.errors import ParserError
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
def test_default_separator(python_parser_only):
# see gh-17333
#
# csv.Sniffer in Python treats "o" as separator.
data = "aob\n1o2\n3o4"
parser = python_parser_only
expected = DataFrame({"a": [1, 3], "b": [2, 4]})
result = parser.read_csv(StringIO(data), sep=None)
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
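# A quick standalone check of the behavior being tested: with the Python engine,
# sep=None asks csv.Sniffer to detect the delimiter from the data.
from io import StringIO
import pandas as pd

print(pd.read_csv(StringIO("a;b\n1;2\n3;4"), sep=None, engine="python"))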
#Imports
import requests
import json
import os
import sys
import pandas as pd
import numpy as np
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash import Dash
from dash.dependencies import Output, Input, State
from dash.exceptions import PreventUpdate
import plotly.express as px
import plotly.graph_objects as go
from datetime import datetime
#Constants
API_BASE = 'https://api.coingecko.com/api/v3/'
PING = '/ping'
ASSET_PLAT = '/asset_platforms'
COIN_LIST = '/coins/list'
ID_BTC = '/coins/bitcoin'
#Index Average Settings
#short
INDEX_WINDOW = 12
W = np.linspace(0, 1, INDEX_WINDOW)
#long - to be added
#Helpful Function Declarations:
#1. Volatility Index Calculation Function
def VolIndexFunc(series):
"""
Calculate naive Volatility Index given a pandas Series with Alphas
alpha = abs(z-score)
"""
return np.exp(2*series)
#2. Data from API
def get_current_coin_chart_data(coin_id, currency, days):
"""
Returns data at fixed time interval for given
coin_id vs curr.
Returns JSON obj.
"""
EXT = f'/coins/{coin_id}/market_chart?vs_currency={currency}&days={days}'
print(EXT)
res = requests.get(API_BASE+EXT)
data = json.loads(res.text)
return data
#Get Coin List from Data
coin_list_df = pd.read_csv('data/coin_list.csv')
#Start App
app = Dash(__name__, external_stylesheets=[dbc.themes.SLATE])
#LAYOUT
app.layout = html.Div([
dbc.Card(
dbc.CardBody([
dbc.Row([
dbc.Col([
html.H1('CryptoViz')
], width=3),
dbc.Col([
], width=3),
dbc.Col([
], width=3),
], align='left'),
dbc.Row([
dbc.Col([
], width=3),
dbc.Col([
dcc.Location(id='dropdown_1'),
html.Div(id='page-content1'),
], width=3),
dbc.Col([
html.Div(id='cur_index_disp'),
], width=3),
dbc.Col([
dcc.Location(id='dropdown_2'),
html.Div(id='page-content2'),
], width=3),
], align='center'),
html.Br(),
dbc.Row([
dbc.Col([
dcc.Graph(id='main_price_graph')
], width=7),
dbc.Col([
dcc.Location(id='compare_table')
], width=2),
dbc.Col([
], width=3),
], align='center'),
html.Br(),
dbc.Row([
dbc.Col([
dcc.Graph(id='main_graph_2')
], width=7),
dbc.Col([
dcc.RadioItems(id = 'graph2_ctrl',
options=[
{'label': 'Volume', 'value': 'vols'},
{'label': 'Market Cap', 'value': 'mcap'},
{'label': 'Volatility', 'value': 'vlty'},
],
value = 'vols',
labelStyle={'display': 'block'}
),
dcc.Location(id='radio_out1'),
], width=2),
dbc.Col([
], width=3),
], align='center'),
]), color = 'dark'
),
html.Footer(id='footer_text',
children = [
html.Div([
html.P('Powered by CoinGecko.'),
html.A('CoinGecko/API', href='https://www.coingecko.com/en/api')
])
]
),
dcc.Store(id='coin_id_data'),
])
#CALLBACKS
@app.callback(Output('page-content1', 'children'), Input('dropdown_1', 'value'))
def generate_layout(dropdown_1):
return html.Div([
# html.Label('Multi-Select Dropdown'),
dcc.Dropdown(
options=[{'label': name, 'value': coin_id} for name, coin_id in zip(coin_list_df['name'].to_list(), coin_list_df['id'].to_list())],
#options=[{'label': 'New York City', 'value': 'NYC'},{'label': 'Montréal', 'value': 'MTL'},{'label': 'San Francisco', 'value': 'SF'}],
multi=False,
id='input'
),
])
@app.callback(Output('page-content2', 'children'), Input('dropdown_2', 'value'))
def generate_layout_2(dropdown_1):
return html.Div([
# html.Label('Multi-Select Dropdown'),
dcc.Dropdown(
options=[{'label': name, 'value': coin_id} for name, coin_id in zip(coin_list_df['name'].to_list(), coin_list_df['id'].to_list())],
#options=[{'label': 'New York City', 'value': 'NYC'},{'label': 'Montréal', 'value': 'MTL'},{'label': 'San Francisco', 'value': 'SF'}],
multi=True,
id='input2'
),
])
@app.callback(Output('coin_id_data', 'data'), Input('input', 'value'))
def get_maingraph_data(value):
data = get_current_coin_chart_data(coin_id=value, currency='usd', days=30)
chart_df = pd.DataFrame(data)
# UNIX Time
unix_t = chart_df['prices'].apply(lambda x: x[0]/1000)
#Date and Time
t = pd.to_datetime(unix_t, unit='s')
# Price
p = chart_df['prices'].apply(lambda x: x[1])
# Volume
v = chart_df['total_volumes'].apply(lambda x:x[1])
# Market Caps
m_caps = chart_df['market_caps'].apply(lambda x:x[1])
# Making the final frame.
frame_col = {'datetime': t, 'unixtime':unix_t , 'prices': p, 't_vols': v, 'market_c': m_caps}
final_df = pd.DataFrame(frame_col)
final_df['mavg_5'] = final_df['prices'].rolling(window=5).mean()
final_df['mavg_20'] = final_df['prices'].rolling(window=20).mean()
final_df['exmavg_24'] = final_df['prices'].ewm(span=24, adjust=True).mean()
final_df['stddev_5'] = final_df['prices'].rolling(window=5).std()
final_df['z_score_5'] = (final_df['prices'] - final_df['mavg_5'])/final_df['stddev_5']
final_df['alpha_5'] = np.abs(final_df['z_score_5'])
final_df['vol_index_5'] = VolIndexFunc(final_df['alpha_5'])
final_df['index_avg'] = final_df['vol_index_5'].rolling(window=INDEX_WINDOW).apply(lambda x: sum((W*x)))
return final_df.to_json(date_format='iso', orient='split')
@app.callback(Output('main_price_graph', 'figure'), Input('coin_id_data', 'data'), Input('input', 'value'))
def render_price(j_data, value):
if value is None:
raise PreventUpdate
else:
gr_data_df = pd.read_json(j_data, orient='split')
#Price Graph, so y is 'prices'
fig = px.line(data_frame=gr_data_df, x='datetime', y=['prices', 'mavg_5', 'mavg_20', 'exmavg_24'])
fig.update_xaxes(rangeslider_visible=True)
return fig
@app.callback(Output('main_graph_2', 'figure'), Input('coin_id_data', 'data'), Input('input', 'value'), Input('graph2_ctrl', 'value'))
def render_vols(j_data, value, choice):
if value is None:
raise PreventUpdate
else:
gr_data_df = | pd.read_json(j_data, orient='split') | pandas.read_json |
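# Standalone illustration of the rolling statistics assembled in get_maingraph_data:
# a 5-point rolling z-score, the exp(2*|z|) volatility index, and a linearly weighted
# rolling average over INDEX_WINDOW points. The price series is random and used only
# for illustration.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
prices = pd.Series(100 + rng.normal(0, 1, 40).cumsum())
mavg_5 = prices.rolling(window=5).mean()
std_5 = prices.rolling(window=5).std()
z_5 = (prices - mavg_5) / std_5
vol_index_5 = np.exp(2 * z_5.abs())
weights = np.linspace(0, 1, 12)
index_avg = vol_index_5.rolling(window=12).apply(lambda x: np.sum(weights * x))
print(index_avg.tail())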
"""Transformer for datetime data."""
import numpy as np
import pandas as pd
from pandas.core.tools.datetimes import _guess_datetime_format_for_array
from rdt.transformers.base import BaseTransformer
from rdt.transformers.null import NullTransformer
class UnixTimestampEncoder(BaseTransformer):
"""Transformer for datetime data.
This transformer replaces datetime values with an integer timestamp
transformed to float.
Null values are replaced using a ``NullTransformer``.
Args:
missing_value_replacement (object or None):
Indicate what to do with the null values. If an object is given, replace them
with the given value. If the strings ``'mean'`` or ``'mode'`` are given, replace
them with the corresponding aggregation. If ``None`` is given, do not replace them.
Defaults to ``None``.
model_missing_values (bool):
Whether to create a new column to indicate which values were null or not. The column
will be created only if there are null values. If ``True``, create the new column if
there are null values. If ``False``, do not create the new column even if there
are null values. Defaults to ``False``.
datetime_format (str):
The strftime to use for parsing time. For more information, see
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior.
"""
INPUT_SDTYPE = 'datetime'
DETERMINISTIC_TRANSFORM = True
DETERMINISTIC_REVERSE = True
COMPOSITION_IS_IDENTITY = True
null_transformer = None
def __init__(self, missing_value_replacement=None, model_missing_values=False,
datetime_format=None):
self.missing_value_replacement = missing_value_replacement
self.model_missing_values = model_missing_values
self.datetime_format = datetime_format
def is_composition_identity(self):
"""Return whether composition of transform and reverse transform produces the input data.
Returns:
bool:
Whether or not transforming and then reverse transforming returns the input data.
"""
if self.null_transformer and not self.null_transformer.models_missing_values():
return False
return self.COMPOSITION_IS_IDENTITY
def get_output_sdtypes(self):
"""Return the output sdtypes supported by the transformer.
Returns:
dict:
Mapping from the transformed column names to supported sdtypes.
"""
output_sdtypes = {
'value': 'float',
}
if self.null_transformer and self.null_transformer.models_missing_values():
output_sdtypes['is_null'] = 'float'
return self._add_prefix(output_sdtypes)
def _convert_to_datetime(self, data):
if data.dtype == 'object':
try:
pandas_datetime_format = None
if self.datetime_format:
pandas_datetime_format = self.datetime_format.replace('%-', '%')
data = pd.to_datetime(data, format=pandas_datetime_format)
except ValueError as error:
if 'Unknown string format:' in str(error):
message = 'Data must be of dtype datetime, or castable to datetime.'
raise TypeError(message) from None
raise ValueError('Data does not match specified datetime format.') from None
return data
def _transform_helper(self, datetimes):
"""Transform datetime values to integer."""
datetimes = self._convert_to_datetime(datetimes)
nulls = datetimes.isna()
integers = pd.to_numeric(datetimes, errors='coerce').to_numpy().astype(np.float64)
integers[nulls] = np.nan
transformed = pd.Series(integers)
return transformed
def _reverse_transform_helper(self, data):
"""Transform integer values back into datetimes."""
if not isinstance(data, np.ndarray):
data = data.to_numpy()
if self.missing_value_replacement is not None:
data = self.null_transformer.reverse_transform(data)
data = np.round(data.astype(np.float64))
return data
def _fit(self, data):
"""Fit the transformer to the data.
Args:
data (pandas.Series):
Data to fit the transformer to.
"""
if self.datetime_format is None and data.dtype == 'object':
self.datetime_format = _guess_datetime_format_for_array(data.to_numpy())
transformed = self._transform_helper(data)
self.null_transformer = NullTransformer(
self.missing_value_replacement,
self.model_missing_values
)
self.null_transformer.fit(transformed)
def _transform(self, data):
"""Transform datetime values to float values.
Args:
data (pandas.Series):
Data to transform.
Returns:
numpy.ndarray
"""
data = self._transform_helper(data)
return self.null_transformer.transform(data)
def _reverse_transform(self, data):
"""Convert float values back to datetimes.
Args:
data (pandas.Series or numpy.ndarray):
Data to transform.
Returns:
pandas.Series
"""
data = self._reverse_transform_helper(data)
datetime_data = pd.to_datetime(data)
if not isinstance(datetime_data, pd.Series):
datetime_data = | pd.Series(datetime_data) | pandas.Series |
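# Round-trip illustration of the encoding performed above (assumed behavior, outside
# the rdt test suite): datetimes become float nanosecond timestamps, NaT becomes NaN,
# and rounding the floats back through pd.to_datetime recovers the original values.
import numpy as np
import pandas as pd

dates = pd.Series(pd.to_datetime(["2021-01-01", None, "2021-06-15"]))
encoded = pd.to_numeric(dates, errors="coerce").to_numpy().astype(np.float64)
encoded[dates.isna().to_numpy()] = np.nan
decoded = pd.to_datetime(np.round(encoded))
print(decoded)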
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from nose.tools import (assert_equal,
assert_almost_equal,
raises,
ok_,
eq_)
from rsmtool.preprocessor import (FeaturePreprocessor,
FeatureSubsetProcessor,
FeatureSpecsProcessor)
class TestFeaturePreprocessor:
def setUp(self):
self.fpp = FeaturePreprocessor()
def test_select_candidates_with_N_or_more_items(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'candidate': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_select_candidates_with_N_or_more_items_all_included(self):
data = pd.DataFrame({'candidate': ['a'] * 2 + ['b'] * 2 + ['c'] * 2,
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, data)
assert_equal(len(df_excluded), 0)
def test_select_candidates_with_N_or_more_items_all_excluded(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 4)
assert_frame_equal(df_excluded, data)
assert_equal(len(df_included), 0)
def test_select_candidates_with_N_or_more_items_custom_name(self):
data = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'ID': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2, 'ID')
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_rename_no_columns(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'candidate', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2',
'length', 'raw', 'candidate')
assert_array_equal(df.columns,
['spkitemid', 'sc1', 'sc2', 'length', 'raw',
'candidate', 'feature1', 'feature2'])
def test_rename_no_columns_some_values_none(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
def test_rename_no_used_columns_but_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2',
'##length##', 'feature1', 'feature2'])
def test_rename_used_columns(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'SR', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', 'SR', None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'feature1', 'feature2'])
def test_rename_used_columns_and_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'raw', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'##raw##', 'feature1', 'feature2'])
def test_rename_used_columns_with_swapped_names(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'raw', 'words', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'sc2', 'sc1', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc2', 'sc1', '##raw##',
'length', 'feature1', 'feature2'])
def test_rename_used_columns_but_not_features(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'length', 'feature2'])
df = self.fpp.rename_default_columns(df, ['length'], 'id', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length', 'feature2'])
def test_rename_candidate_column(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'apptNo', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [],
'spkitemid', 'sc1', 'sc2', None, None, 'apptNo')
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', '##length##',
'candidate', 'feature1', 'feature2'])
def test_rename_candidate_named_sc2(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'question', 'l1', 'score'])
df_renamed = self.fpp.rename_default_columns(df, [],
'id', 'sc1', None, None, 'score', 'sc2')
assert_array_equal(df_renamed.columns, ['spkitemid', 'sc1',
'candidate', 'question', 'l1', 'raw'])
@raises(KeyError)
def test_check_subgroups_missing_columns(self):
df = pd.DataFrame(columns=['a', 'b', 'c'])
subgroups = ['a', 'd']
FeaturePreprocessor.check_subgroups(df, subgroups)
def test_check_subgroups_nothing_to_replace(self):
df = pd.DataFrame({'a': ['1', '2'],
'b': ['32', '34'],
'd': ['abc', 'def']})
subgroups = ['a', 'd']
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df)
def test_check_subgroups_replace_empty(self):
df = pd.DataFrame({'a': ['1', ''],
'b': [' ', '34'],
'd': ['ab c', ' ']})
subgroups = ['a', 'd']
df_expected = pd.DataFrame({'a': ['1', 'No info'],
'b': [' ', '34'],
'd': ['ab c', 'No info']})
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df_expected)
def test_filter_on_column(self):
bad_df = pd.DataFrame({'spkitemlab': np.arange(1, 9, dtype='int64'),
'sc1': ['00', 'TD', '02', '03'] * 2})
df_filtered_with_zeros = pd.DataFrame({'spkitemlab': [1, 3, 4, 5, 7, 8],
'sc1': [0.0, 2.0, 3.0] * 2})
df_filtered = pd.DataFrame({'spkitemlab': [3, 4, 7, 8], 'sc1': [2.0, 3.0] * 2})
(output_df_with_zeros,
output_excluded_df_with_zeros) = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=False)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
assert_frame_equal(output_df_with_zeros, df_filtered_with_zeros)
assert_frame_equal(output_df, df_filtered)
def test_filter_on_column_all_non_numeric(self):
bad_df = pd.DataFrame({'sc1': ['A', 'I', 'TD', 'TD'] * 2,
'spkitemlab': range(1, 9)})
expected_df_excluded = bad_df.copy()
expected_df_excluded.drop('sc1', axis=1, inplace=True)
df_filtered, df_excluded = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
ok_(df_filtered.empty)
ok_("sc1" not in df_filtered.columns)
assert_frame_equal(df_excluded, expected_df_excluded, check_dtype=False)
def test_filter_on_column_std_epsilon_zero(self):
        # Test that the function excludes columns whose std is returned as a
        # very low value rather than exactly 0
data = {'id': np.arange(1, 21, dtype='int64'),
'feature_ok': np.arange(1, 21),
'feature_zero_sd': [1.5601] * 20}
bad_df = pd.DataFrame(data=data)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df,
'feature_zero_sd',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
good_df = bad_df[['id', 'feature_ok']].copy()
assert_frame_equal(output_df, good_df)
ok_(output_excluded_df.empty)
def test_filter_on_column_with_inf(self):
        # Test that the function excludes columns where the feature value is 'inf'
data = pd.DataFrame({'feature_1': [1.5601, 0, 2.33, 11.32],
'feature_ok': np.arange(1, 5)})
data['feature_with_inf'] = 1 / data['feature_1']
data['id'] = np.arange(1, 5, dtype='int64')
bad_df = data[np.isinf(data['feature_with_inf'])].copy()
good_df = data[~np.isinf(data['feature_with_inf'])].copy()
bad_df.reset_index(drop=True, inplace=True)
good_df.reset_index(drop=True, inplace=True)
output_df, output_excluded_df = self.fpp.filter_on_column(data, 'feature_with_inf',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
assert_frame_equal(output_df, good_df)
assert_frame_equal(output_excluded_df, bad_df)
def test_filter_on_flag_column_empty_flag_dictionary(self):
# no flags specified, keep the data frame as is
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 0, 0, 0],
'flag2': [1, 2, 2, 1]})
flag_dict = {}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.5, 1.1, 2.2, 3.6]})
flag_dict = {'flag1': [0.5, 1.1, 2.2, 3.6, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['a', 'b', 'c', 'd']})
flag_dict = {'flag1': ['a', 'b', 'c', 'd', 'e']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.0, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['4', '1', '2', '3.5']})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.5, 4.0]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [4.0, 1.0, 2.0, 3.5]})
flag_dict = {'flag1': ['1', '2', '3.5', '4', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['0.0', '1.0', '2.0', '3.0']})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.0', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.0', 2, 3.5]})
flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.5', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.0', 2, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.5', 2, 3.5]})
flag_dict = {'flag1': [0.0, 1.5, 2.0, 3.5, 4.0]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.5]})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['0.0', '1.0', '2.0', '3.5']})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [1, 2, 3.5, 'TD']})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_mixed_type_column_mixed_type_dict_filter_preserve_type(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS']})
flag_dict = {'flag1': [1.5, 2, 'TD']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD']})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS']})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_int_flag_column_int_dict(self):
df = pd.DataFrame({'spkitemid': [1, 2, 3, 4, 5, 6],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 2, 2, 3, 4, None]}, dtype=object)
flag_dict = {'flag1': [2, 4]}
df_new_expected = pd.DataFrame({'spkitemid': [2, 3, 5],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2, 2, 4]}, dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': [1, 4, 6],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3, None]}, dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_float_flag_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1.2, 2.1, 2.1, 3.3, 4.2, None]})
flag_dict = {'flag1': [2.1, 4.2]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2.1, 2.1, 4.2]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1.2, 3.3, None]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_str_flag_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': ['a', 'b', 'b', 'c', 'd', None]})
flag_dict = {'flag1': ['b', 'd']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': ['b', 'b', 'd']})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': ['a', 'c', None]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2.0, 'TD', 2.0, None]},
dtype=object)
flag_dict = {'flag1': [1.5, 2.0]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2.0, 2.0]},
dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 'TD', None]},
dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1.5, 2, 2, 'TD', 4, None]},
dtype=object)
flag_dict = {'flag1': [2, 4]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2, 2, 4]},
dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1.5, 'TD', None]},
dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', None]},
dtype=object)
flag_dict = {'flag1': [1.5, 2, 'TD']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD']}, dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, None]}, dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_two_flags_same_responses(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS'],
'flag2': [1, 0, 0, 1, 0, 1]})
flag_dict = {'flag1': [1.5, 2, 'TD'], 'flag2': [0]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD'],
'flag2': [0, 0, 0]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS'],
'flag2': [1, 1, 1]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_two_flags_different_responses(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS'],
'flag2': [2, 0, 0, 1, 0, 1]})
flag_dict = {'flag1': [1.5, 2, 'TD', 'NS'], 'flag2': [0, 2]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD'],
'flag2': [0, 0, 0]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS'],
'flag2': [2, 1, 1]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
| assert_frame_equal(df_new, df_new_expected) | pandas.testing.assert_frame_equal |
# ========== (c) <NAME> 3/8/21 ==========
import logging
import pandas as pd
import numpy as np
import plotly.express as px
logger = logging.getLogger(__name__)
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sh.setFormatter(formatter)
root_logger.addHandler(sh)
desired_width = 320
| pd.set_option('display.max_columns', 20) | pandas.set_option |
# Build the dataset for the boosting-model prediction
import gc
gc.collect()
import pandas as pd
import seaborn as sns
import numpy as np
#%% marzo
marzo = pd.read_csv(r'C:\Users\argomezja\Desktop\Data Science\MELI challenge\Project MELI\Dataset_limpios\marzo_limpio.csv.gz')
marzo = marzo.loc[marzo['day']>=4].reset_index(drop=True)
marzo['day']=marzo['day']-3
# Rescale the price: min-max normalize current_price within each currency
# (assumes the cleaned file already provides a current_price column)
marzo = marzo.assign(current_price=marzo.groupby('currency')['current_price']
                     .transform(lambda x: (x - x.min()) / (x.max() - x.min())))
subtest1 = marzo[['sku', 'day', 'sold_quantity']]
subtest1= subtest1.pivot_table(index = 'sku', columns= 'day', values = 'sold_quantity').add_prefix('sales')
subtest2 = marzo[['sku', 'day', 'current_price']]
subtest2= subtest2.pivot_table(index = 'sku', columns= 'day', values = 'current_price').add_prefix('price')
subtest3 = marzo[['sku', 'day', 'minutes_active']]
subtest3= subtest3.pivot_table(index = 'sku', columns= 'day', values = 'minutes_active').add_prefix('active_time')
subtest4 = marzo[['sku', 'day', 'listing_type']]
subtest4= subtest4.pivot_table(index = 'sku', columns= 'day', values = 'listing_type').add_prefix('listing_type')
subtest6 = marzo[['sku', 'day', 'shipping_logistic_type']]
subtest6= subtest6.pivot_table(index = 'sku', columns= 'day', values = 'shipping_logistic_type').add_prefix('shipping_logistic_type')
subtest7 = marzo[['sku', 'day', 'shipping_payment']]
subtest7= subtest7.pivot_table(index = 'sku', columns= 'day', values = 'shipping_payment').add_prefix('shipping_payment')
final = pd.merge(subtest1, subtest2, left_index=True, right_index=True )
final = pd.merge(final, subtest3, left_index=True, right_index=True)
final = pd.merge(final, subtest4, left_index=True, right_index=True)
final = pd.merge(final, subtest6, left_index=True, right_index=True)
final = | pd.merge(final, subtest7, left_index=True, right_index=True) | pandas.merge |
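# --- Editor's note: illustrative sketch only, not part of the script above. ---
# The reshaping pattern used above -- one row per (sku, day) pivoted into one
# wide row per sku, with one column per day -- shown on a tiny made-up frame:
import pandas as pd

long_df = pd.DataFrame({'sku': [1, 1, 2, 2],
                        'day': [1, 2, 1, 2],
                        'sold_quantity': [5, 3, 0, 7]})
wide = (long_df.pivot_table(index='sku', columns='day', values='sold_quantity')
               .add_prefix('sales'))
# 'wide' now has columns 'sales1' and 'sales2', indexed by sku; repeating this
# for each signal and merging on the sku index yields the matrix built above.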
import unittest
from abc import ABC
import numpy as np
import pandas as pd
from toolbox.ml.ml_factor_calculation import ModelWrapper, calc_ml_factor, generate_indexes
from toolbox.utils.slice_holder import SliceHolder
class MyTestCase(unittest.TestCase):
def examples(self):
        # index includes non-trading days
# exactly 60 occurrences of each ticker
first = | pd.Timestamp(year=2010, month=1, day=1) | pandas.Timestamp |
from __future__ import division #brings in Python 3.0 mixed type calculations
import numpy as np
import os
import pandas as pd
import sys
#find parent directory and import model
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
class BeerexInputs(ModelSharedInputs):
"""
Input class for Beerex
"""
def __init__(self):
"""Class representing the inputs for Beerex"""
super(BeerexInputs, self).__init__()
#self.incorporation_depth = pd.Series([], dtype="float")
self.application_rate = pd.Series([], dtype="float")
self.application_method = pd.Series([], dtype="object")
self.crop_type = pd.Series([], dtype="object")
# self.application_units = pd.Series([], dtype="object")
self.empirical_residue = pd.Series([], dtype="object")
self.empirical_pollen = pd.Series([], dtype="float")
self.empirical_nectar = pd.Series([], dtype="float")
self.empirical_jelly = pd.Series([], dtype="float")
self.adult_contact_ld50 = pd.Series([], dtype="float")
self.adult_oral_ld50 = pd.Series([], dtype="float")
self.adult_oral_noael = pd.Series([], dtype="float")
self.larval_ld50 = pd.Series([], dtype="float")
self.larval_noael = pd.Series([], dtype="float")
self.log_kow = pd.Series([], dtype="float")
self.koc = pd.Series([], dtype="float")
self.mass_tree_vegetation = pd.Series([], dtype="float")
self.lw1_jelly = pd.Series([], dtype="float")
self.lw2_jelly = pd.Series([], dtype="float")
self.lw3_jelly = pd.Series([], dtype="float")
self.lw4_nectar = pd.Series([], dtype="float")
self.lw4_pollen = pd.Series([], dtype="float")
self.lw5_nectar = pd.Series([], dtype="float")
self.lw5_pollen = pd.Series([], dtype="float")
self.ld6_nectar = pd.Series([], dtype="float")
self.ld6_pollen = pd.Series([], dtype="float")
self.lq1_jelly = pd.Series([], dtype="float")
self.lq2_jelly = pd.Series([], dtype="float")
self.lq3_jelly = pd.Series([], dtype="float")
self.lq4_jelly = pd.Series([], dtype="float")
self.aw_cell_nectar = pd.Series([], dtype="float")
self.aw_cell_pollen = pd.Series([], dtype="float")
self.aw_brood_nectar = pd.Series([], dtype="float")
self.aw_brood_pollen = pd.Series([], dtype="float")
self.aw_comb_nectar = pd.Series([], dtype="float")
self.aw_comb_pollen = pd.Series([], dtype="float")
self.aw_fpollen_nectar = pd.Series([], dtype="float")
self.aw_fpollen_pollen = pd.Series([], dtype="float")
self.aw_fnectar_nectar = | pd.Series([], dtype="float") | pandas.Series |
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
pd.testing.assert_frame_equal(test,df,False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
pd.testing.assert_frame_equal(df,test,False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_table_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
tsc = Time_Series_Data_Collection(tsd,'time','category')
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_table_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_table_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_table_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_table_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_table(tsd,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_table(tsc,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(y)
| pd.testing.assert_frame_equal(x,expectedX,check_dtype=False) | pandas.testing.assert_frame_equal |
import glob
import numpy as np
import pandas as pd
from collections import OrderedDict
#from . import metrics
import metrics
from .csv_reader import csv_node
__all__ = ['tune_threshold',
'assemble_node',
'assemble_dev_threshold',
'metric_reading',
'Ensemble']
def tune_threshold(y_true, y_prob, metric="f1_score"):
if isinstance(metric, str):
metric = getattr(metrics, metric)
thresholds = np.arange(0.01, 1, 0.01)
best_score = 0.0
best_threshold = 0.5
for threshold in thresholds:
y_pred = np.array([1 if p > threshold else 0 for p in y_prob])
cur_score = metric(y_true, y_pred)
if cur_score > best_score:
best_score = cur_score
best_threshold = threshold
print("Tuned threshold: {:.4f}".format(best_threshold))
return best_threshold
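# Usage sketch (editor's illustrative comment, not part of the original module):
# tune a cutoff on held-out labels/probabilities, then binarize with it.
#
#   y_true = np.array([0, 1, 1, 0, 1])
#   y_prob = np.array([0.20, 0.70, 0.60, 0.30, 0.90])
#   thr = tune_threshold(y_true, y_prob, metric="f1_score")
#   y_pred = (y_prob > thr).astype(int)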
def assemble_node(nodes, key="Y_PROBA", method="median", PIDs=None):
if isinstance(method, str):
method = getattr(np, method)
if PIDs is None:
PIDs = nodes[0].PID
probas = []
for pid in PIDs:
proba = method([x.data[pid][key] for x in nodes])
probas.append(proba)
return np.array(probas)
def assemble_dev_threshold(nodes, method="median", metric="f1_score", PIDs=None):
y_prob = assemble_node(nodes, key="Y_PROBA", method=method, PIDs=PIDs)
y_true = nodes[0].extract("Y_TRUE", PIDs)
threshold = tune_threshold(y_true, y_prob, metric)
return threshold
def metric_reading(y_true, y_pred, y_proba):
if isinstance(y_true, list):
readings = [metric_reading(y_true_, y_pred_, y_proba_)
for y_true_,y_pred_,y_proba_ in zip(y_true, y_pred, y_proba)]
return readings
else:
scores = metrics.classification_summary(y_true, y_pred, [0,1], y_proba, verbose=False)
reading = OrderedDict([('Pos.Acc',scores['pos_acc']*100.0),
('Neg.Acc',scores['neg_acc']*100.0),
('Precision',scores['precision']*100.0),
('Recall',scores['recall']*100.0),
('F1',scores['f1']*100.0),
('ROC',scores['roc']*100.0),
('PRC',scores['prc']*100.0),
('NDCG',scores['ndcg']*100.0),
('TP',scores['tp']),
('FP',scores['fp']),
('TN',scores['tn']),
('FN',scores['fn'])])
return reading
class Ensemble(object):
def __init__(self, results_csvs, dev_csvs, pids=None):
self.results_csvs = results_csvs
self.dev_csvs = dev_csvs
self.build(pids)
@classmethod
def from_keyword(klass, test_keyword, dev_keyword, pids=None):
test_csvs = glob.glob(test_keyword, recursive=True)
dev_csvs = glob.glob(dev_keyword, recursive=True)
return klass(test_csvs, dev_csvs, pids)
@classmethod
def from_folder(klass, results_folder, dev_folder, pids=None):
results_csvs = glob.glob("{}/**/predictions*.csv".format(results_folder), recursive=True)
dev_csvs = glob.glob("{}/**/predictions*.csv".format(dev_folder), recursive=True)
return klass(results_csvs, dev_csvs, pids)
def build(self, pids=None):
self.results = [csv_node.from_csv(x) for x in self.results_csvs]
self.devs = [csv_node.from_csv(x) for x in self.dev_csvs]
self.results = sorted(self.results, key=lambda x: x.seed)
self.devs = sorted(self.devs, key=lambda x: x.seed)
if pids is None:
self.pids = list(self.results[0].PID)
else:
self.pids = pids
try:
self.score_list = self.get_seeds_score_list()
self.score = True
except:
self.score = False
self.proba_list = self.get_seeds_proba_list()
self.pred_list = self.get_seeds_pred_list()
@property
def score_dataframe(self):
return pd.DataFrame(OrderedDict(self.score_list_head+self.score_list))
@property
def proba_dataframe(self):
return pd.DataFrame(OrderedDict(self.proba_list_head+self.proba_list))
@property
def pred_dataframe(self):
return pd.DataFrame(OrderedDict(self.pred_list_head+self.pred_list))
def get_df_by_seed(self, key="Y_PROBA"):
seeds = [x.seed for x in self.results]
probas = [x.extract(key, self.pids) for x in self.results]
df_dict = OrderedDict([("PID", self.pids)] + \
[("SEED_{}".format(seed), proba) for seed, proba in zip(seeds, probas)])
df = | pd.DataFrame(df_dict) | pandas.DataFrame |
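# --- Editor's note: illustrative sketch only, not part of the module above. ---
# The ensembling idea behind assemble_node()/assemble_dev_threshold(): reduce the
# per-seed probabilities for each PID with a summary statistic (median by default),
# then sweep a decision threshold on dev data. The data below is made up.
import numpy as np

seed_probs = np.array([[0.2, 0.7, 0.6],    # predictions from seed 1
                       [0.3, 0.8, 0.4],    # predictions from seed 2
                       [0.1, 0.9, 0.5]])   # predictions from seed 3
ensembled = np.median(seed_probs, axis=0)  # one probability per PID
y_true = np.array([0, 1, 1])

best_t, best_f1 = 0.5, 0.0
for t in np.arange(0.01, 1.0, 0.01):
    y_pred = (ensembled > t).astype(int)
    tp = int(np.sum((y_pred == 1) & (y_true == 1)))
    fp = int(np.sum((y_pred == 1) & (y_true == 0)))
    fn = int(np.sum((y_pred == 0) & (y_true == 1)))
    f1 = 2 * tp / (2 * tp + fp + fn) if (2 * tp + fp + fn) else 0.0
    if f1 > best_f1:
        best_t, best_f1 = t, f1
# best_t plays the role of the threshold returned by assemble_dev_threshold().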
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
tm.assert_series_equal(result, expected)
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
# GH 32621, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
result = ser.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
tm.assert_series_equal(s, s.replace({}))
with tm.assert_produces_warning(FutureWarning):
empty_series = pd.Series([])
tm.assert_series_equal(s, s.replace(empty_series))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_replacer_equals_replacement(self):
# GH 20656
# make sure all replacers are matching against original values
s = pd.Series(["a", "b"])
expected = pd.Series(["b", "a"])
result = s.replace({"a": "b", "b": "a"})
tm.assert_series_equal(expected, result)
def test_replace_unicode_with_number(self):
# GH 15743
s = | pd.Series([1, 2, 3]) | pandas.Series |
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
# enables to set track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
    # checksums are NOT the same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
| tm.assert_series_equal(obj, objs[leaf]) | pandas._testing.assert_series_equal |
# ============================================================================
# Piotroski F-score implementation (data scraped from Yahoo Finance)
# Author - <NAME>
# Please report bugs/issues in the Q&A section
# =============================================================================
import requests
from bs4 import BeautifulSoup
import pandas as pd
tickers = ["AXP","AAPL","BA","CAT","CVX","CSCO","DIS","DOW", "XOM",
"HD","IBM","INTC","JNJ","KO","MCD","MMM","MRK","MSFT",
"NKE","PFE","PG","TRV","UTX","UNH","VZ","V","WMT","WBA"]
#list of tickers whose financial data needs to be extracted
financial_dir_cy = {} #directory to store current year's information
financial_dir_py = {} #directory to store last year's information
financial_dir_py2 = {} #directory to store last to last year's information
for ticker in tickers:
try:
print("scraping financial statement data for ",ticker)
temp_dir = {}
temp_dir2 = {}
temp_dir3 = {}
#getting balance sheet data from yahoo finance for the given ticker
url = 'https://in.finance.yahoo.com/quote/'+ticker+'/balance-sheet?p='+ticker
page = requests.get(url)
page_content = page.content
soup = BeautifulSoup(page_content,'html.parser')
tabl = soup.find_all("div", {"class" : "M(0) Mb(10px) Whs(n) BdEnd Bdc($seperatorColor) D(itb)"})
for t in tabl:
rows = t.find_all("div", {"class" : "rw-expnded"})
for row in rows:
temp_dir[row.get_text(separator='|').split("|")[0]]=row.get_text(separator='|').split("|")[1]
temp_dir2[row.get_text(separator='|').split("|")[0]]=row.get_text(separator='|').split("|")[2]
temp_dir3[row.get_text(separator='|').split("|")[0]]=row.get_text(separator='|').split("|")[3]
#getting income statement data from yahoo finance for the given ticker
url = 'https://in.finance.yahoo.com/quote/'+ticker+'/financials?p='+ticker
page = requests.get(url)
page_content = page.content
soup = BeautifulSoup(page_content,'html.parser')
tabl = soup.find_all("div", {"class" : "M(0) Mb(10px) Whs(n) BdEnd Bdc($seperatorColor) D(itb)"})
for t in tabl:
rows = t.find_all("div", {"class" : "rw-expnded"})
for row in rows:
temp_dir[row.get_text(separator='|').split("|")[0]]=row.get_text(separator='|').split("|")[1]
temp_dir2[row.get_text(separator='|').split("|")[0]]=row.get_text(separator='|').split("|")[2]
temp_dir3[row.get_text(separator='|').split("|")[0]]=row.get_text(separator='|').split("|")[3]
#getting cashflow statement data from yahoo finance for the given ticker
url = 'https://in.finance.yahoo.com/quote/'+ticker+'/cash-flow?p='+ticker
page = requests.get(url)
page_content = page.content
soup = BeautifulSoup(page_content,'html.parser')
tabl = soup.find_all("div", {"class" : "M(0) Mb(10px) Whs(n) BdEnd Bdc($seperatorColor) D(itb)"})
for t in tabl:
rows = t.find_all("div", {"class" : "rw-expnded"})
for row in rows:
temp_dir[row.get_text(separator='|').split("|")[0]]=row.get_text(separator='|').split("|")[1]
temp_dir2[row.get_text(separator='|').split("|")[0]]=row.get_text(separator='|').split("|")[2]
temp_dir3[row.get_text(separator='|').split("|")[0]]=row.get_text(separator='|').split("|")[3]
#combining all extracted information with the corresponding ticker
financial_dir_cy[ticker] = temp_dir
financial_dir_py[ticker] = temp_dir2
financial_dir_py2[ticker] = temp_dir3
except:
print("Problem scraping data for ",ticker)
#storing information in pandas dataframe
combined_financials_cy = | pd.DataFrame(financial_dir_cy) | pandas.DataFrame |
# -*- coding: utf-8 -*-
#!/usr/bin/python3
__author__ = "<NAME>"
__copyright__ = "Copyright 2019-2022"
__license__ = "MIT"
__version__ = "0.1.0"
__maintainer__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>"
__status__ = "Dev"
import textwrap
import pandas as pd
import numpy as np
import json
import argparse
parser = argparse.ArgumentParser(description='Operon classification. ')
parser.add_argument('final_tab_file', type=str,
help='a feature table file from genbank')
parser.add_argument('opr_file', type=str,
help='a operon file from door2 database')
args = parser.parse_args()
fin_tab_file = args.final_tab_file
opr_file = args.opr_file
#fin_tab_file = 'Acetobacter_pasteurianus_IFO_3283-01-42C\GCA_000010945.1_ASM1094v1_feature_table.txt'
#opr_file = 'Acetobacter_pasteurianus_IFO_3283-01-42C\Acetobacter_pasteurianus_IFO_3283-01-42C_NC_017150.txt'
#fin_tab_file = 'test/GCA_000005845.2_ASM584v2_feature_table.txt'
#opr_file = 'test/Escherichia_coli strK-12_substr_MG1655_NC_000913(C).opr'
print('Feature file: ' + fin_tab_file.split('/')[-1])
print('Operon file: ' + opr_file.split('/')[-1])
def op_classify_gene(gene_name):
if (gene_name in ref_rib_genes):
gene_type = 'translational'
elif (gene_name in ref_rnapolym_genes):
gene_type = 'transcriptional'
else:
gene_type = 'none'
return gene_type
def op_classify_name(name):
if any([s for s in ref_rib_func if s.lower() in name]) or (any([s for s in name if s in ref_rib_genes])):
gene_type = 'translational'
elif any([s for s in ref_rnapolym_func if s.lower() in name]) or (any([s for s in name if s in ref_rnapolym_genes])):
gene_type = 'transcriptional'
else:
gene_type = 'none'
return gene_type
nc_file = pd.read_csv(opr_file, sep='\t', comment='<')
nc_dict = dict()
for i in range(0,len(nc_file)):
nc_dict[nc_file['Synonym'][i]] = nc_file['Product'][i]
locus_gene_dict = dict()
locus_name_dict = dict()
final_tab = pd.read_csv(fin_tab_file, sep='\t')
if final_tab['locus_tag'].isnull().values.all():
# #print (opr_file.split('/')[-1])
print ('\t Cannot process! Missing locus_tag column!' )
# print (opr_file.split('/')[-2].replace('_',' ') + '\t' + opr_file.split('/')[-1])
exit()
final_tab['symbol'] = final_tab['symbol'].replace(np.nan, 'no_symbol', regex=True)
final_tab['name'] = final_tab['name'].replace(np.nan,'hypothetical protein', regex=True)
for i in range(0,len(final_tab)):
if 'gene' == final_tab['# feature'][i]:
continue
locus_gene_dict[final_tab['locus_tag'][i]] = final_tab['symbol'][i]
if final_tab['locus_tag'][i] in nc_dict.keys():
locus_name_dict[final_tab['locus_tag'][i]] = nc_dict[final_tab['locus_tag'][i]]
else:
locus_name_dict[final_tab['locus_tag'][i]] = 'hypothetical protein'
opr_input = pd.read_csv(opr_file,sep='\t', comment='<')
opr_input['Synonym'] = opr_input[['OperonID','Synonym']].groupby(['OperonID'])['Synonym'].transform(lambda x: ','.join(x))
opr_input['COG_number'] = opr_input[['OperonID','COG_number']].groupby(['OperonID'])['COG_number'].transform(lambda x: ','.join(x))
opr_input = opr_input.drop_duplicates(subset='OperonID', keep='first') # Remove duplicate rows
operon_start = []
operon_stop = []
operon_list = []
operon_cognum = []
for l in opr_input.index:
operon_list.append(opr_input['Synonym'][l])
operon_start.append(opr_input['Start'][l])
operon_stop.append(opr_input['End'][l])
operon_cognum.append(opr_input['COG_number'][l])
rnapolym_input = pd.read_csv('RNApolymerase_F.txt', sep='\t')
ref_rnapolym_genes = [x.lower() for x in rnapolym_input["Gene"]]
ribosomes_input = | pd.read_csv('Ribosomes_newlist_F.txt', sep='\t') | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/21 0021
# @Author : justin.郑 <EMAIL>
# @File : index_baidu.py
# @Desc    : Fetch Baidu Index data
import json
import urllib.parse
import pandas as pd
import requests
def decrypt(t: str, e: str) -> str:
"""
解密函数
:param t:
:type t:
:param e:
:type e:
:return:
:rtype:
"""
n, i, a, result = list(t), list(e), {}, []
ln = int(len(n) / 2)
start, end = n[ln:], n[:ln]
a = dict(zip(end, start))
return "".join([a[j] for j in e])
def get_ptbk(uniqid: str, cookie: str) -> str:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Connection": "keep-alive",
"Cookie": cookie,
"Host": "index.baidu.com",
"Referer": "http://index.baidu.com/v2/main/index.html",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
session = requests.Session()
session.headers.update(headers)
with session.get(
url=f"http://index.baidu.com/Interface/ptbk?uniqid={uniqid}"
) as response:
ptbk = response.json()["data"]
return ptbk
def baidu_interest_index(word, cookie):
"""
百度指数 人群画像兴趣分布
:param word: 关键词
:param cookie:
:return:
desc 兴趣分类
tgi TGI指数
word_rate 关键词分布比率
all_rate 全网分布比率
period 周期范围
"""
try:
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Cache-Control": "no-cache",
"Cookie": cookie,
"DNT": "1",
"Host": "zhishu.baidu.com",
"Pragma": "no-cache",
"Proxy-Connection": "keep-alive",
"Referer": "zhishu.baidu.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
url = "http://index.baidu.com/api/SocialApi/interest?wordlist[]=%s" % word
r = requests.get(url=url, headers=headers)
data = json.loads(r.text)['data']
period = "%s|%s" % (data['startDate'], data['endDate'])
age_list = data['result'][0]['interest']
age_df = | pd.DataFrame(age_list) | pandas.DataFrame |
# Importing Data in Python (Part 1) on Data Camp
#######################################
# Part 1: Introduction and flat files
#######################################
## Importing entire text files
# Open a file: file
file = open('moby_dick.txt', mode='r')
# Print it
print(file.read())
# Check whether file is closed
print(file.closed)
# Close file
file = file.close()
# Check whether file is closed
print(file)
## Importing text files line by line
# Read & print the first 3 lines
with open('moby_dick.txt') as file:
print(file.readline())
print(file.readline())
print(file.readline())
## Using NumPy to import flat files
# Import package
import numpy as np
# Assign filename to variable: file
file = 'digits.csv'
# Load file as array: digits
digits = np.loadtxt(file, delimiter=',')
# Print datatype of digits
print(type(digits))
# Select and reshape a row
im = digits[21, 1:]
im_sq = np.reshape(im, (28, 28))
# Plot reshaped data (matplotlib.pyplot already loaded as plt)
plt.imshow(im_sq, cmap='Greys', interpolation='nearest')
plt.show()
## Customizing your NumPy import
# Import numpy
import numpy as np
# Assign the filename: file
file = 'digits_header.txt'
# Load the data: data
data = np.loadtxt(file, delimiter='\t', skiprows=1, usecols=[0,2])
# Print data
print(data)
## Importing different datatypes
# Assign filename: file
file = 'seaslug.txt'
# Import file: data
data = np.loadtxt(file, delimiter='\t', dtype=str)
# Print the first element of data
print(data[0])
# Import data as floats and skip the first row: data_float
data_float = np.loadtxt(file, delimiter='\t', dtype=float, skiprows=1)
# Print the 10th element of data_float
print(data_float[9])
# Plot a scatterplot of the data
plt.scatter(data_float[:, 0], data_float[:, 1])
plt.xlabel('time (min.)')
plt.ylabel('percentage of larvae')
plt.show()
## Working with mixed datatypes (2)
# Assign the filename: file
file = 'titanic.csv'
# Import file using np.recfromcsv: d
d = np.recfromcsv(file, delimiter=',', names=True, dtype=None)
# Print out first three entries of d
print(d[:3])
## Using pandas to import flat files as DataFrames (1)
# Import pandas as pd
import pandas as pd
# Assign the filename: file
file = 'titanic.csv'
# Read the file into a DataFrame: df
df = pd.read_csv(file)
# View the head of the DataFrame
print(df.head())
## Using pandas to import flat files as DataFrames (2)
# Assign the filename: file
file = 'digits.csv'
# Read the first 5 rows of the file into a DataFrame: data
data = pd.read_csv(file, nrows=5, header=None)
# Build a numpy array from the DataFrame: data_array
data_array = data.values
# Print the datatype of data_array to the shell
print(type(data_array))
## Customizing your pandas import
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Assign filename: file
file = 'titanic_corrupt.txt'
# Import file: data
data = pd.read_csv(file, sep='\t', comment='#', na_values=['Nothing'])
# Print the head of the DataFrame
print(data.head())
# Plot 'Age' variable in a histogram
| pd.DataFrame.hist(data[['Age']]) | pandas.DataFrame.hist |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from joblib import Memory
import datetime
from azure_table_interface import query_aq_data
# Set up caching for the Azure table access
memory = Memory('./_cache_')
ID_to_name = {'nesta-1': 'Priory Rd (South)',
'nesta-2': 'Priory Rd (North)',
'nesta-2-1': 'Priory Rd (North)',
'nesta-4': 'Horseshoe Bridge',
'nesta-5': 'Kent Rd',
'nesta-6': 'Portswood Rd',
'nesta-7': 'St Denys Rd',
'nesta-8': 'Priory Rd-Kent Rd junction',
'nesta-9': 'Riverside',
'nesta-11': 'St Denys Church',
'nesta-12': 'Hillside Ave',
'nesta-14': 'Beechwood Junior School'}
TICKS_TWO_HOURLY = [datetime.time(hour, 0) for hour in range(0, 24, 2)]
@memory.cache
def get_sensor_data(sensor_id, **kwargs):
res = query_aq_data(sensor_id,
**kwargs)
res = res.resample('15min').mean()
if len(res.columns) == 1:
res.columns = [sensor_id.split("_")[1]]
return res
def get_all_sensor_data(col='pm25', from_date='2019-01-01'):
sensor_ids = ['aq-deployment_' + k for k in ID_to_name.keys()]
print(sensor_ids)
dfs = [get_sensor_data(sensor_id, from_date=from_date, cols=[col]) for sensor_id in sensor_ids]
data = | pd.concat(dfs, axis=1) | pandas.concat |
#Accents are omitted to avoid character-encoding issues
#Required libraries
import sqlite3
from sqlite3 import Error
import pandas as pd
import numpy as np
import sys
import random
#Define the separator used when loading the CSV files
separador = ";"
#Functions for generating random fields
#Function to generate email addresses from first and last names
def generarCorreo(nombre,primerApellido,segundoApellido):
    #Base tuples for random selection
dominios = ('gmail.com','hotmail.com','utp.edu.co','yahoo.com','mintic.gov.co','latinmail.com')
modos = (1,2,3)
modoElegido = random.choice(modos)
if modoElegido == 1:
return nombre+'.'+primerApellido+'@'+random.choice(dominios)
elif modoElegido == 2:
return nombre+str(random.randint(1940,2018))+'_'+segundoApellido+'@'+random.choice(dominios)
elif modoElegido == 3:
return nombre+'.'+primerApellido+'.'+segundoApellido+'0'+str(random.randint(1,999))+'@'+random.choice(dominios)
#Function to generate random cashier/ATM ids
def generarId(tamañoCodigo=7):
    letras = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
numeros = "1234567890"
listaCaracteresCodigo = [str()]*tamañoCodigo
for i in range(len(listaCaracteresCodigo)):#Realizar el proceso cinco veces
#Decidir si es numero o letra
if random.randint(0, 1):
listaCaracteresCodigo[i] = letras[ random.randint(0, len(letras)-1) ]#Letra
else:
listaCaracteresCodigo[i] = numeros[ random.randint(0, len(numeros)-1) ]#Numero en formato str
#Decidir si es del banco principal o si es de un banco adquirido
if random.randint(0, 1):
listaCaracteresCodigo.append('BP') #Banco principal
else:
listaCaracteresCodigo.append('FI') #Firma bancaria integrada (comprada)
#Convertir el listado a string
idGenerado = "".join(listaCaracteresCodigo)
#Retornar id tipo str
return idGenerado
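#Illustrative helper (not called by the script): generarId() returns 7 random alphanumeric
#characters followed by 'BP' (main bank) or 'FI' (acquired banking firm), e.g. 'a3K9x0TBP'.
#The concrete value is random on every call.
def _demo_generarId():
    ejemplo = generarId()
    return ejemplo, ejemplo.endswith(("BP", "FI"))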
#Connection to the SQLite database -> If the database does not exist, a new one is created
def crearConexion(rutaArchivoBD):
conn = None
try:
conn = sqlite3.connect(rutaArchivoBD)
print("Conexion/Creacion Exitosa")
print("Version SQLite3: ",sqlite3.version)
except Error as e:
print(e)
return conn
#Function to map the dtype of a pandas Series to the SQLite3 types
def relacionarTipos(seriePandas):
if seriePandas.dtype == np.int64:
return "INTEGER"
elif seriePandas.dtype == np.float64:
return "REAL"
elif seriePandas.dtype == 'object':
try:
seriePandas = pd.to_datetime(seriePandas)
return "DATETIME"
except:
if isinstance(seriePandas[0],str):
return "VARCHAR(45)"
else:
return "Objeto no identificado"
else:
return "Tipo de dato NO IDENTIFICADO"
#Function to create the table from the statement received as argument
def crearTabla(conn, sql_creacionTabla):
try:
c = conn.cursor()
c.execute(sql_creacionTabla)
except Error as e:
print(e)
#Function to insert records into the created table
def insertarRegistro(conn, sql_crearRetistro, tuplaRegistro):
cur = conn.cursor()
cur.execute(sql_crearRetistro, tuplaRegistro)
conn.commit()
return cur.lastrowid
#Function to generate "type" tables (product type, service, project, publication, etc.)
def generarTablaExplicita(rutaTablaCSV,conn):
#Carga del CSV en un dataframe de pandas
try:
df = pd.read_csv(rutaTablaCSV,sep=separador)
except:
print("Error al leer el archivo CSV de la tabla")
sys.exit1(1)#Terminar prematuramente el procedimiento
#Obtener el nombre de la tabla
nombreTabla = rutaTablaCSV.split('.')[0]
#Iterar por cada columna del dataframe para la especificacion de los campos de la tabla
sqlCreacionCampos = f""" ID_{nombreTabla} INTEGER NOT NULL, \n """
for i,columna in enumerate(df):
sqlCreacionCampos += f""" {columna} {relacionarTipos(df[columna])} NOT NULL, \n """
sqlCreacionCampos += f""" PRIMARY KEY ( ID_{nombreTabla} )"""
#Sentencia con la construccion de la tabla
sql_creacionTabla = f"""CREATE TABLE {nombreTabla} (
{sqlCreacionCampos});
"""
#Salida de diagnostico
print(sql_creacionTabla)
#Proceder a crear la tabla
crearTabla(conn, sql_creacionTabla)
#Una vez creada la tabla insertar todas las filas alojadas en el dataframe
#Por cada una de las filas
for _, fila in df.iterrows():
#Inicializar coleccion de valores del registro
coleccionValores = list()
#Inicializar la consulta
sqlInsertarFila = f"INSERT INTO {nombreTabla}("
#Inicializar sucesion de tokens para la insercion
tokens = ""
#Añadir los nombres de las columnas a la insercion
fila = list(fila)
for i,columna in enumerate(df):
sqlInsertarFila += f"{columna},"
#coleccionValores.append(fila[columna])
coleccionValores.append(fila[i])
tokens += "?,"
#Cambio de la coma y cierre de los nombres
sqlInsertarFila = sqlInsertarFila[:-1] + f") VALUES({tokens[:-1]})"
#Convertir la coleccion a tupla para la insercion
coleccionValores = tuple(coleccionValores)
#Salida de diagnostico, previo al envio a traves de la conexion
print()
print("Antes de enviar:")
print("Consulta-> ",sqlInsertarFila)
print("Valores-> ",coleccionValores)
##input()
#Realizar la insercion
insertarRegistro(conn,sqlInsertarFila,coleccionValores)
#Retornar el nombre de la tabla creada
return nombreTabla
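#Minimal usage sketch (the file names "demo.db" and "TipoProducto.csv" are assumed examples,
#not part of the original project): the helper below is defined for illustration only and
#never executed.
def _demo_generarTablaExplicita():
    conn_demo = crearConexion("demo.db")  # creates demo.db if it does not exist
    return generarTablaExplicita("TipoProducto.csv", conn_demo)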
#Function to generate an alphanumeric identity-document number
def generarCodigo(numCaracteres=7):
alfanumericos = "abcdefghijklmnopqrstuvwxyz1234567890"
primerCaracter = 'CC'
listaSeleccion = [ random.choice(alfanumericos) for _ in range(numCaracteres) ]
listaSeleccion.insert(0,primerCaracter)
return ''.join(listaSeleccion)
#Function to generate a list of dates
def generadorFechas(añoInicial,añoFinal):
#añosConsiderados = (2020,2021)
añosConsiderados = tuple([x for x in range(añoInicial,añoFinal+1)])
contenedorFechasDias = []
for año in añosConsiderados:
for mes in range(1,13):
for dia in range(1,29):
#Limitar las transacciones hasta una fecha actual (final de junio de 2021)
if año == 2021 and mes >= 7:
break
else:
strDia = str(dia)
if dia < 10:
strDia = '0'+ str(dia)
strMes = str(mes)
if mes < 10:
strMes = '0'+ str(mes)
strAño = str(año)
#contenedorFechasDias.append(f"{strDia}-{strMes}-{strAño}")
#contenedorFechasDias.append(f"{strDia}/{strMes}/{strAño}")
contenedorFechasDias.append(f"{strAño}-{strMes}-{strDia}")
return contenedorFechasDias
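#Illustrative helper (not called by the script): generadorFechas only emits days 1-28 of each
#month, so a single year yields 12 * 28 = 336 'YYYY-MM-DD' strings.
def _demo_generadorFechas():
    fechas = generadorFechas(2020, 2020)
    return fechas[0], fechas[-1], len(fechas)  # ('2020-01-01', '2020-12-28', 336)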
#Function to generate the users/clients/responsible-parties table from the fields and domains given in the received CSV
def generarTablaUsuario(rutaTablaCSV,conn,numeroRegistros):
#Carga del CSV en un dataframe de pandas
try:
df = pd.read_csv(rutaTablaCSV,sep=separador)
except:
print("Error al leer el archivo CSV de la tabla")
sys.exit1(1)#Terminar prematuramente el procedimiento
#Contenedor para fechas de nacimiento del usuario para hacer una seleccion aleatoria
coleccionFechasNacimiento = generadorFechas(1940,2002)
#Obtener el nombre de la tabla
nombreTabla = rutaTablaCSV.split('.')[0]
#Iterar por cada columna del dataframe para la especificacion de los campos de la tabla
sqlCreacionCampos = f""" ID_{nombreTabla} INTEGER NOT NULL, \n """
for i,columna in enumerate(df):
sqlCreacionCampos += f""" {columna} {relacionarTipos(df[columna])} NOT NULL, \n """
#Agregar campos especificos tabla usuario
sqlCreacionCampos += f""" Documento_Identidad VARCHAR(12) NOT NULL, \n """
sqlCreacionCampos += f""" Fecha_Nacimiento DATETIME NOT NULL, \n """
#Cierre de la tabla
sqlCreacionCampos += f""" PRIMARY KEY ( ID_{nombreTabla} )"""
#Sentencia con la construccion de la tabla
sql_creacionTabla = f"""CREATE TABLE {nombreTabla} (
{sqlCreacionCampos});
"""
#Salida de diagnostico
print(sql_creacionTabla)
#Proceder a crear la tabla
crearTabla(conn, sql_creacionTabla)
#Una vez creada la tabla insertar todas las filas alojadas en el dataframe
# #Salida de diagnostico
# [print( list(df[columna].dropna()) ) for columna in df]
# #input()#Pausa para revisar consola
#Construir el numero de registros/filas indicados en el argumento correspondiente
for _ in range(numeroRegistros):
#Generar tupla para la consulta, seleccionando valores aleatorios
coleccionValores = [ random.choice( list(df[columna].dropna()) ) for columna in df ]
#Adicionar los campos especificos de usuario
coleccionValores.append(generarId()[:-2])
coleccionValores.append(random.choice(coleccionFechasNacimiento))
#Convertir la coleccion en tupla para generar la sentencia insert
coleccionValores = tuple(coleccionValores)
#Inicializar la consulta
sqlInsertarFila = f"INSERT INTO {nombreTabla}("
#Inicializar sucesion de tokens para la insercion
tokens = ""
#Añadir los nombres de las columnas a la insercion
for columna in df:
sqlInsertarFila += f"{columna},"
tokens += "?,"
#Adicionar tokens por cada campo particular de usuario
tokens += "?,"
tokens += "?,"
#Cambio de la coma y cierre de los nombres para el caso de usuario
sqlInsertarFila = sqlInsertarFila[:-1] + f", Documento_Identidad, Fecha_Nacimiento) VALUES({tokens[:-1]})"
#Salida de diagnostico, previo al envio a traves de la conexion
print()
print("Antes de enviar:")
print("Consulta-> ",sqlInsertarFila)
print("Valores-> ",coleccionValores)
##input()
#Realizar la insercion
insertarRegistro(conn,sqlInsertarFila,coleccionValores)
#Al terminar el prooceso, retornar el nombre de la tabla para realizar consultas de enlazado
return nombreTabla
#Function that returns the list of tuples of all users/responsible parties/clients
def seleccionarUsuarios(conn,nombreTablaUsuarios):
cur = conn.cursor()
cur.execute(f"SELECT * FROM {nombreTablaUsuarios}")
rows = cur.fetchall()
# #Salida de diagnostico
# for row in rows:
# print(row)
return rows
#Function that returns the list of tuples of all types
def seleccionarTipos(conn,nombreTablaTipos):
cur = conn.cursor()
cur.execute(f"SELECT * FROM {nombreTablaTipos}")
rows = cur.fetchall()
# #Salida de diagnostico
# for row in rows:
# print(row)
return rows
#Function to fetch all records from a table
def seleccionarTodos(conn,nombreTabla):
cur = conn.cursor()
cur.execute(f"SELECT * FROM {nombreTabla}")
rows = cur.fetchall()
# #Salida de diagnostico
# for row in rows:
# print(row)
return rows
#Function to generate the product/service/project table from the fields and domains given in the received CSV
def generarTablaProductoServicio(rutaTablaCSV,conn,numeroRegistros,nombreTablaTipos,nombreTablaUsuarios):
#Carga del CSV en un dataframe de pandas
try:
df = | pd.read_csv(rutaTablaCSV,sep=separador) | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
from math import pi
from numpy import sign, nan, append, zeros, max, array, power, sqrt
from pandas import Series, DataFrame, concat
import pandapower as pp
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def _create_costs(net, ppc, gen_lookup, type, idx):
if ppc['gencost'][idx, 0] == 1:
if not len(ppc['gencost'][idx, 4:]) == 2*ppc['gencost'][idx, 3]:
logger.error("In gencost line %s, the number n does not fit to the number of values" %
idx)
pp.create_piecewise_linear_cost(net, gen_lookup.element.at[idx],
gen_lookup.element_type.at[idx],
ppc['gencost'][idx, 4:], type)
elif ppc['gencost'][idx, 0] == 2:
if len(ppc['gencost'][idx, 4:]) == ppc['gencost'][idx, 3]:
n = len(ppc['gencost'][idx, 4:])
values = ppc['gencost'][idx, 4:] / power(1e3, array(range(n))[::-1])
else:
logger.error("In gencost line %s, the number n does not fit to the number of values" %
idx)
pp.create_polynomial_cost(net, gen_lookup.element.at[idx], gen_lookup.element_type.at[idx],
values, type)
else:
logger.info("Cost mode of gencost line %s is unknown." % idx)
def from_ppc(ppc, f_hz=50, validate_conversion=False):
"""
This function converts pypower case files to pandapower net structure.
INPUT:
**ppc** : The pypower case file.
OPTIONAL:
**f_hz** (float, 50) - The frequency of the network.
**validate_conversion** (bool, False) - If True, validate_from_ppc is run after conversion.
OUTPUT:
**net** : pandapower net.
EXAMPLE:
import pandapower.converter as pc
from pypower import case4gs
ppc_net = case4gs.case4gs()
pp_net = pc.from_ppc(ppc_net, f_hz=60)
"""
# --- catch common failures
if Series(ppc['bus'][:, 9] <= 0).any():
logger.info('There are false baseKV given in the pypower case file.')
# --- general_parameters
baseMVA = ppc['baseMVA'] # MVA
omega = pi * f_hz # 1/s
MAX_VAL = 99999.
net = pp.create_empty_network(f_hz=f_hz)
# --- bus data -> create buses, sgen, load, shunt
for i in range(len(ppc['bus'])):
# create buses
pp.create_bus(net, name=int(ppc['bus'][i, 0]), vn_kv=ppc['bus'][i, 9], type="b",
zone=ppc['bus'][i, 6], in_service=bool(ppc['bus'][i, 1] != 4),
max_vm_pu=ppc['bus'][i, 11], min_vm_pu=ppc['bus'][i, 12])
# create sgen, load
if ppc['bus'][i, 2] > 0:
pp.create_load(net, i, p_kw=ppc['bus'][i, 2] * 1e3, q_kvar=ppc['bus'][i, 3] * 1e3,
controllable=False)
elif ppc['bus'][i, 2] < 0:
pp.create_sgen(net, i, p_kw=ppc['bus'][i, 2] * 1e3, q_kvar=ppc['bus'][i, 3] * 1e3,
type="", controllable=False)
elif ppc['bus'][i, 3] != 0:
pp.create_load(net, i, p_kw=ppc['bus'][i, 2] * 1e3, q_kvar=ppc['bus'][i, 3] * 1e3,
controllable=False)
# create shunt
if ppc['bus'][i, 4] != 0 or ppc['bus'][i, 5] != 0:
pp.create_shunt(net, i, p_kw=ppc['bus'][i, 4] * 1e3,
q_kvar=-ppc['bus'][i, 5] * 1e3)
# unused data of ppc: Vm, Va (partwise: in ext_grid), zone
# --- gen data -> create ext_grid, gen, sgen
gen_lookup = DataFrame(nan, columns=['element', 'element_type'],
index=range(len(ppc['gen'][:, 0])))
for i in range(len(ppc['gen'])):
# if in ppc is only one gen -> numpy initially uses one dim array -> change to two dim array
if len(ppc["gen"].shape) == 1:
ppc["gen"] = array(ppc["gen"], ndmin=2)
current_bus_idx = pp.get_element_index(net, 'bus', name=int(ppc['gen'][i, 0]))
current_bus_type = int(ppc['bus'][current_bus_idx, 1])
# create ext_grid
if current_bus_type == 3:
if len(pp.get_connected_elements(net, 'ext_grid', current_bus_idx)) > 0:
logger.info('At bus %d an ext_grid already exists. ' % current_bus_idx +
'Because of that generator %d ' % i +
'is converted not as an ext_grid but as a sgen')
current_bus_type = 1
else:
gen_lookup.element.loc[i] = pp.create_ext_grid(
net, bus=current_bus_idx, vm_pu=ppc['gen'][i, 5],
va_degree=ppc['bus'][current_bus_idx, 8], in_service=bool(ppc['gen'][i, 7] > 0),
max_p_kw=-ppc['gen'][i, 9] * 1e3, min_p_kw=-ppc['gen'][i, 8] * 1e3,
max_q_kvar=ppc['gen'][i, 3] * 1e3, min_q_kvar=ppc['gen'][i, 4] * 1e3)
gen_lookup.element_type.loc[i] = 'ext_grid'
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_kvar of gen %d must be less than max_q_kvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_kw of gen %d must be less than min_p_kw but is not.' % i)
# create gen
elif current_bus_type == 2:
gen_lookup.element.loc[i] = pp.create_gen(
net, bus=current_bus_idx, vm_pu=ppc['gen'][i, 5], p_kw=-ppc['gen'][i, 1] * 1e3,
in_service=bool(ppc['gen'][i, 7] > 0), controllable=True,
max_p_kw=-ppc['gen'][i, 9] * 1e3, min_p_kw=-ppc['gen'][i, 8] * 1e3,
max_q_kvar=ppc['gen'][i, 3] * 1e3, min_q_kvar=ppc['gen'][i, 4] * 1e3)
gen_lookup.element_type.loc[i] = 'gen'
if ppc['gen'][i, 1] < 0:
logger.info('p_kw of gen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_kvar of gen %d must be less than max_q_kvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_kw of gen %d must be less than min_p_kw but is not.' % i)
# create sgen
if current_bus_type == 1:
gen_lookup.element.loc[i] = pp.create_sgen(
net, bus=current_bus_idx, p_kw=-ppc['gen'][i, 1] * 1e3,
q_kvar=-ppc['gen'][i, 2] * 1e3, type="", in_service=bool(ppc['gen'][i, 7] > 0),
max_p_kw=-ppc['gen'][i, 9] * 1e3, min_p_kw=-ppc['gen'][i, 8] * 1e3,
max_q_kvar=ppc['gen'][i, 3] * 1e3, min_q_kvar=ppc['gen'][i, 4] * 1e3,
controllable=True)
gen_lookup.element_type.loc[i] = 'sgen'
if ppc['gen'][i, 1] < 0:
logger.info('p_kw of sgen %d must be less than zero but is not.' % i)
if ppc['gen'][i, 4] > ppc['gen'][i, 3]:
logger.info('min_q_kvar of gen %d must be less than max_q_kvar but is not.' % i)
if -ppc['gen'][i, 9] < -ppc['gen'][i, 8]:
logger.info('max_p_kw of gen %d must be less than min_p_kw but is not.' % i)
# unused data of ppc: Vg (partwise: in ext_grid and gen), mBase, Pc1, Pc2, Qc1min, Qc1max,
# Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30,ramp_q, apf
# --- branch data -> create line, trafo
for i in range(len(ppc['branch'])):
from_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 0]))
to_bus = pp.get_element_index(net, 'bus', name=int(ppc['branch'][i, 1]))
from_vn_kv = ppc['bus'][from_bus, 9]
to_vn_kv = ppc['bus'][to_bus, 9]
if (from_vn_kv == to_vn_kv) & ((ppc['branch'][i, 8] == 0) | (ppc['branch'][i, 8] == 1)) & \
(ppc['branch'][i, 9] == 0):
Zni = ppc['bus'][to_bus, 9]**2/baseMVA # ohm
max_i_ka = ppc['branch'][i, 5]/ppc['bus'][to_bus, 9]/sqrt(3)
if max_i_ka == 0.0:
max_i_ka = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"maximum branch flow")
pp.create_line_from_parameters(
net, from_bus=from_bus, to_bus=to_bus, length_km=1,
r_ohm_per_km=ppc['branch'][i, 2]*Zni, x_ohm_per_km=ppc['branch'][i, 3]*Zni,
c_nf_per_km=ppc['branch'][i, 4]/Zni/omega*1e9/2,
max_i_ka=max_i_ka, type='ol',
in_service=bool(ppc['branch'][i, 10]))
else:
if from_vn_kv >= to_vn_kv:
hv_bus = from_bus
vn_hv_kv = from_vn_kv
lv_bus = to_bus
vn_lv_kv = to_vn_kv
tp_side = 'hv'
else:
hv_bus = to_bus
vn_hv_kv = to_vn_kv
lv_bus = from_bus
vn_lv_kv = from_vn_kv
tp_side = 'lv'
if from_vn_kv == to_vn_kv:
logger.warning('The pypower branch %d (from_bus, to_bus)=(%d, %d) is considered'
' as a transformer because of a ratio != 0 | 1 but it connects '
'the same voltage level', i, ppc['branch'][i, 0],
ppc['branch'][i, 1])
rk = ppc['branch'][i, 2]
xk = ppc['branch'][i, 3]
zk = (rk ** 2 + xk ** 2) ** 0.5
sn = ppc['branch'][i, 5] * 1e3
if sn == 0.0:
sn = MAX_VAL
logger.debug("ppc branch rateA is zero -> Using MAX_VAL instead to calculate " +
"apparent power")
ratio_1 = 0 if ppc['branch'][i, 8] == 0 else (ppc['branch'][i, 8] - 1) * 100
i0_percent = -ppc['branch'][i, 4] * 100 * baseMVA * 1e3 / sn
if i0_percent < 0:
                logger.info('A transformer always consumes reactive power (behaves inductively) but the '
'susceptance of pypower branch %d (from_bus, to_bus)=(%d, %d) is '
'positive.', i, ppc['branch'][i, 0], ppc['branch'][i, 1])
pp.create_transformer_from_parameters(
net, hv_bus=hv_bus, lv_bus=lv_bus, sn_kva=sn, vn_hv_kv=vn_hv_kv,
vn_lv_kv=vn_lv_kv, vsc_percent=sign(xk) * zk * sn / 1e3, vscr_percent=rk * sn / 1e3,
pfe_kw=0, i0_percent=i0_percent, shift_degree=ppc['branch'][i, 9],
tp_st_percent=abs(ratio_1) if ratio_1 else nan,
tp_pos=sign(ratio_1) if ratio_1 else nan,
tp_side=tp_side if ratio_1 else None, tp_mid=0 if ratio_1 else nan)
# unused data of ppc: rateB, rateC
# --- gencost -> create polynomial_cost, piecewise_cost
if 'gencost' in ppc:
if len(ppc['gencost'].shape) == 1:
# reshape gencost if only one gencost is given -> no indexError
ppc['gencost'] = ppc['gencost'].reshape((1, ppc['gencost'].shape[0]))
if ppc['gencost'].shape[0] <= gen_lookup.shape[0]:
idx_p = range(ppc['gencost'].shape[0])
idx_q = []
elif ppc['gencost'].shape[0] > gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], ppc['gencost'].shape[0])
if ppc['gencost'].shape[0] >= 2*gen_lookup.shape[0]:
idx_p = range(gen_lookup.shape[0])
idx_q = range(gen_lookup.shape[0], 2*gen_lookup.shape[0])
for idx in idx_p:
_create_costs(net, ppc, gen_lookup, 'p', idx)
for idx in idx_q:
_create_costs(net, ppc, gen_lookup, 'q', idx)
# areas are unconverted
if validate_conversion:
logger.setLevel(logging.DEBUG)
if not validate_from_ppc(ppc, net):
logger.error("Validation failed.")
return net
def validate_from_ppc(ppc_net, pp_net, max_diff_values={
"vm_pu": 1e-6, "va_degree": 1e-5, "p_branch_kw": 1e-3, "q_branch_kvar": 1e-3, "p_gen_kw": 1e-3,
"q_gen_kvar": 1e-3}):
"""
This function validates the pypower case files to pandapower net structure conversion via a \
comparison of loadflow calculation results. (Hence the opf cost conversion is not validated.)
INPUT:
**ppc_net** - The pypower case file which already contains the pypower powerflow results.
**pp_net** - The pandapower network.
OPTIONAL:
**max_diff_values** - Dict of maximal allowed difference values. The keys must be
'vm_pu', 'va_degree', 'p_branch_kw', 'q_branch_kvar', 'p_gen_kw' and 'q_gen_kvar' and
the values floats.
OUTPUT:
**conversion_success** - conversion_success is returned as False if pypower or pandapower
        cannot calculate a powerflow or if the maximum difference values (max_diff_values)
        cannot be held.
EXAMPLE:
import pandapower.converter as pc
pp_net = cv.from_ppc(ppc_net, f_hz=50)
conversion_success = cv.validate_from_ppc(ppc_net, pp_net)
NOTE:
The user has to take care that the loadflow results already are included in the provided \
ppc_net.
"""
# --- check pypower powerflow success, if possible
ppc_success = True
if 'success' in ppc_net.keys():
if ppc_net['success'] != 1:
ppc_success = False
logger.error("The given ppc data indicates an unsuccessful pypower powerflow: " +
"'ppc_net['success'] != 1'")
if (ppc_net['branch'].shape[1] < 17):
ppc_success = False
logger.error("The shape of given ppc data indicates missing pypower powerflow results.")
# --- try to run a pandapower powerflow
try:
pp.runpp(pp_net, init="dc", calculate_voltage_angles=True, trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(pp_net, calculate_voltage_angles=True, init="flat", trafo_model="pi")
except pp.LoadflowNotConverged:
try:
pp.runpp(pp_net, trafo_model="pi")
except pp.LoadflowNotConverged:
logger.error('The pandapower powerflow does not converge.')
return False
# --- prepare powerflow result comparison by reordering pp results as they are in ppc results
if (ppc_success) & (pp_net.converged):
# --- store pypower powerflow results
ppc_res_branch = ppc_net['branch'][:, 13:17]
ppc_res_bus = ppc_net['bus'][:, 7:9]
ppc_res_gen = ppc_net['gen'][:, 1:3]
# --- pandapower bus result table
pp_res_bus = array(pp_net.res_bus[['vm_pu', 'va_degree']])
# --- pandapower gen result table
pp_res_gen = zeros([1, 2])
# consideration of parallel generators via storing how much generators have been considered
# each node
already_used_gen = Series(zeros([pp_net.bus.shape[0]]), index=pp_net.bus.index).astype(int)
GENS = DataFrame(ppc_net['gen'][:, [0]].astype(int))
change_q_compare = []
for i, j in GENS.iterrows():
current_bus_idx = pp.get_element_index(pp_net, 'bus', name=j[0])
current_bus_type = int(ppc_net['bus'][current_bus_idx, 1])
# ext_grid
if current_bus_type == 3:
if already_used_gen.at[current_bus_idx] == 0:
pp_res_gen = append(pp_res_gen, array(pp_net.res_ext_grid[
pp_net.ext_grid.bus == current_bus_idx][['p_kw', 'q_kvar']])[
already_used_gen.at[current_bus_idx]].reshape((1, 2)), 0)
already_used_gen.at[current_bus_idx] += 1
else:
pp_res_gen = append(pp_res_gen, array(pp_net.res_sgen[
pp_net.sgen.bus == current_bus_idx][['p_kw', 'q_kvar']])[
already_used_gen.at[current_bus_idx]-1].reshape((1, 2)), 0)
already_used_gen.at[current_bus_idx] += 1
change_q_compare += [j[0]]
# gen
elif current_bus_type == 2:
pp_res_gen = append(pp_res_gen, array(pp_net.res_gen[
pp_net.gen.bus == current_bus_idx][['p_kw', 'q_kvar']])[
already_used_gen.at[current_bus_idx]].reshape((1, 2)), 0)
if already_used_gen.at[current_bus_idx] > 0:
change_q_compare += [j[0]]
already_used_gen.at[current_bus_idx] += 1
# sgen
elif current_bus_type == 1:
pp_res_gen = append(pp_res_gen, array(pp_net.res_sgen[
pp_net.sgen.bus == current_bus_idx][['p_kw', 'q_kvar']])[
already_used_gen.at[current_bus_idx]].reshape((1, 2)), 0)
already_used_gen.at[current_bus_idx] += 1
pp_res_gen = pp_res_gen[1:, :] # delete initial zero row
# --- pandapower branch result table
pp_res_branch = zeros([1, 4])
# consideration of parallel branches via storing how much branches have been considered
# each node-to-node-connection
init1 = concat([pp_net.line.from_bus, pp_net.line.to_bus], axis=1).drop_duplicates()
init2 = concat([pp_net.trafo.hv_bus, pp_net.trafo.lv_bus], axis=1).drop_duplicates()
init1['hv_bus'] = nan
init1['lv_bus'] = nan
init2['from_bus'] = nan
init2['to_bus'] = nan
already_used_branches = concat([init1, init2], axis=0)
already_used_branches['number'] = zeros([already_used_branches.shape[0], 1]).astype(int)
BRANCHES = | DataFrame(ppc_net['branch'][:, [0, 1, 8, 9]]) | pandas.DataFrame |
import pandas as pd
import numpy as np
def get_series(data: (pd.Series, pd.DataFrame), col='close') -> pd.DataFrame:
"""
Get close column from intraday data
Args:
data: intraday data
col: column to return
Returns:
        pd.DataFrame
"""
if isinstance(data, pd.Series): return pd.DataFrame(data)
if not isinstance(data.columns, pd.MultiIndex): return data
return data.xs(col, axis=1, level=1)
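# Illustrative helper (not used elsewhere in this module): builds a tiny MultiIndex intraday
# frame with made-up prices and pulls out the 'close' level, mirroring what get_series does
# on real Bloomberg data.
def _demo_get_series() -> pd.DataFrame:
    idx = pd.date_range('2020-01-17 16:26', periods=2, freq='min', tz='UTC')
    cols = pd.MultiIndex.from_product([['RMS FP Equity'], ['open', 'close']])
    demo = pd.DataFrame([[724.8, 725.4], [725.0, 725.2]], index=idx, columns=cols)
    return get_series(demo, col='close')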
def clean_cols(data: pd.DataFrame) -> pd.DataFrame:
"""
Clean column name
"""
data.columns.name = None
return data
def standard_cols(data: pd.DataFrame, col_maps: dict = None) -> pd.DataFrame:
"""
Rename data columns to snake case
Args:
data: input data
col_maps: column maps
Returns:
pd.DataFrame
Examples:
>>> dvd = pd.read_pickle('xbbg/tests/data/sample_dvd_mc_raw.pkl').iloc[:, :4]
>>> dvd
Declared Date Ex-Date Record Date Payable Date
MC FP Equity 2019-07-24 2019-12-06 2019-12-09 2019-12-10
MC FP Equity 2019-01-29 2019-04-25 2019-04-26 2019-04-29
MC FP Equity 2018-07-24 2018-12-04 2018-12-05 2018-12-06
MC FP Equity 2018-01-25 2018-04-17 2018-04-18 2018-04-19
>>> dvd.pipe(standard_cols)
declared_date ex_date record_date payable_date
MC FP Equity 2019-07-24 2019-12-06 2019-12-09 2019-12-10
MC FP Equity 2019-01-29 2019-04-25 2019-04-26 2019-04-29
MC FP Equity 2018-07-24 2018-12-04 2018-12-05 2018-12-06
MC FP Equity 2018-01-25 2018-04-17 2018-04-18 2018-04-19
>>> dvd.pipe(standard_cols, col_maps={'Declared Date': 'dec_date'})
dec_date ex_date record_date payable_date
MC FP Equity 2019-07-24 2019-12-06 2019-12-09 2019-12-10
MC FP Equity 2019-01-29 2019-04-25 2019-04-26 2019-04-29
MC FP Equity 2018-07-24 2018-12-04 2018-12-05 2018-12-06
MC FP Equity 2018-01-25 2018-04-17 2018-04-18 2018-04-19
"""
if col_maps is None: col_maps = dict()
return data.rename(
columns=lambda vv: col_maps.get(
vv, vv.lower().replace(' ', '_').replace('-', '_')
)
)
def apply_fx(
data: (pd.Series, pd.DataFrame),
fx: (int, float, pd.Series, pd.DataFrame), power=-1.
) -> pd.DataFrame:
"""
Apply FX to data
Args:
data: price data
fx: FX price data
power: apply for FX price
Returns:
Price * FX ** Power
where FX uses latest available price
Examples:
>>> pd.set_option('precision', 2)
>>> rms = (
... pd.read_pickle('xbbg/tests/data/sample_rms_ib1.pkl')
... .pipe(get_series, col='close')
... .pipe(to_numeric)
... .pipe(clean_cols)
... .pipe(dropna)
... ).tail()
>>> eur = pd.read_pickle('xbbg/tests/data/sample_eur_ib.pkl')
>>> rms
RMS FP Equity
2020-01-17 16:26:00+00:00 725.4
2020-01-17 16:27:00+00:00 725.2
2020-01-17 16:28:00+00:00 725.4
2020-01-17 16:29:00+00:00 725.0
2020-01-17 16:35:00+00:00 725.6
>>> rms.iloc[:, 0].pipe(apply_fx, fx=eur)
RMS FP Equity
2020-01-17 16:26:00+00:00 653.98
2020-01-17 16:27:00+00:00 653.80
2020-01-17 16:28:00+00:00 653.98
2020-01-17 16:29:00+00:00 653.57
2020-01-17 16:35:00+00:00 654.05
>>> rms.pipe(apply_fx, fx=1.1090)
RMS FP Equity
2020-01-17 16:26:00+00:00 654.10
2020-01-17 16:27:00+00:00 653.92
2020-01-17 16:28:00+00:00 654.10
2020-01-17 16:29:00+00:00 653.74
2020-01-17 16:35:00+00:00 654.28
"""
if isinstance(data, pd.Series): data = | pd.DataFrame(data) | pandas.DataFrame |
"""Tools used for clustering analysis"""
import csv
__author__ = "<NAME> (http://www.vmusco.com)"
import numpy
import os
import pandas
from mlperf.clustering.clusteringtoolkit import ClusteringToolkit
class DatasetFacts:
"""Object alternative to method read_dataset"""
def __init__(self, data):
self.data = data
self.file_path = None
def set_data(self, data):
self.data = data
def target(self):
return self.data.target
def ground_truth_cluster_ids(self):
return self.target().unique()
def nb_clusters(self):
return len(self.ground_truth_cluster_ids())
def data_without_target(self):
return self.data.loc[:, self.data.columns != 'target']
def nb_instances(self):
"""number of instances"""
return self.data.shape[0]
def nb_features(self):
"""number of features (excluding target)"""
return self.data.shape[1] - 1
@staticmethod
def read_dataset(source_file, sep='\t'):
chunksize = 100000
text_file_reader = pandas.read_csv(source_file, sep=sep, chunksize=chunksize, iterator=True)
data = pandas.concat(text_file_reader, ignore_index=True)
ret = DatasetFacts(data)
ret.file_path = source_file
return ret
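# Minimal usage sketch ("dataset.tsv" is an assumed path): the TSV is expected to contain a
# 'target' column holding the ground-truth cluster id of each instance. Defined for
# illustration only and never called.
def _demo_dataset_facts():
    facts = DatasetFacts.read_dataset("dataset.tsv")
    return facts.nb_clusters(), facts.nb_instances(), facts.nb_features()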
def run_for_nr(run_base, variant, algorithm, run_id):
return "{}-{}-{}".format(variant, algorithm, run_id)
def read_dataset(source_file):
print("Reading file {}...".format(source_file))
chunksize = 100000
text_file_reader = pandas.read_csv(source_file, sep='\t', chunksize=chunksize, iterator=True)
data = pandas.concat(text_file_reader, ignore_index=True)
print("Analyzing file...")
ground_truth_cluster_ids = data.target.unique()
number_clusters = len(ground_truth_cluster_ids)
print("#clusters = {}".format(number_clusters))
data_without_target = data.loc[:, data.columns != 'target']
return {
'data': data,
'data_without_target': data_without_target,
'number_clusters': number_clusters,
'target': data.target,
'ground_truth_cluster_ids': ground_truth_cluster_ids
}
def read_centroids_file(drawn_clusters_file_path):
return pandas.read_csv(drawn_clusters_file_path, header=None, dtype='float32').values
def draw_centroids(nb_clusters, data, drawn_clusters_file_path=None):
initial_clusters = data.sample(nb_clusters)
initial_clusters = numpy.asarray(initial_clusters)
if drawn_clusters_file_path:
| pandas.DataFrame(initial_clusters) | pandas.DataFrame |
# Object Oriented Programming Examples
import pandas as pd
df = | pd.DataFrame(['tree frog', 'white rhino', 'zebra']) | pandas.DataFrame |
import math
import glob
import os
import uuid
import itertools
import pandas as pd
import numpy as np
import datetime as dt
class GSTools(object):
@staticmethod
def load_csv_files(dir_str):
'''
This function reads all csv from the given directory, stores them in a dictionary and returns it.
- dir_str should be of the form "../ib-data/nyse-daily-tech/"
- expected format: the csv files should have a 'date' column
'''
# read all paths
csv_paths = sorted(glob.glob(dir_str + "*.csv"))
# create python dictionary
data = {}
for path in csv_paths:
# get the file names
filename = os.path.basename(path)
filename_without_ext = os.path.splitext(filename)[0]
# read the csv file as dataframe
df = pd.read_csv(path)
df['date'] = | pd.to_datetime(df['date'], format='%Y-%m-%d %H:%M:%S') | pandas.to_datetime |