import os
import pickle
import sys
from pathlib import Path
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from Bio import pairwise2
from scipy import interp
from scipy.stats import linregress
from sklearn.metrics import roc_curve, auc, precision_recall_curve
import thoipapy
import thoipapy.validation.bocurve
from thoipapy.utils import make_sure_path_exists
def collect_indiv_validation_data(s, df_set, logging, namedict, predictors, THOIPA_predictor_name, subsets):
"""
Parameters
----------
s
df_set
logging
namedict
predictors
    THOIPA_predictor_name
    subsets
Returns
-------
"""
logging.info("start collect_indiv_validation_data THOIPA_PREDDIMER_TMDOCK")
ROC_AUC_df = pd.DataFrame()
PR_AUC_df = pd.DataFrame()
mean_o_minus_r_by_sample_df = pd.DataFrame()
AUBOC_from_complete_data_ser = pd.Series()
AUC_AUBOC_name_list = []
linechar_name_list = []
AUBOC_list = []
    df_o_minus_r_mean_df = pd.DataFrame()
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import decimal
from datetime import datetime
from distutils.version import LooseVersion
import inspect
import sys
import unittest
from io import StringIO
from typing import List
import numpy as np
import pandas as pd
from pandas.tseries.offsets import DateOffset
from pyspark import StorageLevel
from pyspark.ml.linalg import SparseVector
from pyspark.sql.types import StructType
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.frame import CachedDataFrame
from pyspark.pandas.missing.frame import _MissingPandasLikeDataFrame
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
from pyspark.testing.pandasutils import (
have_tabulate,
PandasOnSparkTestCase,
SPARK_CONF_ARROW_ENABLED,
tabulate_requirement_message,
)
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.utils import name_like_string
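# Tests for the pandas-on-Spark DataFrame API, comparing each operation's result
# against native pandas via assert_eq on small fixture frames.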
class DataFrameTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=np.random.rand(9),
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
return pdf, psdf
def test_dataframe(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf["a"] + 1, pdf["a"] + 1)
self.assert_eq(psdf.columns, pd.Index(["a", "b"]))
self.assert_eq(psdf[psdf["b"] > 2], pdf[pdf["b"] > 2])
self.assert_eq(-psdf[psdf["b"] > 2], -pdf[pdf["b"] > 2])
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b.mean(), pdf.b.mean())
self.assert_eq(psdf.b.var(), pdf.b.var())
self.assert_eq(psdf.b.std(), pdf.b.std())
pdf, psdf = self.df_pair
self.assert_eq(psdf[["a", "b"]], pdf[["a", "b"]])
self.assertEqual(psdf.a.notnull().rename("x").name, "x")
# check ps.DataFrame(ps.Series)
pser = pd.Series([1, 2, 3], name="x", index=np.random.rand(3))
psser = ps.from_pandas(pser)
self.assert_eq(pd.DataFrame(pser), ps.DataFrame(psser))
# check psdf[pd.Index]
pdf, psdf = self.df_pair
column_mask = pdf.columns.isin(["a", "b"])
index_cols = pdf.columns[column_mask]
self.assert_eq(psdf[index_cols], pdf[index_cols])
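    # Helper for the extension-dtype tests below: on pandas 1.1.x through 1.2.1 the
    # frames are compared approximately and every dtype is asserted to be an
    # extension dtype; on other pandas versions an exact comparison is used.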
def _check_extension(self, psdf, pdf):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(psdf, pdf, check_exact=False)
for dtype in psdf.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assert_eq(psdf, pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1, 2, None, 4], dtype="Int8"),
"b": pd.Series([1, None, None, 4], dtype="Int16"),
"c": pd.Series([1, 2, None, None], dtype="Int32"),
"d": pd.Series([None, 2, None, 4], dtype="Int64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_astype_extension_dtypes(self):
pdf = pd.DataFrame(
{
"a": [1, 2, None, 4],
"b": [1, None, None, 4],
"c": [1, 2, None, None],
"d": [None, 2, None, 4],
}
)
psdf = ps.from_pandas(pdf)
astype = {"a": "Int8", "b": "Int16", "c": "Int32", "d": "Int64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_extension_object_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series(["a", "b", None, "c"], dtype="string"),
"b": pd.Series([True, None, False, True], dtype="boolean"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_astype_extension_object_dtypes(self):
pdf = pd.DataFrame({"a": ["a", "b", None, "c"], "b": [True, None, False, True]})
psdf = ps.from_pandas(pdf)
astype = {"a": "string", "b": "boolean"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_extension_float_dtypes(self):
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, None, 4.0], dtype="Float32"),
"b": pd.Series([1.0, None, 3.0, 4.0], dtype="Float64"),
}
)
psdf = ps.from_pandas(pdf)
self._check_extension(psdf, pdf)
self._check_extension(psdf + 1, pdf + 1)
self._check_extension(psdf + psdf, pdf + pdf)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_astype_extension_float_dtypes(self):
pdf = pd.DataFrame({"a": [1.0, 2.0, None, 4.0], "b": [1.0, None, 3.0, 4.0]})
psdf = ps.from_pandas(pdf)
astype = {"a": "Float32", "b": "Float64"}
self._check_extension(psdf.astype(astype), pdf.astype(astype))
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psser = ps.Series([4, 5, 6])
self.assertRaises(ValueError, lambda: psdf.insert(0, "y", psser))
self.assertRaisesRegex(
ValueError, "cannot insert b, already exists", lambda: psdf.insert(1, "b", 10)
)
self.assertRaisesRegex(
TypeError,
'"column" should be a scalar value or tuple that contains scalar values',
lambda: psdf.insert(0, list("abc"), psser),
)
self.assertRaisesRegex(
TypeError,
"loc must be int",
lambda: psdf.insert((1,), "b", 10),
)
self.assertRaisesRegex(
NotImplementedError,
"Assigning column name as tuple is only supported for MultiIndex columns for now.",
lambda: psdf.insert(0, ("e",), 10),
)
self.assertRaises(ValueError, lambda: psdf.insert(0, "e", [7, 8, 9, 10]))
self.assertRaises(ValueError, lambda: psdf.insert(0, "f", ps.Series([7, 8])))
self.assertRaises(AssertionError, lambda: psdf.insert(100, "y", psser))
self.assertRaises(AssertionError, lambda: psdf.insert(1, "y", psser, allow_duplicates=True))
#
# DataFrame with MultiIndex as columns
#
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(1, "b", 10)
pdf.insert(1, "b", 10)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(2, "c", 0.1)
pdf.insert(2, "c", 0.1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
psdf.insert(3, "d", psdf.b + 1)
pdf.insert(3, "d", pdf.b + 1)
self.assert_eq(psdf.sort_index(), pdf.sort_index(), almost=True)
self.assertRaisesRegex(
ValueError, "cannot insert d, already exists", lambda: psdf.insert(4, "d", 11)
)
self.assertRaisesRegex(
ValueError,
r"cannot insert \('x', 'a', 'b'\), already exists",
lambda: psdf.insert(4, ("x", "a", "b"), 11),
)
self.assertRaisesRegex(
ValueError,
'"column" must have length equal to number of column levels.',
lambda: psdf.insert(4, ("e",), 11),
)
def test_inplace(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["a"] = pdf["a"] + 10
psdf["a"] = psdf["a"] + 10
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
def test_assign_list(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
psdf["x"] = [10, 20, 30, 40, 50, 60, 70, 80, 90]
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser, pser)
with self.assertRaisesRegex(ValueError, "Length of values does not match length of index"):
psdf["z"] = [10, 20, 30, 40, 50, 60, 70, 80]
def test_dataframe_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["x"], pdf["x"])
self.assert_eq(psdf["y.z"], pdf["y.z"])
self.assert_eq(psdf["x"]["b"], pdf["x"]["b"])
self.assert_eq(psdf["x"]["b"]["2"], pdf["x"]["b"]["2"])
self.assert_eq(psdf.x, pdf.x)
self.assert_eq(psdf.x.b, pdf.x.b)
self.assert_eq(psdf.x.b["2"], pdf.x.b["2"])
self.assertRaises(KeyError, lambda: psdf["z"])
self.assertRaises(AttributeError, lambda: psdf.z)
self.assert_eq(psdf[("x",)], pdf[("x",)])
self.assert_eq(psdf[("x", "a")], pdf[("x", "a")])
self.assert_eq(psdf[("x", "a", "1")], pdf[("x", "a", "1")])
def test_dataframe_column_level_name(self):
column = pd.Index(["A", "B", "C"], name="X")
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=column, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
def test_dataframe_multiindex_names_level(self):
columns = pd.MultiIndex.from_tuples(
[("X", "A", "Z"), ("X", "B", "Z"), ("Y", "C", "Z"), ("Y", "D", "Z")],
names=["lvl_1", "lvl_2", "lv_3"],
)
pdf = pd.DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16], [17, 18, 19, 20]],
columns=columns,
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.columns.names, pdf.columns.names)
self.assert_eq(psdf.to_pandas().columns.names, pdf.columns.names)
psdf1 = ps.from_pandas(pdf)
self.assert_eq(psdf1.columns.names, pdf.columns.names)
self.assertRaises(
AssertionError,
lambda: ps.DataFrame(psdf1._internal.copy(column_label_names=("level",))),
)
self.assert_eq(psdf["X"], pdf["X"])
self.assert_eq(psdf["X"].columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"].to_pandas().columns.names, pdf["X"].columns.names)
self.assert_eq(psdf["X"]["A"], pdf["X"]["A"])
self.assert_eq(psdf["X"]["A"].columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf["X"]["A"].to_pandas().columns.names, pdf["X"]["A"].columns.names)
self.assert_eq(psdf[("X", "A")], pdf[("X", "A")])
self.assert_eq(psdf[("X", "A")].columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A")].to_pandas().columns.names, pdf[("X", "A")].columns.names)
self.assert_eq(psdf[("X", "A", "Z")], pdf[("X", "A", "Z")])
def test_itertuples(self):
pdf = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]}, index=["dog", "hawk"])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
pdf.itertuples(index=False, name="Animal"), psdf.itertuples(index=False, name="Animal")
):
self.assert_eq(ptuple, ktuple)
for ptuple, ktuple in zip(pdf.itertuples(name=None), psdf.itertuples(name=None)):
self.assert_eq(ptuple, ktuple)
pdf.index = pd.MultiIndex.from_arrays(
[[1, 2], ["black", "brown"]], names=("count", "color")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf.columns = pd.MultiIndex.from_arrays(
[["CA", "WA"], ["age", "children"]], names=("origin", "info")
)
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="Animal"), psdf.itertuples(name="Animal")):
self.assert_eq(ptuple, ktuple)
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(
(pdf + 1).itertuples(name="num"), (psdf + 1).itertuples(name="num")
):
self.assert_eq(ptuple, ktuple)
# DataFrames with a large number of columns (>254)
pdf = pd.DataFrame(np.random.random((1, 255)))
psdf = ps.from_pandas(pdf)
for ptuple, ktuple in zip(pdf.itertuples(name="num"), psdf.itertuples(name="num")):
self.assert_eq(ptuple, ktuple)
def test_iterrows(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3],
("x", "b", "2"): [4, 5, 6],
("y.z", "c.d", "3"): [7, 8, 9],
("x", "b", "4"): [10, 11, 12],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
for (pdf_k, pdf_v), (psdf_k, psdf_v) in zip(pdf.iterrows(), psdf.iterrows()):
self.assert_eq(pdf_k, psdf_k)
self.assert_eq(pdf_v, psdf_v)
def test_reset_index(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index().index, pdf.reset_index().index)
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.index.name = "a"
psdf.index.name = "a"
with self.assertRaisesRegex(ValueError, "cannot insert a, already exists"):
psdf.reset_index()
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
# inplace
pser = pdf.a
psser = psdf.a
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf.columns = ["index", "b"]
psdf.columns = ["index", "b"]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
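    # compute.default_index_type controls how a default index is attached when none
    # is specified: "sequence", "distributed-sequence", or "distributed" (the last
    # does not guarantee deterministic index values).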
def test_reset_index_with_default_index_types(self):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
with ps.option_context("compute.default_index_type", "sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed-sequence"):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with ps.option_context("compute.default_index_type", "distributed"):
            # With the "distributed" default index, the generated index values are
            # non-deterministic, so reset the index before comparing against pandas.
self.assert_eq(psdf.reset_index().to_pandas().reset_index(drop=True), pdf.reset_index())
def test_reset_index_with_multiindex_columns(self):
index = pd.MultiIndex.from_tuples(
[("bird", "falcon"), ("bird", "parrot"), ("mammal", "lion"), ("mammal", "monkey")],
names=["class", "name"],
)
columns = pd.MultiIndex.from_tuples([("speed", "max"), ("species", "type")])
pdf = pd.DataFrame(
[(389.0, "fly"), (24.0, "fly"), (80.5, "run"), (np.nan, "jump")],
index=index,
columns=columns,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(level="class"), pdf.reset_index(level="class"))
self.assert_eq(
psdf.reset_index(level="class", col_level=1),
pdf.reset_index(level="class", col_level=1),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="species"),
pdf.reset_index(level="class", col_level=1, col_fill="species"),
)
self.assert_eq(
psdf.reset_index(level="class", col_level=1, col_fill="genus"),
pdf.reset_index(level="class", col_level=1, col_fill="genus"),
)
with self.assertRaisesRegex(IndexError, "Index has only 2 levels, not 3"):
psdf.reset_index(col_level=2)
pdf.index.names = [("x", "class"), ("y", "name")]
psdf.index.names = [("x", "class"), ("y", "name")]
self.assert_eq(psdf.reset_index(), pdf.reset_index())
with self.assertRaisesRegex(ValueError, "Item must have length equal to number of levels."):
psdf.reset_index(col_level=1)
def test_index_to_frame_reset_index(self):
def check(psdf, pdf):
self.assert_eq(psdf.reset_index(), pdf.reset_index())
self.assert_eq(psdf.reset_index(drop=True), pdf.reset_index(drop=True))
pdf.reset_index(drop=True, inplace=True)
psdf.reset_index(drop=True, inplace=True)
self.assert_eq(psdf, pdf)
pdf, psdf = self.df_pair
check(psdf.index.to_frame(), pdf.index.to_frame())
check(psdf.index.to_frame(index=False), pdf.index.to_frame(index=False))
check(psdf.index.to_frame(name="a"), pdf.index.to_frame(name="a"))
check(psdf.index.to_frame(index=False, name="a"), pdf.index.to_frame(index=False, name="a"))
check(psdf.index.to_frame(name=("x", "a")), pdf.index.to_frame(name=("x", "a")))
check(
psdf.index.to_frame(index=False, name=("x", "a")),
pdf.index.to_frame(index=False, name=("x", "a")),
)
def test_multiindex_column_access(self):
columns = pd.MultiIndex.from_tuples(
[
("a", "", "", "b"),
("c", "", "d", ""),
("e", "", "f", ""),
("e", "g", "", ""),
("", "", "", "h"),
("i", "", "", ""),
]
)
pdf = pd.DataFrame(
[
(1, "a", "x", 10, 100, 1000),
(2, "b", "y", 20, 200, 2000),
(3, "c", "z", 30, 300, 3000),
],
columns=columns,
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf["a"], pdf["a"])
self.assert_eq(psdf["a"]["b"], pdf["a"]["b"])
self.assert_eq(psdf["c"], pdf["c"])
self.assert_eq(psdf["c"]["d"], pdf["c"]["d"])
self.assert_eq(psdf["e"], pdf["e"])
self.assert_eq(psdf["e"][""]["f"], pdf["e"][""]["f"])
self.assert_eq(psdf["e"]["g"], pdf["e"]["g"])
self.assert_eq(psdf[""], pdf[""])
self.assert_eq(psdf[""]["h"], pdf[""]["h"])
self.assert_eq(psdf["i"], pdf["i"])
self.assert_eq(psdf[["a", "e"]], pdf[["a", "e"]])
self.assert_eq(psdf[["e", "a"]], pdf[["e", "a"]])
self.assert_eq(psdf[("a",)], pdf[("a",)])
self.assert_eq(psdf[("e", "g")], pdf[("e", "g")])
# self.assert_eq(psdf[("i",)], pdf[("i",)])
self.assert_eq(psdf[("i", "")], pdf[("i", "")])
self.assertRaises(KeyError, lambda: psdf[("a", "b")])
def test_repr_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df.__repr__()
df["a"] = df["id"]
self.assertEqual(df.__repr__(), df.to_pandas().__repr__())
def test_repr_html_cache_invalidation(self):
# If there is any cache, inplace operations should invalidate it.
df = ps.range(10)
df._repr_html_()
df["a"] = df["id"]
self.assertEqual(df._repr_html_(), df.to_pandas()._repr_html_())
def test_empty_dataframe(self):
pdf = pd.DataFrame({"a": pd.Series([], dtype="i1"), "b": pd.Series([], dtype="str")})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
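        # Re-check the pandas conversion path with Arrow optimization disabled.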
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_all_null_dataframe(self):
pdf = pd.DataFrame(
{
"a": [None, None, None, "a"],
"b": [None, None, None, 1],
"c": [None, None, None] + list(np.arange(1, 2).astype("i1")),
"d": [None, None, None, 1.0],
"e": [None, None, None, True],
"f": [None, None, None] + list(pd.date_range("20130101", periods=1)),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
self.assert_eq(psdf.iloc[:-1], pdf.iloc[:-1])
pdf = pd.DataFrame(
{
"a": pd.Series([None, None, None], dtype="float64"),
"b": pd.Series([None, None, None], dtype="str"),
},
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_nullable_object(self):
pdf = pd.DataFrame(
{
"a": list("abc") + [np.nan, None],
"b": list(range(1, 4)) + [np.nan, None],
"c": list(np.arange(3, 6).astype("i1")) + [np.nan, None],
"d": list(np.arange(4.0, 7.0, dtype="float64")) + [np.nan, None],
"e": [True, False, True, np.nan, None],
"f": list(pd.date_range("20130101", periods=3)) + [np.nan, None],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_assign(self):
pdf, psdf = self.df_pair
psdf["w"] = 1.0
pdf["w"] = 1.0
self.assert_eq(psdf, pdf)
psdf.w = 10.0
pdf.w = 10.0
self.assert_eq(psdf, pdf)
psdf[1] = 1.0
pdf[1] = 1.0
self.assert_eq(psdf, pdf)
psdf = psdf.assign(a=psdf["a"] * 2)
pdf = pdf.assign(a=pdf["a"] * 2)
self.assert_eq(psdf, pdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "w"), ("y", "v")])
pdf.columns = columns
psdf.columns = columns
psdf[("a", "c")] = "def"
pdf[("a", "c")] = "def"
self.assert_eq(psdf, pdf)
psdf = psdf.assign(Z="ZZ")
pdf = pdf.assign(Z="ZZ")
self.assert_eq(psdf, pdf)
psdf["x"] = "ghi"
pdf["x"] = "ghi"
self.assert_eq(psdf, pdf)
def test_head(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.head(2), pdf.head(2))
self.assert_eq(psdf.head(3), pdf.head(3))
self.assert_eq(psdf.head(0), pdf.head(0))
self.assert_eq(psdf.head(-3), pdf.head(-3))
self.assert_eq(psdf.head(-10), pdf.head(-10))
with option_context("compute.ordered_head", True):
self.assert_eq(psdf.head(), pdf.head())
def test_attributes(self):
psdf = self.psdf
self.assertIn("a", dir(psdf))
self.assertNotIn("foo", dir(psdf))
self.assertRaises(AttributeError, lambda: psdf.foo)
psdf = ps.DataFrame({"a b c": [1, 2, 3]})
self.assertNotIn("a b c", dir(psdf))
psdf = ps.DataFrame({"a": [1, 2], 5: [1, 2]})
self.assertIn("a", dir(psdf))
self.assertNotIn(5, dir(psdf))
def test_column_names(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.columns, pdf.columns)
self.assert_eq(psdf[["b", "a"]].columns, pdf[["b", "a"]].columns)
self.assert_eq(psdf["a"].name, pdf["a"].name)
self.assert_eq((psdf["a"] + 1).name, (pdf["a"] + 1).name)
self.assert_eq((psdf.a + psdf.b).name, (pdf.a + pdf.b).name)
self.assert_eq((psdf.a + psdf.b.rename("a")).name, (pdf.a + pdf.b.rename("a")).name)
self.assert_eq((psdf.a + psdf.b.rename()).name, (pdf.a + pdf.b.rename()).name)
self.assert_eq((psdf.a.rename() + psdf.b).name, (pdf.a.rename() + pdf.b).name)
self.assert_eq(
(psdf.a.rename() + psdf.b.rename()).name, (pdf.a.rename() + pdf.b.rename()).name
)
def test_rename_columns(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
psdf.columns = ["x", "y"]
pdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
columns = pdf.columns
columns.name = "lvl_1"
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1"])
self.assert_eq(psdf, pdf)
msg = "Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with self.assertRaisesRegex(ValueError, msg):
psdf.columns = [1, 2, 3, 4]
# Multi-index columns
pdf = pd.DataFrame(
{("A", "0"): [1, 2, 2, 3], ("B", "1"): [1, 2, 3, 4]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
columns = pdf.columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
pdf.columns = ["x", "y"]
psdf.columns = ["x", "y"]
self.assert_eq(psdf.columns, pd.Index(["x", "y"]))
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["x", "y"])
self.assert_eq(psdf.to_spark().columns, ["x", "y"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "x", "y"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.columns, columns)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
columns.names = ["lvl_1", "lvl_2"]
psdf.columns = columns
self.assert_eq(psdf.columns.names, ["lvl_1", "lvl_2"])
self.assert_eq(psdf, pdf)
self.assert_eq(psdf._internal.data_spark_column_names, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark().columns, ["(A, 0)", "(B, 1)"])
self.assert_eq(psdf.to_spark(index_col="index").columns, ["index", "(A, 0)", "(B, 1)"])
def test_rename_dataframe(self):
pdf1 = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
psdf1 = ps.from_pandas(pdf1)
self.assert_eq(
psdf1.rename(columns={"A": "a", "B": "b"}), pdf1.rename(columns={"A": "a", "B": "b"})
)
result_psdf = psdf1.rename(index={1: 10, 2: 20})
result_pdf = pdf1.rename(index={1: 10, 2: 20})
self.assert_eq(result_psdf, result_pdf)
# inplace
pser = result_pdf.A
psser = result_psdf.A
result_psdf.rename(index={10: 100, 20: 200}, inplace=True)
result_pdf.rename(index={10: 100, 20: 200}, inplace=True)
self.assert_eq(result_psdf, result_pdf)
self.assert_eq(psser, pser)
def str_lower(s) -> str:
return str.lower(s)
self.assert_eq(
psdf1.rename(str_lower, axis="columns"), pdf1.rename(str_lower, axis="columns")
)
def mul10(x) -> int:
return x * 10
self.assert_eq(psdf1.rename(mul10, axis="index"), pdf1.rename(mul10, axis="index"))
self.assert_eq(
psdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
pdf1.rename(columns=str_lower, index={1: 10, 2: 20}),
)
idx = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Y", "D")])
pdf2 = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf2.rename(columns=str_lower), pdf2.rename(columns=str_lower))
self.assert_eq(
psdf2.rename(columns=str_lower, level=0), pdf2.rename(columns=str_lower, level=0)
)
self.assert_eq(
psdf2.rename(columns=str_lower, level=1), pdf2.rename(columns=str_lower, level=1)
)
pdf3 = pd.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list("ab"))
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(psdf3.rename(index=str_lower), pdf3.rename(index=str_lower))
self.assert_eq(
psdf3.rename(index=str_lower, level=0), pdf3.rename(index=str_lower, level=0)
)
self.assert_eq(
psdf3.rename(index=str_lower, level=1), pdf3.rename(index=str_lower, level=1)
)
pdf4 = pdf2 + 1
psdf4 = psdf2 + 1
self.assert_eq(psdf4.rename(columns=str_lower), pdf4.rename(columns=str_lower))
pdf5 = pdf3 + 1
psdf5 = psdf3 + 1
self.assert_eq(psdf5.rename(index=str_lower), pdf5.rename(index=str_lower))
msg = "Either `index` or `columns` should be provided."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename()
msg = "`mapper` or `index` or `columns` should be either dict-like or function type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename(mapper=[str_lower], axis=1)
msg = "Mapper dict should have the same value type."
with self.assertRaisesRegex(ValueError, msg):
psdf1.rename({"A": "a", "B": 2}, axis=1)
msg = r"level should be an integer between \[0, column_labels_level\)"
with self.assertRaisesRegex(ValueError, msg):
psdf2.rename(columns=str_lower, level=2)
def test_rename_axis(self):
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis("index2", axis=axis).sort_index(),
psdf.rename_axis("index2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["index2"], axis=axis).sort_index(),
psdf.rename_axis(["index2"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis("cols2", axis=axis).sort_index(),
psdf.rename_axis("cols2", axis=axis).sort_index(),
)
self.assert_eq(
pdf.rename_axis(["cols2"], axis=axis).sort_index(),
psdf.rename_axis(["cols2"], axis=axis).sort_index(),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pdf2.rename_axis("index2", axis="index", inplace=True)
psdf2.rename_axis("index2", axis="index", inplace=True)
self.assert_eq(pdf2.sort_index(), psdf2.sort_index())
self.assertRaises(ValueError, lambda: psdf.rename_axis(["index2", "index3"], axis=0))
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols2", "cols3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.rename_axis(mapper=["index2"], index=["index3"]))
self.assert_eq(
pdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
psdf.rename_axis(index={"index": "index2"}, columns={"cols": "cols2"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index2"}, columns={"missing": "cols2"}).sort_index(),
psdf.rename_axis(
index={"missing": "index2"}, columns={"missing": "cols2"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["index1", "index2"]
)
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
pdf = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
for axis in [0, "index"]:
self.assert_eq(
pdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
psdf.rename_axis(["index3", "index4"], axis=axis).sort_index(),
)
for axis in [1, "columns"]:
self.assert_eq(
pdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
psdf.rename_axis(["cols3", "cols4"], axis=axis).sort_index(),
)
self.assertRaises(
ValueError, lambda: psdf.rename_axis(["index3", "index4", "index5"], axis=0)
)
self.assertRaises(ValueError, lambda: psdf.rename_axis(["cols3", "cols4", "cols5"], axis=1))
self.assert_eq(
pdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
psdf.rename_axis(index={"index1": "index3"}, columns={"cols1": "cols3"}).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index={"missing": "index3"}, columns={"missing": "cols3"}).sort_index(),
psdf.rename_axis(
index={"missing": "index3"}, columns={"missing": "cols3"}
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
psdf.rename_axis(
index={"index1": "index3", "index2": "index4"},
columns={"cols1": "cols3", "cols2": "cols4"},
).sort_index(),
)
self.assert_eq(
pdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
psdf.rename_axis(index=str.upper, columns=str.upper).sort_index(),
)
def test_dot(self):
psdf = self.psdf
with self.assertRaisesRegex(TypeError, "Unsupported type DataFrame"):
psdf.dot(psdf)
def test_dot_in_column_name(self):
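        # A column name containing "." is backquoted in the Spark SQL expression so
        # it is treated as a single identifier rather than nested field access.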
self.assert_eq(
ps.DataFrame(ps.range(1)._internal.spark_frame.selectExpr("1L as `a.b`"))["a.b"],
ps.Series([1], name="a.b"),
)
def test_aggregate(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=["A", "B", "C"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(), # TODO?: fix column order
pdf.agg(["sum", "min"])[["A", "B", "C"]].sort_index(),
)
self.assert_eq(
psdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
pdf.agg({"A": ["sum", "min"], "B": ["min", "max"]})[["A", "B"]].sort_index(),
)
self.assertRaises(KeyError, lambda: psdf.agg({"A": ["sum", "min"], "X": ["min", "max"]}))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
pdf.agg(["sum", "min"])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
pdf.agg({("X", "A"): ["sum", "min"], ("X", "B"): ["min", "max"]})[
[("X", "A"), ("X", "B")]
].sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.agg({"X": ["sum", "min"], "Y": ["min", "max"]}))
# non-string names
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [np.nan, np.nan, np.nan]], columns=[10, 20, 30]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
pdf.agg(["sum", "min"])[[10, 20, 30]].sort_index(),
)
self.assert_eq(
psdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
pdf.agg({10: ["sum", "min"], 20: ["min", "max"]})[[10, 20]].sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", 10), ("X", 20), ("Y", 30)])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
pdf.agg(["sum", "min"])[[("X", 10), ("X", 20), ("Y", 30)]].sort_index(),
)
self.assert_eq(
psdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
pdf.agg({("X", 10): ["sum", "min"], ("X", 20): ["min", "max"]})[
[("X", 10), ("X", 20)]
].sort_index(),
)
pdf = pd.DataFrame(
[datetime(2019, 2, 2, 0, 0, 0, 0), datetime(2019, 2, 3, 0, 0, 0, 0)],
columns=["timestamp"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.timestamp.min(), pdf.timestamp.min())
self.assert_eq(psdf.timestamp.max(), pdf.timestamp.max())
self.assertRaises(ValueError, lambda: psdf.agg(("sum", "min")))
def test_droplevel(self):
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
pdf.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
psdf = ps.from_pandas(pdf)
self.assertRaises(ValueError, lambda: psdf.droplevel(["a", "b"]))
self.assertRaises(ValueError, lambda: psdf.droplevel([1, 1, 1, 1, 1]))
self.assertRaises(IndexError, lambda: psdf.droplevel(2))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a"}))
self.assertRaises(KeyError, lambda: psdf.droplevel({"a": 1}))
self.assertRaises(ValueError, lambda: psdf.droplevel(["level_1", "level_2"], axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(2, axis=1))
self.assertRaises(IndexError, lambda: psdf.droplevel(-3, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1"}, axis=1))
self.assertRaises(KeyError, lambda: psdf.droplevel({"level_1": 1}, axis=1))
self.assert_eq(pdf.droplevel("a"), psdf.droplevel("a"))
self.assert_eq(pdf.droplevel(["a"]), psdf.droplevel(["a"]))
self.assert_eq(pdf.droplevel(("a",)), psdf.droplevel(("a",)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel("level_1", axis=1), psdf.droplevel("level_1", axis=1))
self.assert_eq(pdf.droplevel(["level_1"], axis=1), psdf.droplevel(["level_1"], axis=1))
self.assert_eq(pdf.droplevel(("level_1",), axis=1), psdf.droplevel(("level_1",), axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
self.assert_eq(pdf.droplevel(-1, axis=1), psdf.droplevel(-1, axis=1))
# Tupled names
pdf.columns.names = [("level", 1), ("level", 2)]
pdf.index.names = [("a", 10), ("x", 20)]
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.droplevel("a"))
self.assertRaises(KeyError, lambda: psdf.droplevel(("a", 10)))
self.assert_eq(pdf.droplevel([("a", 10)]), psdf.droplevel([("a", 10)]))
self.assert_eq(
pdf.droplevel([("level", 1)], axis=1), psdf.droplevel([("level", 1)], axis=1)
)
# non-string names
pdf = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis([10.0, 20.0])
)
pdf.columns = pd.MultiIndex.from_tuples([("c", "e"), ("d", "f")], names=[100.0, 200.0])
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.droplevel(10.0), psdf.droplevel(10.0))
self.assert_eq(pdf.droplevel([10.0]), psdf.droplevel([10.0]))
self.assert_eq(pdf.droplevel((10.0,)), psdf.droplevel((10.0,)))
self.assert_eq(pdf.droplevel(0), psdf.droplevel(0))
self.assert_eq(pdf.droplevel(-1), psdf.droplevel(-1))
self.assert_eq(pdf.droplevel(100.0, axis=1), psdf.droplevel(100.0, axis=1))
self.assert_eq(pdf.droplevel(0, axis=1), psdf.droplevel(0, axis=1))
def test_drop(self):
pdf = pd.DataFrame({"x": [1, 2], "y": [3, 4], "z": [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
# Assert 'labels' or 'columns' parameter is set
expected_error_message = "Need to specify at least one of 'labels' or 'columns'"
with self.assertRaisesRegex(ValueError, expected_error_message):
psdf.drop()
#
# Drop columns
#
# Assert using a str for 'labels' works
self.assert_eq(psdf.drop("x", axis=1), pdf.drop("x", axis=1))
self.assert_eq((psdf + 1).drop("x", axis=1), (pdf + 1).drop("x", axis=1))
# Assert using a list for 'labels' works
self.assert_eq(psdf.drop(["y", "z"], axis=1), pdf.drop(["y", "z"], axis=1))
self.assert_eq(psdf.drop(["x", "y", "z"], axis=1), pdf.drop(["x", "y", "z"], axis=1))
# Assert using 'columns' instead of 'labels' produces the same results
self.assert_eq(psdf.drop(columns="x"), pdf.drop(columns="x"))
self.assert_eq(psdf.drop(columns=["y", "z"]), pdf.drop(columns=["y", "z"]))
self.assert_eq(psdf.drop(columns=["x", "y", "z"]), pdf.drop(columns=["x", "y", "z"]))
self.assert_eq(psdf.drop(columns=[]), pdf.drop(columns=[]))
columns = pd.MultiIndex.from_tuples([(1, "x"), (1, "y"), (2, "z")])
pdf.columns = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(columns=1), pdf.drop(columns=1))
self.assert_eq(psdf.drop(columns=(1, "x")), pdf.drop(columns=(1, "x")))
self.assert_eq(psdf.drop(columns=[(1, "x"), 2]), pdf.drop(columns=[(1, "x"), 2]))
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
self.assertRaises(KeyError, lambda: psdf.drop(columns=3))
self.assertRaises(KeyError, lambda: psdf.drop(columns=(1, "z")))
pdf.index = pd.MultiIndex.from_tuples([("i", 0), ("j", 1)])
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
pdf.drop(columns=[(1, "x"), (1, "y"), (2, "z")]),
)
# non-string names
pdf = pd.DataFrame({10: [1, 2], 20: [3, 4], 30: [5, 6]}, index=np.random.rand(2))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(10, axis=1), pdf.drop(10, axis=1))
self.assert_eq(psdf.drop([20, 30], axis=1), pdf.drop([20, 30], axis=1))
#
# Drop rows
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
# Given labels (and axis = 0)
self.assert_eq(psdf.drop(labels="A", axis=0), pdf.drop(labels="A", axis=0))
self.assert_eq(psdf.drop(labels="A"), pdf.drop(labels="A"))
self.assert_eq((psdf + 1).drop(labels="A"), (pdf + 1).drop(labels="A"))
self.assert_eq(psdf.drop(labels=["A", "C"], axis=0), pdf.drop(labels=["A", "C"], axis=0))
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=["A", "B", "C"], axis=0), pdf.drop(labels=["A", "B", "C"], axis=0)
)
# Given index
self.assert_eq(psdf.drop(index="A"), pdf.drop(index="A"))
self.assert_eq(psdf.drop(index=["A", "C"]), pdf.drop(index=["A", "C"]))
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
self.assert_eq(psdf.drop(index=[]), pdf.drop(index=[]))
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(psdf.drop(index=["A", "B", "C"]), pdf.drop(index=["A", "B", "C"]))
# Non-string names
pdf.index = [10, 20, 30]
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(labels=10, axis=0), pdf.drop(labels=10, axis=0))
self.assert_eq(psdf.drop(labels=[10, 30], axis=0), pdf.drop(labels=[10, 30], axis=0))
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(labels=[10, 20, 30], axis=0), pdf.drop(labels=[10, 20, 30], axis=0)
)
# MultiIndex
pdf.index = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assertRaises(NotImplementedError, lambda: psdf.drop(labels=[("a", "x")]))
#
# Drop rows and columns
#
pdf = pd.DataFrame({"X": [1, 2, 3], "Y": [4, 5, 6], "Z": [7, 8, 9]}, index=["A", "B", "C"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.drop(index="A", columns="X"), pdf.drop(index="A", columns="X"))
self.assert_eq(
psdf.drop(index=["A", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
with ps.option_context("compute.isin_limit", 2):
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
pdf.drop(index=["A", "B", "C"], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=[], columns=["X", "Z"]),
pdf.drop(index=[], columns=["X", "Z"]),
)
self.assert_eq(
psdf.drop(index=["A", "B", "C"], columns=[]),
pdf.drop(index=["A", "B", "C"], columns=[]),
)
self.assert_eq(
psdf.drop(index=[], columns=[]),
pdf.drop(index=[], columns=[]),
)
self.assertRaises(
ValueError,
lambda: psdf.drop(labels="A", axis=0, columns="X"),
)
def _test_dropna(self, pdf, axis):
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(psdf.dropna(axis=axis, subset=["x"]), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(psdf.dropna(axis=axis, subset="x"), pdf.dropna(axis=axis, subset=["x"]))
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"]), pdf.dropna(axis=axis, subset=["y", "z"])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=["y", "z"], how="all"),
pdf.dropna(axis=axis, subset=["y", "z"], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
pdf.dropna(axis=axis, thresh=1, subset=["y", "z"]),
)
pdf2 = pdf.copy()
psdf2 = psdf.copy()
pser = pdf2[pdf2.columns[0]]
psser = psdf2[psdf2.columns[0]]
pdf2.dropna(inplace=True, axis=axis)
psdf2.dropna(inplace=True, axis=axis)
self.assert_eq(psdf2, pdf2)
self.assert_eq(psser, pser)
# multi-index
columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
if axis == 0:
pdf.columns = columns
else:
pdf.index = columns
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=axis), pdf.dropna(axis=axis))
self.assert_eq(psdf.dropna(axis=axis, how="all"), pdf.dropna(axis=axis, how="all"))
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "x")]), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=("a", "x")), pdf.dropna(axis=axis, subset=[("a", "x")])
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")]),
)
self.assert_eq(
psdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
pdf.dropna(axis=axis, subset=[("a", "y"), ("b", "z")], how="all"),
)
self.assert_eq(psdf.dropna(axis=axis, thresh=2), pdf.dropna(axis=axis, thresh=2))
self.assert_eq(
psdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
pdf.dropna(axis=axis, thresh=1, subset=[("a", "y"), ("b", "z")]),
)
def test_dropna_axis_index(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self._test_dropna(pdf, axis=0)
# empty
pdf = pd.DataFrame(index=np.random.rand(6))
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(), pdf.dropna())
self.assert_eq(psdf.dropna(how="all"), pdf.dropna(how="all"))
self.assert_eq(psdf.dropna(thresh=0), pdf.dropna(thresh=0))
self.assert_eq(psdf.dropna(thresh=1), pdf.dropna(thresh=1))
with self.assertRaisesRegex(ValueError, "No axis named foo"):
psdf.dropna(axis="foo")
self.assertRaises(KeyError, lambda: psdf.dropna(subset="1"))
with self.assertRaisesRegex(ValueError, "invalid how option: 1"):
psdf.dropna(how=1)
with self.assertRaisesRegex(TypeError, "must specify how or thresh"):
psdf.dropna(how=None)
def test_dropna_axis_column(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=[str(r) for r in np.random.rand(6)],
).T
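        # Transposed so the x/y/z labels become the index and dropna(axis=1)
        # operates across the columns.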
self._test_dropna(pdf, axis=1)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
ValueError, "The length of each subset must be the same as the index size."
):
psdf.dropna(subset=(["x", "y"]), axis=1)
# empty
pdf = pd.DataFrame({"x": [], "y": [], "z": []})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.dropna(axis=1), pdf.dropna(axis=1))
self.assert_eq(psdf.dropna(axis=1, how="all"), pdf.dropna(axis=1, how="all"))
self.assert_eq(psdf.dropna(axis=1, thresh=0), pdf.dropna(axis=1, thresh=0))
self.assert_eq(psdf.dropna(axis=1, thresh=1), pdf.dropna(axis=1, thresh=1))
def test_dtype(self):
pdf = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
# multi-index columns
columns = pd.MultiIndex.from_tuples(zip(list("xxxyyz"), list("abcdef")))
pdf.columns = columns
psdf.columns = columns
self.assertTrue((psdf.dtypes == pdf.dtypes).all())
def test_fillna(self):
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({"x": -1, "y": -2, "z": -5}), pdf.fillna({"x": -1, "y": -2, "z": -5})
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
pdf = pdf.set_index(["x", "y"])
psdf = ps.from_pandas(pdf)
# check multi index
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
pser = pdf.z
psser = psdf.z
pdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
psdf.fillna({"x": -1, "y": -2, "z": -5}, inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
s_nan = pd.Series([-1, -2, -5], index=["x", "y", "z"], dtype=int)
self.assert_eq(psdf.fillna(s_nan), pdf.fillna(s_nan))
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis=1)
with self.assertRaisesRegex(NotImplementedError, "fillna currently only"):
psdf.fillna(-1, axis="columns")
with self.assertRaisesRegex(ValueError, "limit parameter for value is not support now"):
psdf.fillna(-1, limit=1)
with self.assertRaisesRegex(TypeError, "Unsupported.*DataFrame"):
psdf.fillna(pd.DataFrame({"x": [-1], "y": [-1], "z": [-1]}))
with self.assertRaisesRegex(TypeError, "Unsupported.*int64"):
psdf.fillna({"x": np.int64(-6), "y": np.int64(-4), "z": -5})
with self.assertRaisesRegex(ValueError, "Expecting 'pad', 'ffill', 'backfill' or 'bfill'."):
psdf.fillna(method="xxx")
with self.assertRaisesRegex(
ValueError, "Must specify a fillna 'value' or 'method' parameter."
):
psdf.fillna()
# multi-index columns
pdf = pd.DataFrame(
{
("x", "a"): [np.nan, 2, 3, 4, np.nan, 6],
("x", "b"): [1, 2, np.nan, 4, np.nan, np.nan],
("y", "c"): [1, 2, 3, 4, np.nan, np.nan],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
self.assert_eq(pdf.fillna(method="ffill"), psdf.fillna(method="ffill"))
self.assert_eq(pdf.fillna(method="ffill", limit=2), psdf.fillna(method="ffill", limit=2))
self.assert_eq(pdf.fillna(method="bfill"), psdf.fillna(method="bfill"))
self.assert_eq(pdf.fillna(method="bfill", limit=2), psdf.fillna(method="bfill", limit=2))
self.assert_eq(psdf.fillna({"x": -1}), pdf.fillna({"x": -1}))
self.assert_eq(
psdf.fillna({"x": -1, ("x", "b"): -2}), pdf.fillna({"x": -1, ("x", "b"): -2})
)
self.assert_eq(
psdf.fillna({("x", "b"): -2, "x": -1}), pdf.fillna({("x", "b"): -2, "x": -1})
)
# check multi index
pdf = pdf.set_index([("x", "a"), ("x", "b")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.fillna(-1), pdf.fillna(-1))
self.assert_eq(
psdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
pdf.fillna({("x", "a"): -1, ("x", "b"): -2, ("y", "c"): -5}),
)
def test_isnull(self):
pdf = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")}, index=np.random.rand(6)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.notnull(), pdf.notnull())
self.assert_eq(psdf.isnull(), pdf.isnull())
def test_to_datetime(self):
pdf = pd.DataFrame(
{"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}, index=np.random.rand(2)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pd.to_datetime(pdf), ps.to_datetime(psdf))
def test_nunique(self):
pdf = pd.DataFrame({"A": [1, 2, 3], "B": [np.nan, 3, np.nan]}, index=np.random.rand(3))
psdf = ps.from_pandas(pdf)
# Assert NaNs are dropped by default
self.assert_eq(psdf.nunique(), pdf.nunique())
# Assert including NaN values
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
# Assert approximate counts
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True),
pd.Series([103], index=["A"]),
)
self.assert_eq(
ps.DataFrame({"A": range(100)}).nunique(approx=True, rsd=0.01),
pd.Series([100], index=["A"]),
)
        # Assert that axis=1 is not supported yet
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.nunique(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("Y", "B")], names=["1", "2"])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.nunique(), pdf.nunique())
self.assert_eq(psdf.nunique(dropna=False), pdf.nunique(dropna=False))
def test_sort_values(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
for ascending in [True, False]:
for na_position in ["first", "last"]:
self.assert_eq(
psdf.sort_values("a", ascending=ascending, na_position=na_position),
pdf.sort_values("a", ascending=ascending, na_position=na_position),
)
self.assert_eq(psdf.sort_values(["a", "b"]), pdf.sort_values(["a", "b"]))
self.assert_eq(
psdf.sort_values(["a", "b"], ascending=[False, True]),
pdf.sort_values(["a", "b"], ascending=[False, True]),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], ascending=[False]))
self.assert_eq(
psdf.sort_values(["a", "b"], na_position="first"),
pdf.sort_values(["a", "b"], na_position="first"),
)
self.assertRaises(ValueError, lambda: psdf.sort_values(["b", "a"], na_position="invalid"))
pserA = pdf.a
psserA = psdf.a
self.assert_eq(psdf.sort_values("b", inplace=True), pdf.sort_values("b", inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# multi-index columns
pdf = pd.DataFrame(
{("X", 10): [1, 2, 3, 4, 5, None, 7], ("X", 20): [7, 6, 5, 4, 3, 2, 1]},
index=np.random.rand(7),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(("X", 20)), pdf.sort_values(("X", 20)))
self.assert_eq(
psdf.sort_values([("X", 20), ("X", 10)]), pdf.sort_values([("X", 20), ("X", 10)])
)
self.assertRaisesRegex(
ValueError,
"For a multi-index, the label must be a tuple with elements",
lambda: psdf.sort_values(["X"]),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 5, None, 7], 20: [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_values(20), pdf.sort_values(20))
self.assert_eq(psdf.sort_values([20, 10]), pdf.sort_values([20, 10]))
def test_sort_index(self):
pdf = pd.DataFrame(
{"A": [2, 1, np.nan], "B": [np.nan, 0, np.nan]}, index=["b", "a", np.nan]
)
psdf = ps.from_pandas(pdf)
# Assert invalid parameters
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(axis=1))
self.assertRaises(NotImplementedError, lambda: psdf.sort_index(kind="mergesort"))
self.assertRaises(ValueError, lambda: psdf.sort_index(na_position="invalid"))
# Assert default behavior without parameters
self.assert_eq(psdf.sort_index(), pdf.sort_index())
# Assert sorting descending
self.assert_eq(psdf.sort_index(ascending=False), pdf.sort_index(ascending=False))
# Assert sorting NA indices first
self.assert_eq(psdf.sort_index(na_position="first"), pdf.sort_index(na_position="first"))
# Assert sorting descending and NA indices first
self.assert_eq(
psdf.sort_index(ascending=False, na_position="first"),
pdf.sort_index(ascending=False, na_position="first"),
)
# Assert sorting inplace
pserA = pdf.A
psserA = psdf.A
self.assertEqual(psdf.sort_index(inplace=True), pdf.sort_index(inplace=True))
self.assert_eq(psdf, pdf)
self.assert_eq(psserA, pserA)
# Assert multi-indices
pdf = pd.DataFrame(
{"A": range(4), "B": range(4)[::-1]}, index=[["b", "b", "a", "a"], [1, 0, 1, 0]]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_index(level=[1, 0]), pdf.sort_index(level=[1, 0]))
self.assert_eq(psdf.reset_index().sort_index(), pdf.reset_index().sort_index())
# Assert with multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_swaplevel(self):
# MultiIndex with two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
# MultiIndex with more than two levels
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"], ["l", "m", "s", "xs"]]
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf = pd.DataFrame({"x1": ["a", "b", "c", "d"], "x2": ["a", "b", "c", "d"]}, index=pidx)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(), psdf.swaplevel())
self.assert_eq(pdf.swaplevel(0, 1), psdf.swaplevel(0, 1))
self.assert_eq(pdf.swaplevel(0, 2), psdf.swaplevel(0, 2))
self.assert_eq(pdf.swaplevel(1, 2), psdf.swaplevel(1, 2))
self.assert_eq(pdf.swaplevel(1, 1), psdf.swaplevel(1, 1))
self.assert_eq(pdf.swaplevel(-1, -2), psdf.swaplevel(-1, -2))
self.assert_eq(pdf.swaplevel("number", "color"), psdf.swaplevel("number", "color"))
self.assert_eq(pdf.swaplevel("number", "size"), psdf.swaplevel("number", "size"))
self.assert_eq(pdf.swaplevel("color", "size"), psdf.swaplevel("color", "size"))
self.assert_eq(
pdf.swaplevel("color", "size", axis="index"),
psdf.swaplevel("color", "size", axis="index"),
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=0), psdf.swaplevel("color", "size", axis=0)
)
pdf = pd.DataFrame(
{
"x1": ["a", "b", "c", "d"],
"x2": ["a", "b", "c", "d"],
"x3": ["a", "b", "c", "d"],
"x4": ["a", "b", "c", "d"],
}
)
pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color", "size"))
pdf.columns = pidx
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.swaplevel(axis=1), psdf.swaplevel(axis=1))
self.assert_eq(pdf.swaplevel(0, 1, axis=1), psdf.swaplevel(0, 1, axis=1))
self.assert_eq(pdf.swaplevel(0, 2, axis=1), psdf.swaplevel(0, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 2, axis=1), psdf.swaplevel(1, 2, axis=1))
self.assert_eq(pdf.swaplevel(1, 1, axis=1), psdf.swaplevel(1, 1, axis=1))
self.assert_eq(pdf.swaplevel(-1, -2, axis=1), psdf.swaplevel(-1, -2, axis=1))
self.assert_eq(
pdf.swaplevel("number", "color", axis=1), psdf.swaplevel("number", "color", axis=1)
)
self.assert_eq(
pdf.swaplevel("number", "size", axis=1), psdf.swaplevel("number", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis=1), psdf.swaplevel("color", "size", axis=1)
)
self.assert_eq(
pdf.swaplevel("color", "size", axis="columns"),
psdf.swaplevel("color", "size", axis="columns"),
)
# Error conditions
self.assertRaises(AssertionError, lambda: ps.DataFrame([1, 2]).swaplevel())
self.assertRaises(IndexError, lambda: psdf.swaplevel(0, 9, axis=1))
self.assertRaises(KeyError, lambda: psdf.swaplevel("not_number", "color", axis=1))
self.assertRaises(ValueError, lambda: psdf.swaplevel(axis=2))
def test_swapaxes(self):
pdf = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["x", "y", "z"], columns=["a", "b", "c"]
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.swapaxes(0, 1), pdf.swapaxes(0, 1))
self.assert_eq(psdf.swapaxes(1, 0), pdf.swapaxes(1, 0))
self.assert_eq(psdf.swapaxes("index", "columns"), pdf.swapaxes("index", "columns"))
self.assert_eq(psdf.swapaxes("columns", "index"), pdf.swapaxes("columns", "index"))
self.assert_eq((psdf + 1).swapaxes(0, 1), (pdf + 1).swapaxes(0, 1))
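        # copy=False is not supported; unknown axis labels are rejected.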
self.assertRaises(AssertionError, lambda: psdf.swapaxes(0, 1, copy=False))
self.assertRaises(ValueError, lambda: psdf.swapaxes(0, -1))
def test_nlargest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nlargest(n=5, columns="a"), pdf.nlargest(5, columns="a"))
self.assert_eq(psdf.nlargest(n=5, columns=["a", "b"]), pdf.nlargest(5, columns=["a", "b"]))
def test_nsmallest(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, None, 7], "b": [7, 6, 5, 4, 3, 2, 1]}, index=np.random.rand(7)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.nsmallest(n=5, columns="a"), pdf.nsmallest(5, columns="a"))
self.assert_eq(
psdf.nsmallest(n=5, columns=["a", "b"]), pdf.nsmallest(5, columns=["a", "b"])
)
def test_xs(self):
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "locomotion"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs("mammal"), pdf.xs("mammal"))
self.assert_eq(psdf.xs(("mammal",)), pdf.xs(("mammal",)))
self.assert_eq(psdf.xs(("mammal", "dog", "walks")), pdf.xs(("mammal", "dog", "walks")))
self.assert_eq(
ps.concat([psdf, psdf]).xs(("mammal", "dog", "walks")),
pd.concat([pdf, pdf]).xs(("mammal", "dog", "walks")),
)
self.assert_eq(psdf.xs("cat", level=1), pdf.xs("cat", level=1))
self.assert_eq(psdf.xs("flies", level=2), pdf.xs("flies", level=2))
self.assert_eq(psdf.xs("mammal", level=-3), pdf.xs("mammal", level=-3))
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.xs("num_wings", axis=1)
with self.assertRaises(KeyError):
psdf.xs(("mammal", "dog", "walk"))
msg = r"'Key length \(4\) exceeds index depth \(3\)'"
with self.assertRaisesRegex(KeyError, msg):
psdf.xs(("mammal", "dog", "walks", "foo"))
msg = "'key' should be a scalar value or tuple that contains scalar values"
with self.assertRaisesRegex(TypeError, msg):
psdf.xs(["mammal", "dog", "walks", "foo"])
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=-4))
self.assertRaises(IndexError, lambda: psdf.xs("foo", level=3))
self.assertRaises(KeyError, lambda: psdf.xs(("dog", "walks"), level=1))
# non-string names
pdf = pd.DataFrame(data=d)
pdf = pdf.set_index(["class", "animal", "num_legs", "num_wings"])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.xs(("mammal", "dog", 4)), pdf.xs(("mammal", "dog", 4)))
self.assert_eq(psdf.xs(2, level=2), pdf.xs(2, level=2))
self.assert_eq((psdf + "a").xs(("mammal", "dog", 4)), (pdf + "a").xs(("mammal", "dog", 4)))
self.assert_eq((psdf + "a").xs(2, level=2), (pdf + "a").xs(2, level=2))
def test_missing(self):
psdf = self.psdf
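        # Every unsupported or deprecated member listed in _MissingPandasLikeDataFrame
        # should raise PandasNotImplementedError with a matching message.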
missing_functions = inspect.getmembers(_MissingPandasLikeDataFrame, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)()
missing_properties = inspect.getmembers(
_MissingPandasLikeDataFrame, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*DataFrame.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*DataFrame.*{}.*is deprecated".format(name)
):
getattr(psdf, name)
def test_to_numpy(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.to_numpy(), pdf.values)
def test_to_pandas(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.to_pandas(), pdf)
def test_isin(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 9, 4, 2, 4],
"c": ["one", "three", "six", "seven", "one", "5"],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.isin([4, "six"]), pdf.isin([4, "six"]))
        # pandas seems to mishandle an `np.array` argument here,
        # so compare against the list-based pandas result.
self.assert_eq(psdf.isin(np.array([4, "six"])), pdf.isin([4, "six"]))
self.assert_eq(
psdf.isin({"a": [2, 8], "c": ["three", "one"]}),
pdf.isin({"a": [2, 8], "c": ["three", "one"]}),
)
self.assert_eq(
psdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
pdf.isin({"a": np.array([2, 8]), "c": ["three", "one"]}),
)
msg = "'DataFrame' object has no attribute {'e'}"
with self.assertRaisesRegex(AttributeError, msg):
psdf.isin({"e": [5, 7], "a": [1, 6]})
msg = "DataFrame and Series are not supported"
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.isin(pdf)
msg = "Values should be iterable, Series, DataFrame or dict."
with self.assertRaisesRegex(TypeError, msg):
psdf.isin(1)
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, None, 9, 4, None, 4],
"c": [None, 5, None, 3, 2, 1],
},
)
psdf = ps.from_pandas(pdf)
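        # pandas older than 1.2 appears to handle None/NaN in the lookup values differently,
        # so pin the expected result for those versions.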
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), pdf.isin([4, 3, 1, 1, None]))
else:
expected = pd.DataFrame(
{
"a": [True, False, True, True, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, True, False, True],
}
)
self.assert_eq(psdf.isin([4, 3, 1, 1, None]), expected)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.isin({"b": [4, 3, 1, 1, None]}), pdf.isin({"b": [4, 3, 1, 1, None]})
)
else:
expected = pd.DataFrame(
{
"a": [False, False, False, False, False, False],
"b": [True, False, False, True, False, True],
"c": [False, False, False, False, False, False],
}
)
self.assert_eq(psdf.isin({"b": [4, 3, 1, 1, None]}), expected)
def test_merge(self):
left_pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"value": [1, 2, 3, 5, 6, 7],
"x": list("abcdef"),
},
columns=["lkey", "value", "x"],
)
right_pdf = pd.DataFrame(
{
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [4, 5, 6, 7, 8, 9],
"y": list("efghij"),
},
columns=["rkey", "value", "y"],
)
right_ps = pd.Series(list("defghi"), name="x", index=[5, 6, 7, 8, 9, 10])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
right_psser = ps.from_pandas(right_ps)
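        # Helper: run the same merge on pandas-on-Spark and pandas, then compare after
        # sorting the rows and resetting the index, since Spark does not guarantee row order.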
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, on=("value",)))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
# MultiIndex
check(
lambda left, right: left.merge(
right, left_on=["lkey", "value"], right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.set_index(["lkey", "value"]).merge(
right, left_index=True, right_on=["rkey", "value"]
)
)
check(
lambda left, right: left.merge(
right.set_index(["rkey", "value"]), left_on=["lkey", "value"], right_index=True
)
)
# TODO: when both left_index=True and right_index=True with multi-index
# check(lambda left, right: left.set_index(['lkey', 'value']).merge(
# right.set_index(['rkey', 'value']), left_index=True, right_index=True))
# join types
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, on="value", how=how))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey", how=how))
# suffix
check(
lambda left, right: left.merge(
right, left_on="lkey", right_on="rkey", suffixes=["_left", "_right"]
)
)
# Test Series on the right
check(lambda left, right: left.merge(right), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x"), right_psser, right_ps
)
check(
lambda left, right: left.set_index("x").merge(right, left_index=True, right_on="x"),
right_psser,
right_ps,
)
# Test join types with Series
for how in ["inner", "left", "right", "outer"]:
check(lambda left, right: left.merge(right, how=how), right_psser, right_ps)
check(
lambda left, right: left.merge(right, left_on="x", right_on="x", how=how),
right_psser,
right_ps,
)
# suffix with Series
check(
lambda left, right: left.merge(
right,
suffixes=["_left", "_right"],
how="outer",
left_index=True,
right_index=True,
),
right_psser,
right_ps,
)
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([(10, "lkey"), (10, "value"), (20, "x")])
left_pdf.columns = left_columns
left_psdf.columns = left_columns
right_columns = pd.MultiIndex.from_tuples([(10, "rkey"), (10, "value"), (30, "y")])
right_pdf.columns = right_columns
right_psdf.columns = right_columns
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[(10, "value")]))
check(
lambda left, right: (left.set_index((10, "lkey")).merge(right.set_index((10, "rkey"))))
)
check(
lambda left, right: (
left.set_index((10, "lkey")).merge(
right.set_index((10, "rkey")), left_index=True, right_index=True
)
)
)
# TODO: when both left_index=True and right_index=True with multi-index columns
# check(lambda left, right: left.merge(right,
# left_on=[('a', 'lkey')], right_on=[('a', 'rkey')]))
# check(lambda left, right: (left.set_index(('a', 'lkey'))
# .merge(right, left_index=True, right_on=[('a', 'rkey')])))
# non-string names
left_pdf.columns = [10, 100, 1000]
left_psdf.columns = [10, 100, 1000]
right_pdf.columns = [20, 100, 2000]
right_psdf.columns = [20, 100, 2000]
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on=[100]))
check(lambda left, right: (left.set_index(10).merge(right.set_index(20))))
check(
lambda left, right: (
left.set_index(10).merge(right.set_index(20), left_index=True, right_index=True)
)
)
def test_merge_same_anchor(self):
pdf = pd.DataFrame(
{
"lkey": ["foo", "bar", "baz", "foo", "bar", "l"],
"rkey": ["baz", "foo", "bar", "baz", "foo", "r"],
"value": [1, 1, 3, 5, 6, 7],
"x": list("abcdef"),
"y": list("efghij"),
},
columns=["lkey", "rkey", "value", "x", "y"],
)
psdf = ps.from_pandas(pdf)
left_pdf = pdf[["lkey", "value", "x"]]
right_pdf = pdf[["rkey", "value", "y"]]
left_psdf = psdf[["lkey", "value", "x"]]
right_psdf = psdf[["rkey", "value", "y"]]
def check(op, right_psdf=right_psdf, right_pdf=right_pdf):
k_res = op(left_psdf, right_psdf)
k_res = k_res.to_pandas()
k_res = k_res.sort_values(by=list(k_res.columns))
k_res = k_res.reset_index(drop=True)
p_res = op(left_pdf, right_pdf)
p_res = p_res.sort_values(by=list(p_res.columns))
p_res = p_res.reset_index(drop=True)
self.assert_eq(k_res, p_res)
check(lambda left, right: left.merge(right))
check(lambda left, right: left.merge(right, on="value"))
check(lambda left, right: left.merge(right, left_on="lkey", right_on="rkey"))
check(lambda left, right: left.set_index("lkey").merge(right.set_index("rkey")))
check(
lambda left, right: left.set_index("lkey").merge(
right, left_index=True, right_on="rkey"
)
)
check(
lambda left, right: left.merge(
right.set_index("rkey"), left_on="lkey", right_index=True
)
)
check(
lambda left, right: left.set_index("lkey").merge(
right.set_index("rkey"), left_index=True, right_index=True
)
)
def test_merge_retains_indices(self):
left_pdf = pd.DataFrame({"A": [0, 1]})
right_pdf = pd.DataFrame({"B": [1, 2]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_index=True),
left_pdf.merge(right_pdf, left_index=True, right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_index=True),
left_pdf.merge(right_pdf, left_on="A", right_index=True),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_index=True, right_on="B"),
left_pdf.merge(right_pdf, left_index=True, right_on="B"),
)
self.assert_eq(
left_psdf.merge(right_psdf, left_on="A", right_on="B"),
left_pdf.merge(right_pdf, left_on="A", right_on="B"),
)
def test_merge_how_parameter(self):
left_pdf = pd.DataFrame({"A": [1, 2]})
right_pdf = pd.DataFrame({"B": ["x", "y"]}, index=[1, 2])
left_psdf = ps.from_pandas(left_pdf)
right_psdf = ps.from_pandas(right_pdf)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True)
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True)
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="left")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="left")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="right")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="right")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
psdf = left_psdf.merge(right_psdf, left_index=True, right_index=True, how="outer")
pdf = left_pdf.merge(right_pdf, left_index=True, right_index=True, how="outer")
self.assert_eq(
psdf.sort_values(by=list(psdf.columns)).reset_index(drop=True),
pdf.sort_values(by=list(pdf.columns)).reset_index(drop=True),
)
def test_merge_raises(self):
left = ps.DataFrame(
{"value": [1, 2, 3, 5, 6], "x": list("abcde")},
columns=["value", "x"],
index=["foo", "bar", "baz", "foo", "bar"],
)
right = ps.DataFrame(
{"value": [4, 5, 6, 7, 8], "y": list("fghij")},
columns=["value", "y"],
index=["baz", "foo", "bar", "baz", "foo"],
)
with self.assertRaisesRegex(ValueError, "No common columns to perform merge on"):
left[["x"]].merge(right[["y"]])
with self.assertRaisesRegex(ValueError, "not a combination of both"):
left.merge(right, on="value", left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_on="x")
with self.assertRaisesRegex(ValueError, "Must pass right_on or right_index=True"):
left.merge(right, left_index=True)
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_on="y")
with self.assertRaisesRegex(ValueError, "Must pass left_on or left_index=True"):
left.merge(right, right_index=True)
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on="value", right_on=["value", "y"])
with self.assertRaisesRegex(
ValueError, "len\\(left_keys\\) must equal len\\(right_keys\\)"
):
left.merge(right, left_on=["value", "x"], right_on="value")
with self.assertRaisesRegex(ValueError, "['inner', 'left', 'right', 'full', 'outer']"):
left.merge(right, left_index=True, right_index=True, how="foo")
with self.assertRaisesRegex(KeyError, "id"):
left.merge(right, on="id")
def test_append(self):
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=list("AB"))
psdf = ps.from_pandas(pdf)
other_pdf = pd.DataFrame([[3, 4], [5, 6]], columns=list("BC"), index=[2, 3])
other_psdf = ps.from_pandas(other_pdf)
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
self.assert_eq(psdf.append(psdf, ignore_index=True), pdf.append(pdf, ignore_index=True))
# Assert DataFrames with non-matching columns
self.assert_eq(psdf.append(other_psdf), pdf.append(other_pdf))
# Assert appending a Series fails
msg = "DataFrames.append() does not support appending Series to DataFrames"
with self.assertRaises(TypeError, msg=msg):
psdf.append(psdf["A"])
# Assert using the sort parameter raises an exception
msg = "The 'sort' parameter is currently not supported"
with self.assertRaises(NotImplementedError, msg=msg):
psdf.append(psdf, sort=True)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
psdf.append(other_psdf, verify_integrity=True),
pdf.append(other_pdf, verify_integrity=True),
)
msg = "Indices have overlapping values"
with self.assertRaises(ValueError, msg=msg):
psdf.append(psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
psdf.append(psdf, ignore_index=True, verify_integrity=True),
pdf.append(pdf, ignore_index=True, verify_integrity=True),
)
# Assert appending multi-index DataFrames
        multi_index_pdf = pd.DataFrame(
            [[1, 2], [3, 4]], columns=list("AB"), index=[[2, 3], [4, 5]]
        )
multi_index_psdf = ps.from_pandas(multi_index_pdf)
other_multi_index_pdf = pd.DataFrame(
[[5, 6], [7, 8]], columns=list("AB"), index=[[2, 3], [6, 7]]
)
other_multi_index_psdf = ps.from_pandas(other_multi_index_pdf)
self.assert_eq(
multi_index_psdf.append(multi_index_psdf), multi_index_pdf.append(multi_index_pdf)
)
# Assert DataFrames with non-matching columns
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf),
multi_index_pdf.append(other_multi_index_pdf),
)
# Assert using 'verify_integrity' only raises an exception for overlapping indices
self.assert_eq(
multi_index_psdf.append(other_multi_index_psdf, verify_integrity=True),
multi_index_pdf.append(other_multi_index_pdf, verify_integrity=True),
)
with self.assertRaises(ValueError, msg=msg):
multi_index_psdf.append(multi_index_psdf, verify_integrity=True)
# Skip integrity verification when ignore_index=True
self.assert_eq(
multi_index_psdf.append(multi_index_psdf, ignore_index=True, verify_integrity=True),
multi_index_pdf.append(multi_index_pdf, ignore_index=True, verify_integrity=True),
)
# Assert trying to append DataFrames with different index levels
msg = "Both DataFrames have to have the same number of index levels"
with self.assertRaises(ValueError, msg=msg):
psdf.append(multi_index_psdf)
# Skip index level check when ignore_index=True
self.assert_eq(
psdf.append(multi_index_psdf, ignore_index=True),
pdf.append(multi_index_pdf, ignore_index=True),
)
columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.append(psdf), pdf.append(pdf))
def test_clip(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
# Assert list-like values are not accepted for 'lower' and 'upper'
msg = "List-like value are not supported for 'lower' and 'upper' at the moment"
with self.assertRaises(TypeError, msg=msg):
psdf.clip(lower=[1])
with self.assertRaises(TypeError, msg=msg):
psdf.clip(upper=[1])
# Assert no lower or upper
self.assert_eq(psdf.clip(), pdf.clip())
# Assert lower only
self.assert_eq(psdf.clip(1), pdf.clip(1))
# Assert upper only
self.assert_eq(psdf.clip(upper=3), pdf.clip(upper=3))
# Assert lower and upper
self.assert_eq(psdf.clip(1, 3), pdf.clip(1, 3))
pdf["clip"] = pdf.A.clip(lower=1, upper=3)
psdf["clip"] = psdf.A.clip(lower=1, upper=3)
self.assert_eq(psdf, pdf)
        # Assert behavior on string values: clip is a no-op for non-numeric columns
str_psdf = ps.DataFrame({"A": ["a", "b", "c"]}, index=np.random.rand(3))
self.assert_eq(str_psdf.clip(1, 3), str_psdf)
def test_binary_operators(self):
pdf = pd.DataFrame(
{"A": [0, 2, 4], "B": [4, 2, 0], "X": [-1, 10, 0]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf + psdf.copy(), pdf + pdf.copy())
self.assert_eq(psdf + psdf.loc[:, ["A", "B"]], pdf + pdf.loc[:, ["A", "B"]])
self.assert_eq(psdf.loc[:, ["A", "B"]] + psdf, pdf.loc[:, ["A", "B"]] + pdf)
self.assertRaisesRegex(
ValueError,
"it comes from a different dataframe",
lambda: ps.range(10).add(ps.range(10)),
)
self.assertRaisesRegex(
TypeError,
"add with a sequence is currently not supported",
lambda: ps.range(10).add(ps.range(10).id),
)
psdf_other = psdf.copy()
psdf_other.columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
self.assertRaisesRegex(
ValueError,
"cannot join with no overlapping index names",
lambda: psdf.add(psdf_other),
)
def test_binary_operator_add(self):
# Positive
pdf = pd.DataFrame({"a": ["x"], "b": ["y"], "c": [1], "d": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] + psdf["b"], pdf["a"] + pdf["b"])
self.assert_eq(psdf["c"] + psdf["d"], pdf["c"] + pdf["d"])
# Negative
ks_err_msg = "Addition can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["c"] + "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" + psdf["c"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 + psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] + 1)
def test_binary_operator_sub(self):
# Positive
pdf = pd.DataFrame({"a": [2], "b": [1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] - psdf["b"], pdf["a"] - pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Subtraction can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] - "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" - psdf["b"])
ks_err_msg = "Subtraction can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 - psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - 1)
psdf = ps.DataFrame({"a": ["x"], "b": ["y"]})
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] - psdf["b"])
def test_binary_operator_truediv(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] / psdf["b"], pdf["a"] / pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "True division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] / "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" / psdf["b"])
ks_err_msg = "True division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] / psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 / psdf["a"])
def test_binary_operator_floordiv(self):
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Floor division can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] // psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 // psdf["a"])
ks_err_msg = "Floor division can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] // "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" // psdf["b"])
def test_binary_operator_mod(self):
# Positive
pdf = pd.DataFrame({"a": [3], "b": [2]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["a"] % psdf["b"], pdf["a"] % pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [1]})
ks_err_msg = "Modulo can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] % "literal")
ks_err_msg = "Modulo can not be applied to strings"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] % psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 1 % psdf["a"])
def test_binary_operator_multiply(self):
# Positive
pdf = pd.DataFrame({"a": ["x", "y"], "b": [1, 2], "c": [3, 4]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf["b"] * psdf["c"], pdf["b"] * pdf["c"])
self.assert_eq(psdf["c"] * psdf["b"], pdf["c"] * pdf["b"])
self.assert_eq(psdf["a"] * psdf["b"], pdf["a"] * pdf["b"])
self.assert_eq(psdf["b"] * psdf["a"], pdf["b"] * pdf["a"])
self.assert_eq(psdf["a"] * 2, pdf["a"] * 2)
self.assert_eq(psdf["b"] * 2, pdf["b"] * 2)
self.assert_eq(2 * psdf["a"], 2 * pdf["a"])
self.assert_eq(2 * psdf["b"], 2 * pdf["b"])
# Negative
psdf = ps.DataFrame({"a": ["x"], "b": [2]})
ks_err_msg = "Multiplication can not be applied to given types"
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["b"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["b"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * "literal")
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: psdf["a"] * 0.1)
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: 0.1 * psdf["a"])
self.assertRaisesRegex(TypeError, ks_err_msg, lambda: "literal" * psdf["a"])
def test_sample(self):
pdf = pd.DataFrame({"A": [0, 2, 4]})
psdf = ps.from_pandas(pdf)
# Make sure the tests run, but we can't check the result because they are non-deterministic.
psdf.sample(frac=0.1)
psdf.sample(frac=0.2, replace=True)
psdf.sample(frac=0.2, random_state=5)
psdf["A"].sample(frac=0.2)
psdf["A"].sample(frac=0.2, replace=True)
psdf["A"].sample(frac=0.2, random_state=5)
with self.assertRaises(ValueError):
psdf.sample()
with self.assertRaises(NotImplementedError):
psdf.sample(n=1)
def test_add_prefix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_prefix("col_"), psdf.add_prefix("col_"))
def test_add_suffix(self):
pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [3, 4, 5, 6]}, index=np.random.rand(4))
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.add_suffix("first_series"), psdf.add_suffix("first_series"))
def test_join(self):
# check basic function
pdf1 = pd.DataFrame(
{"key": ["K0", "K1", "K2", "K3"], "A": ["A0", "A1", "A2", "A3"]}, columns=["key", "A"]
)
pdf2 = pd.DataFrame(
{"key": ["K0", "K1", "K2"], "B": ["B0", "B1", "B2"]}, columns=["key", "B"]
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# join with duplicated columns in Series
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
ks1 = ps.Series(["A1", "A5"], index=[1, 2], name="A")
psdf1.join(ks1, how="outer")
# join with duplicated columns in DataFrame
with self.assertRaisesRegex(ValueError, "columns overlap but no suffix specified"):
psdf1.join(psdf2, how="outer")
# check `on` parameter
join_pdf = pdf1.join(pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index("key").join(
pdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index("key").join(
psdf2.set_index("key"), on="key", lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index columns
columns1 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "A")])
columns2 = pd.MultiIndex.from_tuples([("x", "key"), ("Y", "B")])
pdf1.columns = columns1
pdf2.columns = columns2
psdf1.columns = columns1
psdf2.columns = columns2
join_pdf = pdf1.join(pdf2, lsuffix="_left", rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, lsuffix="_left", rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
# check `on` parameter
join_pdf = pdf1.join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
join_pdf = pdf1.set_index(("x", "key")).join(
pdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.set_index(("x", "key")).join(
psdf2.set_index(("x", "key")), on=[("x", "key")], lsuffix="_left", rsuffix="_right"
)
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf.reset_index(drop=True), join_psdf.reset_index(drop=True))
# multi-index
midx1 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")], names=["index1", "index2"]
)
midx2 = pd.MultiIndex.from_tuples(
[("w", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf1.index = midx1
pdf2.index = midx2
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
join_pdf = pdf1.join(pdf2, on=["index1", "index2"], rsuffix="_right")
join_pdf.sort_values(by=list(join_pdf.columns), inplace=True)
join_psdf = psdf1.join(psdf2, on=["index1", "index2"], rsuffix="_right")
join_psdf.sort_values(by=list(join_psdf.columns), inplace=True)
self.assert_eq(join_pdf, join_psdf)
with self.assertRaisesRegex(
ValueError, r'len\(left_on\) must equal the number of levels in the index of "right"'
):
psdf1.join(psdf2, on=["index1"], rsuffix="_right")
def test_replace(self):
pdf = pd.DataFrame(
{
"name": ["Ironman", "Captain America", "Thor", "Hulk"],
"weapon": ["Mark-45", "Shield", "Mjolnir", "Smash"],
},
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only for method='pad"
):
psdf.replace(method="bfill")
with self.assertRaisesRegex(
NotImplementedError, "replace currently works only when limit=None"
):
psdf.replace(limit=10)
with self.assertRaisesRegex(
NotImplementedError, "replace currently doesn't supports regex"
):
psdf.replace(regex="")
with self.assertRaisesRegex(ValueError, "Length of to_replace and value must be same"):
psdf.replace(to_replace=["Ironman"], value=["Spiderman", "Doctor Strange"])
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace("Ironman", lambda x: "Spiderman")
with self.assertRaisesRegex(TypeError, "Unsupported type function"):
psdf.replace(lambda x: "Ironman", "Spiderman")
self.assert_eq(psdf.replace("Ironman", "Spiderman"), pdf.replace("Ironman", "Spiderman"))
self.assert_eq(
psdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
pdf.replace(["Ironman", "Captain America"], ["Rescue", "Hawkeye"]),
)
self.assert_eq(
psdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
pdf.replace(("Ironman", "Captain America"), ("Rescue", "Hawkeye")),
)
# inplace
pser = pdf.name
psser = psdf.name
pdf.replace("Ironman", "Spiderman", inplace=True)
psdf.replace("Ironman", "Spiderman", inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
pdf = pd.DataFrame(
{"A": [0, 1, 2, 3, np.nan], "B": [5, 6, 7, 8, np.nan], "C": ["a", "b", "c", "d", None]},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
pdf.replace({"A": [0, np.nan], "B": [5, np.nan]}, 100),
)
self.assert_eq(
psdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"A": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({"X": {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(psdf.replace({"C": ["a", None]}, "e"), pdf.replace({"C": ["a", None]}, "e"))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.replace([0, 1, 2, 3, 5, 6], 4), pdf.replace([0, 1, 2, 3, 5, 6], 4))
self.assert_eq(
psdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
pdf.replace([0, 1, 2, 3, 5, 6], [6, 5, 4, 3, 2, 1]),
)
self.assert_eq(psdf.replace({0: 10, 1: 100, 7: 200}), pdf.replace({0: 10, 1: 100, 7: 200}))
self.assert_eq(
psdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
pdf.replace({("X", "A"): [0, np.nan], ("X", "B"): 5}, 100),
)
self.assert_eq(
psdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "A"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
pdf.replace({("X", "B"): {0: 100, 4: 400, np.nan: 700}}),
)
self.assert_eq(
psdf.replace({("Y", "C"): ["a", None]}, "e"),
pdf.replace({("Y", "C"): ["a", None]}, "e"),
)
def test_update(self):
        # check basic function
def get_data(left_columns=None, right_columns=None):
left_pdf = pd.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", np.nan, np.nan]}, columns=["A", "B"]
)
right_pdf = pd.DataFrame(
{"B": ["x", np.nan, "y", np.nan], "C": ["100", "200", "300", "400"]},
columns=["B", "C"],
)
left_psdf = ps.DataFrame(
{"A": ["1", "2", "3", "4"], "B": ["100", "200", None, None]}, columns=["A", "B"]
)
right_psdf = ps.DataFrame(
{"B": ["x", None, "y", None], "C": ["100", "200", "300", "400"]}, columns=["B", "C"]
)
if left_columns is not None:
left_pdf.columns = left_columns
left_psdf.columns = left_columns
if right_columns is not None:
right_pdf.columns = right_columns
right_psdf.columns = right_columns
return left_psdf, left_pdf, right_psdf, right_pdf
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
pser = left_pdf.B
psser = left_psdf.B
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
self.assert_eq(psser.sort_index(), pser.sort_index())
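        # With overwrite=False, only entries that are NaN in the original frame are updated.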
left_psdf, left_pdf, right_psdf, right_pdf = get_data()
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(left_pdf.sort_values(by=["A", "B"]), left_psdf.sort_values(by=["A", "B"]))
with self.assertRaises(NotImplementedError):
left_psdf.update(right_psdf, join="right")
# multi-index columns
left_columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")])
right_columns = pd.MultiIndex.from_tuples([("X", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf, overwrite=False)
left_psdf.update(right_psdf, overwrite=False)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
right_columns = pd.MultiIndex.from_tuples([("Y", "B"), ("Y", "C")])
left_psdf, left_pdf, right_psdf, right_pdf = get_data(
left_columns=left_columns, right_columns=right_columns
)
left_pdf.update(right_pdf)
left_psdf.update(right_psdf)
self.assert_eq(
left_pdf.sort_values(by=[("X", "A"), ("X", "B")]),
left_psdf.sort_values(by=[("X", "A"), ("X", "B")]),
)
def test_pivot_table_dtypes(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
        # Compare dtypes only; reset_index(drop=True) drops the column labels, which differ
res_df = psdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
exp_df = pdf.pivot_table(
index=["c"], columns="a", values=["b"], aggfunc={"b": "mean"}
).dtypes.reset_index(drop=True)
self.assert_eq(res_df, exp_df)
        # The results don't have the same column names
# Todo: self.assert_eq(psdf.pivot_table(columns="a", values="b").dtypes,
# pdf.pivot_table(columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes,
# pdf.pivot_table(index=['e', 'c'], columns="a", values="b").dtypes)
# Todo: self.assert_eq(psdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes, pdf.pivot_table(index=['e', 'c'],
# columns="a", values="b", fill_value=999).dtypes)
def test_pivot_table(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [10, 20, 20, 40, 20, 40],
"c": [1, 2, 9, 4, 7, 4],
"d": [-1, -2, -3, -4, -5, -6],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
# Checking if both DataFrames have the same results
self.assert_eq(
psdf.pivot_table(columns="a", values="b").sort_index(),
pdf.pivot_table(columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values="b", aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
pdf.pivot_table(index=["c"], columns="a", values=["b"], aggfunc="sum").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e", "d"], aggfunc="sum"
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
pdf.pivot_table(
index=["c"], columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b").sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", fill_value=999
).sort_index(),
pdf.pivot_table(index=["e", "c"], columns="a", values="b", fill_value=999).sort_index(),
almost=True,
)
# multi-index columns
columns = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "e"), ("z", "c"), ("w", "d")]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pivot_table(columns=("x", "a"), values=("x", "b")).sort_index(),
pdf.pivot_table(columns=[("x", "a")], values=[("x", "b")]).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")], columns=[("x", "a")], values=[("x", "b"), ("y", "e")]
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")], columns=("x", "a"), values=[("x", "b"), ("y", "e"), ("w", "d")]
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e"), ("w", "d")],
).sort_index(),
almost=True,
)
self.assert_eq(
psdf.pivot_table(
index=[("z", "c")],
columns=("x", "a"),
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
pdf.pivot_table(
index=[("z", "c")],
columns=[("x", "a")],
values=[("x", "b"), ("y", "e")],
aggfunc={("x", "b"): "mean", ("y", "e"): "sum"},
).sort_index(),
almost=True,
)
def test_pivot_table_and_index(self):
# https://github.com/databricks/koalas/issues/805
pdf = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
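        # The index of the pivoted result should be a proper MultiIndex matching pandas,
        # including its values, names, and repr.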
ptable = pdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
ktable = psdf.pivot_table(
values="D", index=["A", "B"], columns="C", aggfunc="sum", fill_value=0
).sort_index()
self.assert_eq(ktable, ptable)
self.assert_eq(ktable.index, ptable.index)
self.assert_eq(repr(ktable.index), repr(ptable.index))
def test_stack(self):
pdf_single_level_cols = pd.DataFrame(
[[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)
psdf_single_level_cols = ps.from_pandas(pdf_single_level_cols)
self.assert_eq(
psdf_single_level_cols.stack().sort_index(), pdf_single_level_cols.stack().sort_index()
)
multicol1 = pd.MultiIndex.from_tuples(
[("weight", "kg"), ("weight", "pounds")], names=["x", "y"]
)
pdf_multi_level_cols1 = pd.DataFrame(
[[1, 2], [2, 4]], index=["cat", "dog"], columns=multicol1
)
psdf_multi_level_cols1 = ps.from_pandas(pdf_multi_level_cols1)
self.assert_eq(
psdf_multi_level_cols1.stack().sort_index(), pdf_multi_level_cols1.stack().sort_index()
)
multicol2 = pd.MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
pdf_multi_level_cols2 = pd.DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=multicol2
)
psdf_multi_level_cols2 = ps.from_pandas(pdf_multi_level_cols2)
self.assert_eq(
psdf_multi_level_cols2.stack().sort_index(), pdf_multi_level_cols2.stack().sort_index()
)
pdf = pd.DataFrame(
{
("y", "c"): [True, True],
("x", "b"): [False, False],
("x", "c"): [True, False],
("y", "a"): [False, True],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.stack().sort_index(), pdf.stack().sort_index())
self.assert_eq(psdf[[]].stack().sort_index(), pdf[[]].stack().sort_index(), almost=True)
def test_unstack(self):
pdf = pd.DataFrame(
np.random.randn(3, 3),
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.unstack().sort_index(), pdf.unstack().sort_index(), almost=True)
self.assert_eq(
psdf.unstack().unstack().sort_index(), pdf.unstack().unstack().sort_index(), almost=True
)
def test_pivot_errors(self):
psdf = ps.range(10)
with self.assertRaisesRegex(ValueError, "columns should be set"):
psdf.pivot(index="id")
with self.assertRaisesRegex(ValueError, "values should be set"):
psdf.pivot(index="id", columns="id")
def test_pivot_table_errors(self):
pdf = pd.DataFrame(
{
"a": [4, 2, 3, 4, 8, 6],
"b": [1, 2, 2, 4, 2, 4],
"e": [1, 2, 2, 4, 2, 4],
"c": [1, 2, 9, 4, 7, 4],
},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assertRaises(KeyError, lambda: psdf.pivot_table(index=["c"], columns="a", values=5))
msg = "index should be a None or a list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index="c", columns="a", values="b")
msg = "pivot_table doesn't support aggfunc as dict and without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"})
msg = "columns should be one column name."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns=["a"], values=["b"], aggfunc={"b": "mean", "e": "sum"})
msg = "Columns in aggfunc must be the same as values."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["e", "c"], columns="a", values="b", aggfunc={"b": "mean", "e": "sum"}
)
msg = "values can't be a list without index."
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.pivot_table(columns="a", values=["b", "e"])
msg = "Wrong columns A."
with self.assertRaisesRegex(ValueError, msg):
psdf.pivot_table(
index=["c"], columns="A", values=["b", "e"], aggfunc={"b": "mean", "e": "sum"}
)
msg = "values should be one column or list of columns."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values=(["b"], ["c"]))
msg = "aggfunc must be a dict mapping from column name to aggregate functions"
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(columns="a", values="b", aggfunc={"a": lambda x: sum(x)})
psdf = ps.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
},
columns=["A", "B", "C", "D", "E"],
index=np.random.rand(9),
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(
index=["C"], columns="A", values=["B", "E"], aggfunc={"B": "mean", "E": "sum"}
)
msg = "values should be a numeric type."
with self.assertRaisesRegex(TypeError, msg):
psdf.pivot_table(index=["C"], columns="A", values="B", aggfunc={"B": "mean"})
def test_transpose(self):
        # TODO: what about a random index?
pdf1 = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}, columns=["col1", "col2"])
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
data={"score": [9, 8], "kids": [0, 0], "age": [12, 22]},
columns=["score", "kids", "age"],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
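        # Also run with the compute.max_rows limit disabled to cover that code path.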
with option_context("compute.max_rows", None):
self.assert_eq(pdf1.transpose().sort_index(), psdf1.transpose().sort_index())
self.assert_eq(pdf2.transpose().sort_index(), psdf2.transpose().sort_index())
pdf3 = pd.DataFrame(
{
("cg1", "a"): [1, 2, 3],
("cg1", "b"): [4, 5, 6],
("cg2", "c"): [7, 8, 9],
("cg3", "d"): [9, 9, 9],
},
index=pd.MultiIndex.from_tuples([("rg1", "x"), ("rg1", "y"), ("rg2", "z")]),
)
psdf3 = ps.from_pandas(pdf3)
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
with option_context("compute.max_rows", None):
self.assert_eq(pdf3.transpose().sort_index(), psdf3.transpose().sort_index())
def _test_cummin(self, pdf, psdf):
self.assert_eq(pdf.cummin(), psdf.cummin())
self.assert_eq(pdf.cummin(skipna=False), psdf.cummin(skipna=False))
self.assert_eq(pdf.cummin().sum(), psdf.cummin().sum())
def test_cummin(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def test_cummin_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummin(pdf, psdf)
def _test_cummax(self, pdf, psdf):
self.assert_eq(pdf.cummax(), psdf.cummax())
self.assert_eq(pdf.cummax(skipna=False), psdf.cummax(skipna=False))
self.assert_eq(pdf.cummax().sum(), psdf.cummax().sum())
def test_cummax(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def test_cummax_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cummax(pdf, psdf)
def _test_cumsum(self, pdf, psdf):
self.assert_eq(pdf.cumsum(), psdf.cumsum())
self.assert_eq(pdf.cumsum(skipna=False), psdf.cumsum(skipna=False))
self.assert_eq(pdf.cumsum().sum(), psdf.cumsum().sum())
def test_cumsum(self):
pdf = pd.DataFrame(
[[2.0, 1.0], [5, None], [1.0, 0.0], [2.0, 4.0], [4.0, 9.0]],
columns=list("AB"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def test_cumsum_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumsum(pdf, psdf)
def _test_cumprod(self, pdf, psdf):
self.assert_eq(pdf.cumprod(), psdf.cumprod(), almost=True)
self.assert_eq(pdf.cumprod(skipna=False), psdf.cumprod(skipna=False), almost=True)
self.assert_eq(pdf.cumprod().sum(), psdf.cumprod().sum(), almost=True)
def test_cumprod(self):
pdf = pd.DataFrame(
[[2.0, 1.0, 1], [5, None, 2], [1.0, -1.0, -3], [2.0, 0, 4], [4.0, 9.0, 5]],
columns=list("ABC"),
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_cumprod_multiindex_columns(self):
arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])]
pdf = pd.DataFrame(np.random.rand(3, 4), index=["A", "C", "B"], columns=arrays)
pdf.at["C", ("A", "two")] = None
psdf = ps.from_pandas(pdf)
self._test_cumprod(pdf, psdf)
def test_drop_duplicates(self):
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
# inplace is False
for keep in ["first", "last", False]:
with self.subTest(keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates("a", keep=keep).sort_index(),
psdf.drop_duplicates("a", keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
psdf.drop_duplicates(["a", "b"], keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
psdf.set_index("a", append=True).drop_duplicates("b", keep=keep).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
# inplace is False
for keep in ["first", "last", False]:
with self.subTest("multi-index columns", keep=keep):
self.assert_eq(
pdf.drop_duplicates(keep=keep).sort_index(),
psdf.drop_duplicates(keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
psdf.drop_duplicates(("x", "a"), keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
psdf.drop_duplicates([("x", "a"), ("y", "b")], keep=keep).sort_index(),
)
# inplace is True
subset_list = [None, "a", ["a", "b"]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
pser = pdf.a
psser = psdf.a
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
# multi-index columns, inplace is True
subset_list = [None, ("x", "a"), [("x", "a"), ("y", "b")]]
for subset in subset_list:
pdf = pd.DataFrame(
{"a": [1, 2, 2, 2, 3], "b": ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
pser = pdf[("x", "a")]
psser = psdf[("x", "a")]
pdf.drop_duplicates(subset=subset, inplace=True)
psdf.drop_duplicates(subset=subset, inplace=True)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psser.sort_index(), pser.sort_index())
        # non-string names (note: `keep` still holds the last value from the loop above)
pdf = pd.DataFrame(
{10: [1, 2, 2, 2, 3], 20: ["a", "a", "a", "c", "d"]}, index=np.random.rand(5)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.drop_duplicates(10, keep=keep).sort_index(),
psdf.drop_duplicates(10, keep=keep).sort_index(),
)
self.assert_eq(
pdf.drop_duplicates([10, 20], keep=keep).sort_index(),
psdf.drop_duplicates([10, 20], keep=keep).sort_index(),
)
def test_reindex(self):
index = pd.Index(["A", "B", "C", "D", "E"])
columns = pd.Index(["numbers"])
pdf = pd.DataFrame([1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns = pd.Index(["numbers"], name="cols")
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
pdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], columns=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
psdf.reindex(["A", "B", "C"], index=["numbers", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "B"]).sort_index(), psdf.reindex(index=["A", "B"]).sort_index()
)
self.assert_eq(
pdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
psdf.reindex(index=["A", "B", "2", "3"]).sort_index(),
)
self.assert_eq(
pdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
psdf.reindex(index=["A", "E", "2", "3"], fill_value=0).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"]).sort_index(),
psdf.reindex(columns=["numbers"]).sort_index(),
)
self.assert_eq(
pdf.reindex(columns=["numbers"], copy=True).sort_index(),
psdf.reindex(columns=["numbers"], copy=True).sort_index(),
)
# Using float as fill_value to avoid int64/32 clash
self.assert_eq(
pdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
psdf.reindex(columns=["numbers", "2", "3"], fill_value=0.0).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"])
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
columns2 = pd.Index(["numbers", "2", "3"], name="cols2")
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
# Reindexing single Index on single Index
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = pd.DataFrame({"index2": ["A", "C", "D", "E", "0"]}).set_index("index2").index
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
# Reindexing MultiIndex on single Index
pindex = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("F", "G")], names=["name1", "name2"]
)
kindex = ps.from_pandas(pindex)
self.assert_eq(
pdf.reindex(index=pindex, fill_value=0.0).sort_index(),
psdf.reindex(index=kindex, fill_value=0.0).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["numbers", "2", "3"], axis=2))
self.assertRaises(TypeError, lambda: psdf.reindex(columns="numbers"))
self.assertRaises(TypeError, lambda: psdf.reindex(index=["A", "B", "C"], axis=1))
self.assertRaises(TypeError, lambda: psdf.reindex(index=123))
# Reindexing MultiIndex on MultiIndex
pdf = pd.DataFrame({"numbers": [1.0, 2.0, None]}, index=pindex)
psdf = ps.from_pandas(pdf)
pindex2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name1", "name2"]
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
pindex2 = (
pd.DataFrame({"index_level_1": ["A", "C", "I"], "index_level_2": ["G", "D", "J"]})
.set_index(["index_level_1", "index_level_2"])
.index
)
kindex2 = ps.from_pandas(pindex2)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
columns = pd.MultiIndex.from_tuples([("X", "numbers")], names=["cols1", "cols2"])
pdf.columns = columns
psdf.columns = columns
# Reindexing MultiIndex index on MultiIndex columns and MultiIndex index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
index = pd.Index(["A", "B", "C", "D", "E"])
pdf = pd.DataFrame(data=[1.0, 2.0, 3.0, 4.0, None], index=index, columns=columns)
psdf = ps.from_pandas(pdf)
pindex2 = pd.Index(["A", "C", "D", "E", "0"], name="index2")
kindex2 = ps.from_pandas(pindex2)
# Reindexing single Index on MultiIndex columns and single Index
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(index=pindex2, fill_value=fill_value).sort_index(),
psdf.reindex(index=kindex2, fill_value=fill_value).sort_index(),
)
for fill_value in [None, 0.0]:
self.assert_eq(
pdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
psdf.reindex(
columns=[("X", "numbers"), ("Y", "2"), ("Y", "3")], fill_value=fill_value
).sort_index(),
)
columns2 = pd.MultiIndex.from_tuples(
[("X", "numbers"), ("Y", "2"), ("Y", "3")], names=["cols3", "cols4"]
)
self.assert_eq(
pdf.reindex(columns=columns2).sort_index(),
psdf.reindex(columns=columns2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex(columns=["X"]))
self.assertRaises(ValueError, lambda: psdf.reindex(columns=[("X",)]))
def test_reindex_like(self):
data = [[1.0, 2.0], [3.0, None], [None, 4.0]]
index = pd.Index(["A", "B", "C"], name="index")
columns = pd.Index(["numbers", "values"], name="cols")
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
# Reindexing single Index on single Index
data2 = [[5.0, None], [6.0, 7.0], [8.0, None]]
index2 = pd.Index(["A", "C", "D"], name="index2")
columns2 = pd.Index(["numbers", "F"], name="cols2")
pdf2 = pd.DataFrame(data=data2, index=index2, columns=columns2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
pdf2 = pd.DataFrame({"index_level_1": ["A", "C", "I"]})
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2.set_index(["index_level_1"])).sort_index(),
psdf.reindex_like(psdf2.set_index(["index_level_1"])).sort_index(),
)
# Reindexing MultiIndex on single Index
index2 = pd.MultiIndex.from_tuples(
[("A", "G"), ("C", "D"), ("I", "J")], names=["name3", "name4"]
)
pdf2 = pd.DataFrame(data=data2, index=index2)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
self.assertRaises(TypeError, lambda: psdf.reindex_like(index2))
self.assertRaises(AssertionError, lambda: psdf2.reindex_like(psdf))
# Reindexing MultiIndex on MultiIndex
columns2 = pd.MultiIndex.from_tuples(
[("numbers", "third"), ("values", "second")], names=["cols3", "cols4"]
)
pdf2.columns = columns2
psdf2.columns = columns2
columns = pd.MultiIndex.from_tuples(
[("numbers", "first"), ("values", "second")], names=["cols1", "cols2"]
)
index = pd.MultiIndex.from_tuples(
[("A", "B"), ("C", "D"), ("E", "F")], names=["name1", "name2"]
)
pdf = pd.DataFrame(data=data, index=index, columns=columns)
psdf = ps.from_pandas(pdf)
self.assert_eq(
pdf.reindex_like(pdf2).sort_index(),
psdf.reindex_like(psdf2).sort_index(),
)
def test_melt(self):
pdf = pd.DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6], "C": [7, 8, 9]}, index=np.random.rand(3)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars="A").sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars="A").sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=["A", "B"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["C"])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=["A"], value_vars=["C"]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname")
.sort_values(["myVarname", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=["A"], value_vars=["B"], var_name="myVarname", value_name="myValname"
).sort_values(["myVarname", "myValname"]),
)
self.assert_eq(
psdf.melt(value_vars=("A", "B"))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=("A", "B")).sort_values(["variable", "value"]),
)
self.assertRaises(KeyError, lambda: psdf.melt(id_vars="Z"))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars="Z"))
# multi-index columns
TEN = 10.0
TWELVE = 20.0
columns = pd.MultiIndex.from_tuples([(TEN, "A"), (TEN, "B"), (TWELVE, "C")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["variable_0", "variable_1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable_0", "variable_1", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[(TEN, "A")], value_vars=[(TWELVE, "C")])
.sort_values(["variable_0", "variable_1", "value"])
.rename(columns=name_like_string),
)
self.assert_eq(
psdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.reset_index(drop=True),
pdf.melt(
id_vars=[(TEN, "A")],
value_vars=[(TEN, "B")],
var_name=["myV1", "myV2"],
value_name="myValname",
)
.sort_values(["myV1", "myV2", "myValname"])
.rename(columns=name_like_string),
)
columns.names = ["v0", "v1"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.melt().sort_values(["v0", "v1", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["v0", "v1", "value"]),
)
self.assertRaises(ValueError, lambda: psdf.melt(id_vars=(TEN, "A")))
self.assertRaises(ValueError, lambda: psdf.melt(value_vars=(TEN, "A")))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[TEN]))
self.assertRaises(KeyError, lambda: psdf.melt(id_vars=[(TWELVE, "A")]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[TWELVE]))
self.assertRaises(KeyError, lambda: psdf.melt(value_vars=[(TWELVE, "A")]))
# non-string names
pdf.columns = [10.0, 20.0, 30.0]
psdf.columns = [10.0, 20.0, 30.0]
self.assert_eq(
psdf.melt().sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt().sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=10.0).sort_values(["variable", "value"]).reset_index(drop=True),
pdf.melt(id_vars=10.0).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0, 20.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0, 20.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(id_vars=[10.0], value_vars=[30.0])
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(id_vars=[10.0], value_vars=[30.0]).sort_values(["variable", "value"]),
)
self.assert_eq(
psdf.melt(value_vars=(10.0, 20.0))
.sort_values(["variable", "value"])
.reset_index(drop=True),
pdf.melt(value_vars=(10.0, 20.0)).sort_values(["variable", "value"]),
)
def test_all(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.all(), pdf.all())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.all(), pdf.all())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.all(axis=1)
def test_any(self):
pdf = pd.DataFrame(
{
"col1": [False, False, False],
"col2": [True, False, False],
"col3": [0, 0, 1],
"col4": [0, 1, 2],
"col5": [False, False, None],
"col6": [True, False, None],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.any(), pdf.any())
columns = pd.MultiIndex.from_tuples(
[
("a", "col1"),
("a", "col2"),
("a", "col3"),
("b", "col4"),
("b", "col5"),
("c", "col6"),
]
)
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
columns.names = ["X", "Y"]
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.any(), pdf.any())
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.any(axis=1)
def test_rank(self):
pdf = pd.DataFrame(
data={"col1": [1, 2, 3, 1], "col2": [3, 4, 3, 1]},
columns=["col1", "col2"],
index=np.random.rand(4),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
self.assert_eq(pdf.rank().sum(), psdf.rank().sum())
self.assert_eq(
pdf.rank(ascending=False).sort_index(), psdf.rank(ascending=False).sort_index()
)
self.assert_eq(pdf.rank(method="min").sort_index(), psdf.rank(method="min").sort_index())
self.assert_eq(pdf.rank(method="max").sort_index(), psdf.rank(method="max").sort_index())
self.assert_eq(
pdf.rank(method="first").sort_index(), psdf.rank(method="first").sort_index()
)
self.assert_eq(
pdf.rank(method="dense").sort_index(), psdf.rank(method="dense").sort_index()
)
msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'"
with self.assertRaisesRegex(ValueError, msg):
psdf.rank(method="nothing")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "col1"), ("y", "col2")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.rank().sort_index(), psdf.rank().sort_index())
def test_round(self):
pdf = pd.DataFrame(
{
"A": [0.028208, 0.038683, 0.877076],
"B": [0.992815, 0.645646, 0.149370],
"C": [0.173891, 0.577595, 0.491027],
},
columns=["A", "B", "C"],
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
pser = pd.Series([1, 0, 2], index=["A", "B", "C"])
psser = ps.Series([1, 0, 2], index=["A", "B", "C"])
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(pdf.round({"A": 1, "C": 2}), psdf.round({"A": 1, "C": 2}))
self.assert_eq(pdf.round({"A": 1, "D": 2}), psdf.round({"A": 1, "D": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
msg = "decimals must be an integer, a dict-like or a Series"
with self.assertRaisesRegex(TypeError, msg):
psdf.round(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
pdf.columns = columns
psdf.columns = columns
pser = pd.Series([1, 0, 2], index=columns)
psser = ps.Series([1, 0, 2], index=columns)
self.assert_eq(pdf.round(2), psdf.round(2))
self.assert_eq(
pdf.round({("X", "A"): 1, ("Y", "C"): 2}), psdf.round({("X", "A"): 1, ("Y", "C"): 2})
)
self.assert_eq(pdf.round({("X", "A"): 1, "Y": 2}), psdf.round({("X", "A"): 1, "Y": 2}))
self.assert_eq(pdf.round(pser), psdf.round(psser))
# non-string names
pdf = pd.DataFrame(
{
10: [0.028208, 0.038683, 0.877076],
20: [0.992815, 0.645646, 0.149370],
30: [0.173891, 0.577595, 0.491027],
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.round({10: 1, 30: 2}), psdf.round({10: 1, 30: 2}))
def test_shift(self):
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
self.assert_eq(pdf.shift().sum().astype(int), psdf.shift().sum())
# Need the expected result since pandas 0.23 does not support `fill_value` argument.
pdf1 = pd.DataFrame(
{"Col1": [0, 0, 0, 10, 20], "Col2": [0, 0, 0, 13, 23], "Col3": [0, 0, 0, 17, 27]},
index=pdf.index,
)
self.assert_eq(pdf1, psdf.shift(periods=3, fill_value=0))
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.shift(1.5)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.shift(3), psdf.shift(3))
self.assert_eq(pdf.shift().shift(-1), psdf.shift().shift(-1))
def test_diff(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [1, 1, 2, 3, 5, 8], "c": [1, 4, 9, 16, 25, 36]},
index=np.random.rand(6),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.diff(), psdf.diff())
self.assert_eq(pdf.diff().diff(-1), psdf.diff().diff(-1))
self.assert_eq(pdf.diff().sum().astype(int), psdf.diff().sum())
msg = "should be an int"
with self.assertRaisesRegex(TypeError, msg):
psdf.diff(1.5)
msg = 'axis should be either 0 or "index" currently.'
with self.assertRaisesRegex(NotImplementedError, msg):
psdf.diff(axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "Col1"), ("x", "Col2"), ("y", "Col3")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.diff(), psdf.diff())
def test_duplicated(self):
pdf = pd.DataFrame(
{"a": [1, 1, 2, 3], "b": [1, 1, 1, 4], "c": [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(keep="last").sort_index(),
psdf.duplicated(keep="last").sort_index(),
)
self.assert_eq(
pdf.duplicated(keep=False).sort_index(),
psdf.duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset="b").sort_index(),
psdf.duplicated(subset="b").sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=["b"]).sort_index(),
psdf.duplicated(subset=["b"]).sort_index(),
)
with self.assertRaisesRegex(ValueError, "'keep' only supports 'first', 'last' and False"):
psdf.duplicated(keep="false")
with self.assertRaisesRegex(KeyError, "'d'"):
psdf.duplicated(subset=["d"])
pdf.index.name = "x"
psdf.index.name = "x"
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
# multi-index
self.assert_eq(
pdf.set_index("a", append=True).duplicated().sort_index(),
psdf.set_index("a", append=True).duplicated().sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
psdf.set_index("a", append=True).duplicated(keep=False).sort_index(),
)
self.assert_eq(
pdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
psdf.set_index("a", append=True).duplicated(subset=["b"]).sort_index(),
)
        # multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=("x", "b")).sort_index(),
psdf.duplicated(subset=("x", "b")).sort_index(),
)
self.assert_eq(
pdf.duplicated(subset=[("x", "b")]).sort_index(),
psdf.duplicated(subset=[("x", "b")]).sort_index(),
)
# non-string names
pdf = pd.DataFrame(
{10: [1, 1, 2, 3], 20: [1, 1, 1, 4], 30: [1, 1, 1, 5]}, index=np.random.rand(4)
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.duplicated().sort_index(), psdf.duplicated().sort_index())
self.assert_eq(
pdf.duplicated(subset=10).sort_index(),
psdf.duplicated(subset=10).sort_index(),
)
def test_ffill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.ffill(), pdf.ffill())
self.assert_eq(psdf.ffill(limit=1), pdf.ffill(limit=1))
pser = pdf.y
psser = psdf.y
psdf.ffill(inplace=True)
pdf.ffill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[2]], pser[idx[2]])
def test_bfill(self):
idx = np.random.rand(6)
pdf = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, np.nan],
},
index=idx,
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.bfill(), pdf.bfill())
self.assert_eq(psdf.bfill(limit=1), pdf.bfill(limit=1))
pser = pdf.x
psser = psdf.x
psdf.bfill(inplace=True)
pdf.bfill(inplace=True)
self.assert_eq(psdf, pdf)
self.assert_eq(psser, pser)
self.assert_eq(psser[idx[0]], pser[idx[0]])
def test_filter(self):
pdf = pd.DataFrame(
{
"aa": ["aa", "bd", "bc", "ab", "ce"],
"ba": [1, 2, 3, 4, 5],
"cb": [1.0, 2.0, 3.0, 4.0, 5.0],
"db": [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index("aa")
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
with option_context("compute.isin_limit", 0):
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=["ba", "db"], axis=1).sort_index(),
pdf.filter(items=["ba", "db"], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
pdf = pdf.set_index("ba", append=True)
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
pdf.filter(items=[("aa", 1), ("bd", 2)], axis=0).sort_index(),
)
with self.assertRaisesRegex(TypeError, "Unsupported type list"):
psdf.filter(items=[["aa", 1], ("bd", 2)], axis=0)
with self.assertRaisesRegex(ValueError, "The item should not be empty."):
psdf.filter(items=[(), ("bd", 2)], axis=0)
self.assert_eq(psdf.filter(like="b", axis=0), pdf.filter(like="b", axis=0))
self.assert_eq(psdf.filter(regex="b.*", axis=0), pdf.filter(regex="b.*", axis=0))
with self.assertRaisesRegex(ValueError, "items should be a list-like object"):
psdf.filter(items="b")
with self.assertRaisesRegex(ValueError, "No axis named"):
psdf.filter(regex="b.*", axis=123)
with self.assertRaisesRegex(TypeError, "Must pass either `items`, `like`"):
psdf.filter()
with self.assertRaisesRegex(TypeError, "mutually exclusive"):
psdf.filter(regex="b.*", like="aaa")
# multi-index columns
pdf = pd.DataFrame(
{
("x", "aa"): ["aa", "ab", "bc", "bd", "ce"],
("x", "ba"): [1, 2, 3, 4, 5],
("y", "cb"): [1.0, 2.0, 3.0, 4.0, 5.0],
("z", "db"): [1.0, np.nan, 3.0, np.nan, 5.0],
}
)
pdf = pdf.set_index(("x", "aa"))
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf.filter(items=["ab", "aa"], axis=0).sort_index(),
pdf.filter(items=["ab", "aa"], axis=0).sort_index(),
)
self.assert_eq(
psdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
pdf.filter(items=[("x", "ba"), ("z", "db")], axis=1).sort_index(),
)
self.assert_eq(psdf.filter(like="b", axis="index"), pdf.filter(like="b", axis="index"))
self.assert_eq(psdf.filter(like="c", axis="columns"), pdf.filter(like="c", axis="columns"))
self.assert_eq(
psdf.filter(regex="b.*", axis="index"), pdf.filter(regex="b.*", axis="index")
)
self.assert_eq(
psdf.filter(regex="b.*", axis="columns"), pdf.filter(regex="b.*", axis="columns")
)
def test_pipe(self):
psdf = ps.DataFrame(
{"category": ["A", "A", "B"], "col1": [1, 2, 3], "col2": [4, 5, 6]},
columns=["category", "col1", "col2"],
)
self.assertRaisesRegex(
ValueError,
"arg is both the pipe target and a keyword argument",
lambda: psdf.pipe((lambda x: x, "arg"), arg="1"),
)
def test_transform(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=2).sort_index(),
pdf.transform(lambda x, y: x + y, y=2).sort_index(),
)
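        # Re-run under a lowered "compute.shortcut_limit" (500, below the 600-row frame) so the
        # non-shortcut, distributed schema-inference path is exercised as well.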
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
self.assert_eq(
psdf.transform(lambda x, y: x + y, y=1).sort_index(),
pdf.transform(lambda x, y: x + y, y=1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.transform(1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.transform(lambda x: x + 1).sort_index(),
pdf.transform(lambda x: x + 1).sort_index(),
)
def test_apply(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
pdf.apply(lambda x, b: x + b, args=(1,)).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, b: x + b, b=1).sort_index(),
pdf.apply(lambda x, b: x + b, b=1).sort_index(),
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
self.assert_eq(
psdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
pdf.apply(lambda x, c: len(x) + c, axis=1, c=100).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.apply(1)
with self.assertRaisesRegex(TypeError, "The given function.*1 or 'column'; however"):
def f1(_) -> ps.DataFrame[int]:
pass
psdf.apply(f1, axis=0)
with self.assertRaisesRegex(TypeError, "The given function.*0 or 'index'; however"):
def f2(_) -> ps.Series[int]:
pass
psdf.apply(f2, axis=1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: x + 1).sort_index(), pdf.apply(lambda x: x + 1).sort_index()
)
# returning a Series
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.apply(lambda x: len(x), axis=1).sort_index(),
pdf.apply(lambda x: len(x), axis=1).sort_index(),
)
def test_apply_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use default index for
        # pandas API on Spark. Here we ignore both differences.
actual = psdf.apply(identify1, axis=1)
expected = pdf.apply(identify1, axis=1)
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
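        # The slice("name", dtype) form attaches explicit column names to the type hint,
        # so the result keeps "a"/"b" instead of the default "c0"/"c1".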
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.apply(identify2, axis=1)
expected = pdf.apply(identify2, axis=1)
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
def test_apply_batch(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, a: pdf + a, args=(1,)).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf, b: pdf + b, b=1).sort_index(),
(pdf + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.apply_batch(1)
with self.assertRaisesRegex(TypeError, "The given function.*frame as its type hints"):
def f2(_) -> ps.Series[int]:
pass
psdf.pandas_on_spark.apply_batch(f2)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.apply_batch(lambda pdf: 1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(), (pdf + 1).sort_index()
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_apply_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use default index for
        # pandas API on Spark. Here we ignore both differences.
actual = psdf.pandas_on_spark.apply_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.apply_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=np.random.rand(9),
)
psdf = ps.from_pandas(pdf)
def identify3(x) -> ps.DataFrame[float, [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify3)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
# For NumPy typing, NumPy version should be 1.21+ and Python version should be 3.8+
if sys.version_info >= (3, 8) and LooseVersion(np.__version__) >= LooseVersion("1.21"):
import numpy.typing as ntp
psdf = ps.from_pandas(pdf)
def identify4(
x,
) -> ps.DataFrame[float, [int, ntp.NDArray[int]]]: # type: ignore[name-defined]
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
arrays = [[1, 2, 3, 4, 5, 6, 7, 8, 9], ["a", "b", "c", "d", "e", "f", "g", "h", "i"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [[e] for e in [4, 5, 6, 3, 2, 1, 0, 0, 0]]},
index=idx,
)
psdf = ps.from_pandas(pdf)
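        # In ps.DataFrame[[...], [...]] the first list types the (Multi)Index levels and
        # the second list types the data columns.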
def identify4(x) -> ps.DataFrame[[int, str], [int, List[int]]]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify4)
actual.index.names = ["number", "color"]
actual.columns = ["a", "b"]
self.assert_eq(actual, pdf)
def identify5(
x,
) -> ps.DataFrame[
[("number", int), ("color", str)], [("a", int), ("b", List[int])] # noqa: F405
]:
return x
actual = psdf.pandas_on_spark.apply_batch(identify5)
self.assert_eq(actual, pdf)
def test_transform_batch(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6] * 100,
"b": [1.0, 1.0, 2.0, 3.0, 5.0, 8.0] * 100,
"c": [1, 4, 9, 16, 25, 36] * 100,
},
columns=["a", "b", "c"],
index=np.random.rand(600),
)
psdf = ps.DataFrame(pdf)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.c + 1).sort_index(),
(pdf.c + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf + 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b + 1).sort_index(),
(pdf.b + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf + a, 1).sort_index(),
(pdf + 1).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf, a: pdf.c + a, a=1).sort_index(),
(pdf.c + 1).sort_index(),
)
with self.assertRaisesRegex(AssertionError, "the first argument should be a callable"):
psdf.pandas_on_spark.transform_batch(1)
with self.assertRaisesRegex(ValueError, "The given function should return a frame"):
psdf.pandas_on_spark.transform_batch(lambda pdf: 1)
with self.assertRaisesRegex(
ValueError, "transform_batch cannot produce aggregated results"
):
psdf.pandas_on_spark.transform_batch(lambda pdf: pd.Series(1))
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
with option_context("compute.shortcut_limit", 500):
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda x: x + 1).sort_index(),
(pdf + 1).sort_index(),
)
def test_transform_batch_with_type(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
def identify1(x) -> ps.DataFrame[int, int]:
return x
# Type hints set the default column names, and we use default index for
        # pandas API on Spark. Here we ignore both differences.
actual = psdf.pandas_on_spark.transform_batch(identify1)
expected = pdf
self.assert_eq(sorted(actual["c0"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["c1"].to_numpy()), sorted(expected["b"].to_numpy()))
def identify2(x) -> ps.DataFrame[slice("a", int), slice("b", int)]: # noqa: F405
return x
actual = psdf.pandas_on_spark.transform_batch(identify2)
expected = pdf
self.assert_eq(sorted(actual["a"].to_numpy()), sorted(expected["a"].to_numpy()))
self.assert_eq(sorted(actual["b"].to_numpy()), sorted(expected["b"].to_numpy()))
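    # transform_batch results keep the same anchor as the source frame, so assigning them
    # back as a new column should work without joining separate frames.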
def test_transform_batch_same_anchor(self):
psdf = ps.range(10)
psdf["d"] = psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.id + 1)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(pdf) -> ps.Series[np.int64]:
return pdf.id + 1
psdf["d"] = psdf.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
psdf = ps.range(10)
def plus_one(ser) -> ps.Series[np.int64]:
return ser + 1
psdf["d"] = psdf.id.pandas_on_spark.transform_batch(plus_one)
self.assert_eq(
psdf,
pd.DataFrame({"id": list(range(10)), "d": list(range(1, 11))}, columns=["id", "d"]),
)
def test_empty_timestamp(self):
pdf = pd.DataFrame(
{
"t": [
datetime(2019, 1, 1, 0, 0, 0),
datetime(2019, 1, 2, 0, 0, 0),
datetime(2019, 1, 3, 0, 0, 0),
]
},
index=np.random.rand(3),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf[psdf["t"] != psdf["t"]], pdf[pdf["t"] != pdf["t"]])
self.assert_eq(psdf[psdf["t"] != psdf["t"]].dtypes, pdf[pdf["t"] != pdf["t"]].dtypes)
def test_to_spark(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(ValueError, "'index_col' cannot be overlapped"):
psdf.to_spark(index_col="a")
with self.assertRaisesRegex(ValueError, "length of index columns.*1.*3"):
psdf.to_spark(index_col=["x", "y", "z"])
def test_keys(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.keys(), pdf.keys())
def test_quantile(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
self.assert_eq(psdf.loc[[]].quantile(0.5), pdf.loc[[]].quantile(0.5))
self.assert_eq(
psdf.loc[[]].quantile([0.25, 0.5, 0.75]), pdf.loc[[]].quantile([0.25, 0.5, 0.75])
)
with self.assertRaisesRegex(
NotImplementedError, 'axis should be either 0 or "index" currently.'
):
psdf.quantile(0.5, axis=1)
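        # "accuracy" is a pandas-on-Spark-specific parameter tuning the approximate
        # percentile computation; it must be an integer.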
with self.assertRaisesRegex(TypeError, "accuracy must be an integer; however"):
psdf.quantile(accuracy="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q="a")
with self.assertRaisesRegex(TypeError, "q must be a float or an array of floats;"):
psdf.quantile(q=["a"])
with self.assertRaisesRegex(
ValueError, r"percentiles should all be in the interval \[0, 1\]"
):
psdf.quantile(q=[1.1])
self.assert_eq(
psdf.quantile(0.5, numeric_only=False), pdf.quantile(0.5, numeric_only=False)
)
self.assert_eq(
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
pdf.quantile([0.25, 0.5, 0.75], numeric_only=False),
)
# multi-index column
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
pdf = pd.DataFrame({"x": ["a", "b", "c"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.quantile(0.5), pdf.quantile(0.5))
self.assert_eq(psdf.quantile([0.25, 0.5, 0.75]), pdf.quantile([0.25, 0.5, 0.75]))
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile(0.5, numeric_only=False)
with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"):
psdf.quantile([0.25, 0.5, 0.75], numeric_only=False)
def test_pct_change(self):
pdf = pd.DataFrame(
{"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [300, 200, 400, 200]},
index=np.random.rand(4),
)
pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.pct_change(2), pdf.pct_change(2), check_exact=False)
self.assert_eq(psdf.pct_change().sum(), pdf.pct_change().sum(), check_exact=False)
def test_where(self):
pdf, psdf = self.df_pair
# pandas requires `axis` argument when the `other` is Series.
# `axis` is not fully supported yet in pandas-on-Spark.
self.assert_eq(
psdf.where(psdf > 2, psdf.a + 10, axis=0), pdf.where(pdf > 2, pdf.a + 10, axis=0)
)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.where(1)
def test_mask(self):
psdf = ps.from_pandas(self.pdf)
with self.assertRaisesRegex(TypeError, "type of cond must be a DataFrame or Series"):
psdf.mask(1)
def test_query(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2), "C": range(10, 5, -1)})
psdf = ps.from_pandas(pdf)
exprs = ("A > B", "A < C", "C == B")
for expr in exprs:
self.assert_eq(psdf.query(expr), pdf.query(expr))
# test `inplace=True`
for expr in exprs:
dummy_psdf = psdf.copy()
dummy_pdf = pdf.copy()
pser = dummy_pdf.A
psser = dummy_psdf.A
dummy_pdf.query(expr, inplace=True)
dummy_psdf.query(expr, inplace=True)
self.assert_eq(dummy_psdf, dummy_pdf)
self.assert_eq(psser, pser)
# invalid values for `expr`
invalid_exprs = (1, 1.0, (exprs[0],), [exprs[0]])
for expr in invalid_exprs:
with self.assertRaisesRegex(
TypeError,
"expr must be a string to be evaluated, {} given".format(type(expr).__name__),
):
psdf.query(expr)
# invalid values for `inplace`
invalid_inplaces = (1, 0, "True", "False")
for inplace in invalid_inplaces:
with self.assertRaisesRegex(
TypeError,
'For argument "inplace" expected type bool, received type {}.'.format(
type(inplace).__name__
),
):
psdf.query("a < b", inplace=inplace)
# doesn't support for MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
with self.assertRaisesRegex(TypeError, "Doesn't support for MultiIndex columns"):
psdf.query("('A', 'Z') > ('B', 'X')")
def test_take(self):
pdf = pd.DataFrame(
{"A": range(0, 50000), "B": range(100000, 0, -2), "C": range(100000, 50000, -1)}
)
psdf = ps.from_pandas(pdf)
# axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "C")])
psdf.columns = columns
pdf.columns = columns
# MultiIndex columns with axis=0 (default)
self.assert_eq(psdf.take([1, 2]).sort_index(), pdf.take([1, 2]).sort_index())
self.assert_eq(psdf.take([-1, -2]).sort_index(), pdf.take([-1, -2]).sort_index())
self.assert_eq(
psdf.take(range(100, 110)).sort_index(), pdf.take(range(100, 110)).sort_index()
)
self.assert_eq(
psdf.take(range(-110, -100)).sort_index(), pdf.take(range(-110, -100)).sort_index()
)
self.assert_eq(
psdf.take([10, 100, 1000, 10000]).sort_index(),
pdf.take([10, 100, 1000, 10000]).sort_index(),
)
self.assert_eq(
psdf.take([-10, -100, -1000, -10000]).sort_index(),
pdf.take([-10, -100, -1000, -10000]).sort_index(),
)
# axis=1
self.assert_eq(
psdf.take([1, 2], axis=1).sort_index(), pdf.take([1, 2], axis=1).sort_index()
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(), pdf.take([-1, -2], axis=1).sort_index()
)
self.assert_eq(
psdf.take(range(1, 3), axis=1).sort_index(),
pdf.take(range(1, 3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take(range(-1, -3), axis=1).sort_index(),
pdf.take(range(-1, -3), axis=1).sort_index(),
)
self.assert_eq(
psdf.take([2, 1], axis=1).sort_index(),
pdf.take([2, 1], axis=1).sort_index(),
)
self.assert_eq(
psdf.take([-1, -2], axis=1).sort_index(),
pdf.take([-1, -2], axis=1).sort_index(),
)
# Checking the type of indices.
self.assertRaises(TypeError, lambda: psdf.take(1))
self.assertRaises(TypeError, lambda: psdf.take("1"))
self.assertRaises(TypeError, lambda: psdf.take({1, 2}))
self.assertRaises(TypeError, lambda: psdf.take({1: None, 2: None}))
def test_axes(self):
pdf = self.pdf
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.axes, psdf.axes)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
self.assert_eq(pdf.axes, psdf.axes)
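    # SparseVector from pyspark.ml is backed by a Spark UDT; conversion from pandas
    # should round-trip it unchanged.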
def test_udt(self):
sparse_values = {0: 0.1, 1: 1.1}
sparse_vector = SparseVector(len(sparse_values), sparse_values)
pdf = pd.DataFrame({"a": [sparse_vector], "b": [10]})
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf, pdf)
def test_eval(self):
pdf = pd.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)})
psdf = ps.from_pandas(pdf)
# operation between columns (returns Series)
self.assert_eq(pdf.eval("A + B"), psdf.eval("A + B"))
self.assert_eq(pdf.eval("A + A"), psdf.eval("A + A"))
# assignment (returns DataFrame)
self.assert_eq(pdf.eval("C = A + B"), psdf.eval("C = A + B"))
self.assert_eq(pdf.eval("A = A + A"), psdf.eval("A = A + A"))
# operation between scalars (returns scalar)
self.assert_eq(pdf.eval("1 + 1"), psdf.eval("1 + 1"))
# complicated operations with assignment
self.assert_eq(
pdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
psdf.eval("B = A + B // (100 + 200) * (500 - B) - 10.5"),
)
# inplace=True (only support for assignment)
pdf.eval("C = A + B", inplace=True)
psdf.eval("C = A + B", inplace=True)
self.assert_eq(pdf, psdf)
pser = pdf.A
psser = psdf.A
pdf.eval("A = B + C", inplace=True)
psdf.eval("A = B + C", inplace=True)
self.assert_eq(pdf, psdf)
self.assert_eq(pser, psser)
# doesn't support for multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b"), ("z", "c")])
psdf.columns = columns
self.assertRaises(TypeError, lambda: psdf.eval("x.a + y.b"))
@unittest.skipIf(not have_tabulate, tabulate_requirement_message)
def test_to_markdown(self):
pdf = pd.DataFrame(data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]})
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.to_markdown(), psdf.to_markdown())
def test_cache(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
with psdf.spark.cache() as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(
repr(cached_df.spark.storage_level), repr(StorageLevel(True, True, False, True))
)
def test_persist(self):
pdf = pd.DataFrame(
[(0.2, 0.3), (0.0, 0.6), (0.6, 0.0), (0.2, 0.1)], columns=["dogs", "cats"]
)
psdf = ps.from_pandas(pdf)
storage_levels = [
StorageLevel.DISK_ONLY,
StorageLevel.MEMORY_AND_DISK,
StorageLevel.MEMORY_ONLY,
StorageLevel.OFF_HEAP,
]
for storage_level in storage_levels:
with psdf.spark.persist(storage_level) as cached_df:
self.assert_eq(isinstance(cached_df, CachedDataFrame), True)
self.assert_eq(repr(cached_df.spark.storage_level), repr(storage_level))
self.assertRaises(TypeError, lambda: psdf.spark.persist("DISK_ONLY"))
def test_squeeze(self):
axises = [None, 0, 1, "rows", "index", "columns"]
# Multiple columns
pdf = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"], index=["x", "y"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Multiple columns with MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")])
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value
pdf = pd.DataFrame([[1]], columns=["a"], index=["x"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with single value with MultiIndex column
columns = pd.MultiIndex.from_tuples([("A", "Z")])
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values
pdf = pd.DataFrame([1, 2, 3, 4], columns=["a"])
psdf = ps.from_pandas(pdf)
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
# Single column with multiple values with MultiIndex column
pdf.columns = columns
psdf.columns = columns
for axis in axises:
self.assert_eq(pdf.squeeze(axis), psdf.squeeze(axis))
def test_rfloordiv(self):
pdf = pd.DataFrame(
{"angles": [0, 3, 4], "degrees": [360, 180, 360]},
index=["circle", "triangle", "rectangle"],
columns=["angles", "degrees"],
)
psdf = ps.from_pandas(pdf)
expected_result = pdf.rfloordiv(10)
self.assert_eq(psdf.rfloordiv(10), expected_result)
def test_truncate(self):
pdf1 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
psdf1 = ps.from_pandas(pdf1)
pdf2 = pd.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[1000, 550, 400, 0, -1, -20, -500],
)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf = ps.DataFrame(
{"A": ["b", "c", "d"], "B": ["i", "j", "k"], "C": ["p", "q", "r"]},
index=[550, 400, 0],
)
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X"), ("C", "Z")])
pdf1.columns = columns
psdf1.columns = columns
pdf2.columns = columns
psdf2.columns = columns
self.assert_eq(psdf1.truncate(), pdf1.truncate())
self.assert_eq(psdf1.truncate(before=-20), pdf1.truncate(before=-20))
self.assert_eq(psdf1.truncate(after=400), pdf1.truncate(after=400))
self.assert_eq(psdf1.truncate(copy=False), pdf1.truncate(copy=False))
self.assert_eq(psdf1.truncate(-20, 400, copy=False), pdf1.truncate(-20, 400, copy=False))
# The bug for these tests has been fixed in pandas 1.1.0.
if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
self.assert_eq(psdf2.truncate(0, 550), pdf2.truncate(0, 550))
self.assert_eq(psdf2.truncate(0, 550, copy=False), pdf2.truncate(0, 550, copy=False))
else:
expected_psdf.columns = columns
self.assert_eq(psdf2.truncate(0, 550), expected_psdf)
self.assert_eq(psdf2.truncate(0, 550, copy=False), expected_psdf)
# axis = 1
self.assert_eq(psdf1.truncate(axis=1), pdf1.truncate(axis=1))
self.assert_eq(psdf1.truncate(before="B", axis=1), pdf1.truncate(before="B", axis=1))
self.assert_eq(psdf1.truncate(after="A", axis=1), pdf1.truncate(after="A", axis=1))
self.assert_eq(psdf1.truncate(copy=False, axis=1), pdf1.truncate(copy=False, axis=1))
self.assert_eq(psdf2.truncate("B", "C", axis=1), pdf2.truncate("B", "C", axis=1))
self.assert_eq(
psdf1.truncate("B", "C", copy=False, axis=1),
pdf1.truncate("B", "C", copy=False, axis=1),
)
# Exceptions
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, 100, 400, 0, -1, 550, -20],
)
msg = "truncate requires a sorted index"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate()
psdf = ps.DataFrame(
{
"A": ["a", "b", "c", "d", "e", "f", "g"],
"B": ["h", "i", "j", "k", "l", "m", "n"],
"C": ["o", "p", "q", "r", "s", "t", "u"],
},
index=[-500, -20, -1, 0, 400, 550, 1000],
)
msg = "Truncate: -20 must be after 400"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate(400, -20)
msg = "Truncate: B must be after C"
with self.assertRaisesRegex(ValueError, msg):
psdf.truncate("C", "B", axis=1)
def test_explode(self):
pdf = pd.DataFrame({"A": [[-1.0, np.nan], [0.0, np.inf], [1.0, -np.inf]], "B": 1})
pdf.index.name = "index"
pdf.columns.name = "columns"
psdf = ps.from_pandas(pdf)
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.name, expected_result1.index.name)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex
midx = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "c")], names=["index1", "index2"]
)
pdf.index = midx
psdf = ps.from_pandas(pdf)
expected_result1 = pdf.explode("A")
expected_result2 = pdf.explode("B")
self.assert_eq(psdf.explode("A"), expected_result1, almost=True)
self.assert_eq(psdf.explode("B"), expected_result2)
self.assert_eq(psdf.explode("A").index.names, expected_result1.index.names)
self.assert_eq(psdf.explode("A").columns.name, expected_result1.columns.name)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
# MultiIndex columns
columns = pd.MultiIndex.from_tuples([("A", "Z"), ("B", "X")], names=["column1", "column2"])
pdf.columns = columns
psdf.columns = columns
expected_result1 = pdf.explode(("A", "Z"))
expected_result2 = pdf.explode(("B", "X"))
expected_result3 = pdf.A.explode("Z")
self.assert_eq(psdf.explode(("A", "Z")), expected_result1, almost=True)
self.assert_eq(psdf.explode(("B", "X")), expected_result2)
self.assert_eq(psdf.explode(("A", "Z")).index.names, expected_result1.index.names)
self.assert_eq(psdf.explode(("A", "Z")).columns.names, expected_result1.columns.names)
self.assert_eq(psdf.A.explode("Z"), expected_result3, almost=True)
self.assertRaises(TypeError, lambda: psdf.explode(["A", "B"]))
self.assertRaises(ValueError, lambda: psdf.explode("A"))
def test_spark_schema(self):
psdf = ps.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("i1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
},
columns=["a", "b", "c", "d", "e", "f"],
)
actual = psdf.spark.schema()
expected = (
StructType()
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
actual = psdf.spark.schema("index")
expected = (
StructType()
.add("index", "long", False)
.add("a", "string", False)
.add("b", "long", False)
.add("c", "byte", False)
.add("d", "double", False)
.add("e", "boolean", False)
.add("f", "timestamp", False)
)
self.assertEqual(actual, expected)
def test_print_schema(self):
psdf = ps.DataFrame(
{"a": list("abc"), "b": list(range(1, 4)), "c": np.arange(3, 6).astype("i1")},
columns=["a", "b", "c"],
)
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
psdf.spark.print_schema()
actual = out.getvalue().strip()
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
out = StringIO()
sys.stdout = out
psdf.spark.print_schema(index_col="index")
actual = out.getvalue().strip()
self.assertTrue("index: long" in actual, actual)
self.assertTrue("a: string" in actual, actual)
self.assertTrue("b: long" in actual, actual)
self.assertTrue("c: byte" in actual, actual)
finally:
sys.stdout = prev
def test_explain_hint(self):
psdf1 = ps.DataFrame(
{"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 5]},
columns=["lkey", "value"],
)
psdf2 = ps.DataFrame(
{"rkey": ["foo", "bar", "baz", "foo"], "value": [5, 6, 7, 8]},
columns=["rkey", "value"],
)
merged = psdf1.merge(psdf2.spark.hint("broadcast"), left_on="lkey", right_on="rkey")
prev = sys.stdout
try:
out = StringIO()
sys.stdout = out
merged.spark.explain()
actual = out.getvalue().strip()
self.assertTrue("Broadcast" in actual, actual)
finally:
sys.stdout = prev
def test_mad(self):
pdf = pd.DataFrame(
{
"A": [1, 2, None, 4, np.nan],
"B": [-0.1, 0.2, -0.3, np.nan, 0.5],
"C": ["a", "b", "c", "d", "e"],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.mad(), pdf.mad())
self.assert_eq(psdf.mad(axis=1), pdf.mad(axis=1))
with self.assertRaises(ValueError):
psdf.mad(axis=2)
# MultiIndex columns
        columns = pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("A", "Z")])
import run_utils as util
import options
import os, sys
try:
import pandas as pd
except ImportError:
print("Pandas not available\n")
def get_runtime_data(app_name, cmds, platform):
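    # Read runtimes.csv (input_size, runtime) from the given platform directory and return
    # the runtime recorded for this workload's reference input; 0 signals missing data.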
util.chdir(platform)
try:
df = pd.read_csv(
"runtimes.csv", names=["input_size", "runtime"], index_col="input_size"
)
return df.loc[cmds["ref_input"], "runtime"]
    except Exception:
print("Runtimes not available for " + app_name + "\n")
return 0
def get_runtimes(opts, all_plot_data, impl):
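    # Collect CPU/GPU runtimes of every enabled workload for the given implementation
    # (e.g. "native" or "numba") into all_plot_data[app].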
util.chdir(impl)
numba_dir = os.getcwd()
for app, cmds in opts.wls.wl_list.items():
if cmds["execute"] is True:
plot_data_entry = {}
if app in all_plot_data:
plot_data_entry = all_plot_data[app]
util.chdir(app)
app_dir = os.getcwd()
if (
opts.platform == options.platform.cpu
or opts.platform == options.platform.all
):
cpu_perf = get_runtime_data(app, cmds, "CPU")
                if cpu_perf != 0:
plot_data_entry[impl + "_cpu"] = cpu_perf
util.chdir(app_dir)
if (
opts.platform == options.platform.gpu
or opts.platform == options.platform.all
):
gpu_perf = get_runtime_data(app, cmds, "GPU")
                if gpu_perf != 0:
plot_data_entry[impl + "_gpu"] = gpu_perf
util.chdir(numba_dir)
all_plot_data[app] = plot_data_entry
def check_envvars_tools(opts):
if (
opts.analysis is not options.analysis.all
and opts.analysis is not options.analysis.perf
):
print(
"Plotting can be run only with option --analysis(-a) set to all or perf. Exiting"
)
sys.exit()
try:
import pandas
    except ImportError:
print("Pandas not available. Plotting disabled\n")
sys.exit()
def plot_efficiency_graph(all_plot_data):
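    # Per benchmark, efficiency is numba runtime / native runtime * 100, computed separately
    # for CPU and GPU where both are available, then plotted and saved as Efficiency_graph.pdf.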
df = pd.DataFrame.from_dict(all_plot_data, orient="index")
plot = False
try:
df["CPU"] = (df["numba_cpu"] / df["native_cpu"]) * 100.00
plot = True
df.drop(columns=["native_cpu", "numba_cpu"], inplace=True)
    except KeyError:
print("CPU Efficiency data not available\n")
try:
df["GPU"] = (df["numba_gpu"] / df["native_gpu"]) * 100.00
plot = True
df.drop(columns=["native_gpu", "numba_gpu"], inplace=True)
    except KeyError:
print("GPU Efficiency data not available\n")
if plot:
# df.drop(columns=['native_cpu', 'native_gpu', 'numba_cpu', 'numba_gpu'], inplace=True)
bar_chart = df.plot.bar(rot=45, fontsize=10)
# bar_chart.legend(loc='upper right')
bar_chart.set_ylabel("Efficiency in percentage", fontsize=10)
bar_chart.set_xlabel("Benchmark", fontsize=10)
bar_chart.set_title(
"Efficiency of Numba execution relative to OpenMP execution on CPU and GPU",
fontsize=10,
)
fig = bar_chart.get_figure()
fig_filename = "Efficiency_graph.pdf"
fig.savefig(fig_filename, bbox_inches="tight")
else:
print(
"Insufficient data to generate Efficiency graph. Verify execution times in runtimes.csv\n"
)
def plot_speedup_graph(all_plot_data):
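    # Gather every benchmark's runtimes into a single DataFrame before computing speedups.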
    df = pd.DataFrame.from_dict(all_plot_data, orient="index")
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
data.head(10)
#Code starts here
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data['Better_Event'] = None
data['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'], 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] == data['Total_Winter'],'Both',data['Better_Event'])
better_event = data['Better_Event'].value_counts().idxmax()
print(better_event)
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
set1 = []
set2 = []
set3 = []
s1 = []
common = []
data = pd.read_csv(path)
import pandas as pd
import numpy as np
from datetime import timedelta
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
class spatial_mapping():
def __init__(self, data, gps, gps_utc=0):
df=pd.DataFrame(data)
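        # Column 0 holds MATLAB datenums; 693962 is the datenum of 1900-01-01, so
        # subtracting it leaves days since that origin for pd.to_datetime below.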
df[0]=pd.to_datetime(df[0]-693962,unit='D',origin=pd.Timestamp('1900-01-01'),utc=True)
df=df.rename(columns={0:'Time'})
self.data=df
if type(gps)==str:
if gps[-3:].lower()=='csv':
self.gps=pd.read_csv(gps)
if 'time' in self.gps:
self.gps.time=pd.to_datetime(self.gps.time)+timedelta(hours=gps_utc)
def extract_fragments(self, fragments, mean=True, fragment_method='time'):
# fragments: a csv file contains beginning and ending time of recording sessions
# Or providing a time interval (seconds) for separating recording sessions
if type(fragments)==str:
if fragments[-3:].lower()=='csv':
slice_df=pd.read_csv(fragments, sep=',')
if fragment_method=='time':
                    slice_df['Begin_time']=pd.to_datetime(slice_df['Begin_time'],utc=True)
import pandas as pd
import math
pd.options.mode.chained_assignment = None # default='warn'
# import sqlalchemy
# import pyodbc
ingredients_db = pd.read_csv('data/ingredients.csv') # hard coded (unchanging)
MTH_LIST = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
SEASONS_LIST = ['All', 'Spring', 'Summer', 'Autumn', 'Winter']
TAG_LIST = [
'Indian',
'Vegetarian',
'Slow Cooked',
'Cocotte-Friendly',
'Entree',
'Brunch',
'Quick'
]
BOOK_LIST = {
'HH': 'HH Art of Eating Well',
'GS': 'GS Good and Simple',
'EG': 'EG Eat Green',
'GG': 'GG Get the Glow',
'DE': 'DE Deliciously Ella',
'SC': 'SC Slow Cooker 5 Ingr',
'SF': 'SF Super Food Magazine',
'LM': 'LM Custom Recipes',
'BP': 'BP Custom Recipes'
}
day_map = {
'sat': 'Saturday',
'sun': 'Sunday',
'mon': 'Monday',
'tue': 'Tuesday',
'wed': 'Wednesday',
'thu': 'Thursday',
'fri': 'Friday'
}
season_map = {
'Winter': ['Dec', 'Jan', 'Feb'],
'Spring': ['Mar', 'Apr', 'May'],
'Summer': ['Jun', 'Jul', 'Aug'],
'Autumn': ['Sep', 'Oct', 'Nov'],
'All': ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
}
phdf_shopping_list = pd.DataFrame(data={
'Location': ['a', 'a'],
'Ingredient': ['ABCD', 'ASDF'],
'Recipe': ['ABCD', 'QWERW'],
'Quantity': [1.0, 2.0],
'Units': ['g', 'g'],
'Sub Group': [0, 0],
'Meals': [1, 1]
})
phdf_rec_ingr_tbl = pd.DataFrame(data={
'Ingredient': ['ABCD'],
'Quantity': [1.0],
'Units': ['g'],
'Sub Group': [0]
})
def meal_plan(week='all'):
    load = pd.read_csv('data/sl_store.txt')
# coding=utf-8
from datetime import datetime
from wit import Wit
from string import Template
from time import sleep
from collections import namedtuple
from pathlib import Path
import pandas as pd
import deepcut
import os
import glob
import pickle
import config
toq_key = config.toq_key
say_key = config.say_key
sub_key = config.sub_key
sec_key = config.sec_key
who_key = config.who_key
now_here = os.getcwd()
def get_file_name(dir_file):
fn = os.path.basename(dir_file)
fn_alone = os.path.splitext(fn)[0]
return fn_alone
# df is the table; extend is a word describing how the table was changed, and it also names the folder where the file is stored
def export_file(old_table, new_table, extend):
file_name = os.path.basename(old_table)
fn_no_extension = os.path.splitext(file_name)[0]
path_here = os.getcwd()
    # Export the df table
directory = os.path.join(path_here, extend)
if not os.path.exists(directory):
os.makedirs(directory)
export_file_dir = os.path.join(directory, fn_no_extension + '_{!s}.csv'.format(extend))
new_table.to_csv(export_file_dir, sep='\t', encoding='utf-8')
print('ส่งออกไฟล์ {!s} แล้ว'.format(fn_no_extension + '_{!s}.csv'.format(extend)))
# Start from the csv converted from the txt exported from LINE,
# then transform it into a table made of the message time (time), the sender name (name) and the message text (text)
def clean_table(file_path):
    # chat is the table loaded from the csv file we are going to clean
chat = pd.read_csv(file_path)
    # chat_mod is the table built from chat with new column names
    chat_mod = pd.DataFrame({'time': chat.iloc[:, 0], 'name': chat.iloc[:, 1], 'text': chat.iloc[:, 2]})
    # If the input with its first five characters removed is a valid date, only that date is returned
    # Anything that does not match this condition is left untouched and returned as-is
def validate(date_text):
try:
datetime.strptime(date_text[5:], '%d/%m/%Y')
b = date_text[5:]
return b
except ValueError:
return date_text
    # Check whether the input is in the '%H:%M' format
def tm(t):
try:
datetime.strptime(t, '%H:%M')
return True
except ValueError:
return False
    # Check whether the input is in the '%d/%m/%Y' format
def date(d):
try:
datetime.strptime(d, '%d/%m/%Y')
return True
except ValueError:
return False
    # For entries of the time column that carry a day name, strip the day name; leave everything else unchanged, then collect the results into a list
na = []
for vela in chat_mod['time']:
k = validate(str(vela))
na.append(k)
    # Walk through the items of the list na
for s in na:
        # If the item is in the '%H:%M' format
if tm(s):
            # If the item of na right before s is in the '%d/%m/%Y' format
if date(na[na.index(s) - 1]):
                # replace the item at position s with the previous item, a space, then s itself
na[na.index(s)] = na[na.index(s) - 1] + " " + s
            # If the item of na right before s, with its last 6 characters removed, is in the '%d/%m/%Y' format
elif date(na[na.index(s) - 1][:-6]):
                # replace the item at position s with the previous item stripped of its last 6 characters,
                # followed by a space and then s itself
na[na.index(s)] = na[na.index(s) - 1][:-6] + " " + s
            # Items in any other format are left alone
else:
pass
    # When this is done, na holds items in the %d/%m/%Y %H:%M format
    # The time_mod column carries the date in front of the time, in the %d/%m/%Y %H:%M format
chat_mod['time_mod'] = pd.Series(na)
    # fd is a table with 3 columns
fd = chat_mod[['time_mod', 'name', 'text']]
    # dfd is the table after dropping rows whose text column is empty
dfd = fd.dropna(subset=['text'])
    # These lists come from the individual columns of dfd
a1 = dfd['time_mod'].tolist()
a2 = dfd['name'].tolist()
a3 = dfd['text'].tolist()
    # Build a new table named df out of a1, a2 and a3
df = pd.DataFrame({'time': a1, 'name': a2, 'text': a3})
export_file(file_path, df, 'cleaned')
return df
def time_inter(ct):
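    # Compute, for each message, the time elapsed since the previous message, in minutes.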
b1 = pd.Series(ct['time'])
b2 = pd.Series(ct['time'])
temp_vela = '%d/%m/%Y %H:%M'
la = 0
minute_set = []
for _ in b1:
try:
c1 = datetime.strptime(b1[la - 1], temp_vela)
c2 = datetime.strptime(b2[la], temp_vela)
d1 = c2 - c1
minute_set.append(d1)
la = la + 1
except KeyError:
c1 = datetime.strptime(b1[la], temp_vela)
d1 = c1 - c1
minute_set.append(d1)
la = la + 1
    # The time_ans column shows the time before a reply, as days followed by a 00:00:00-style time
time_ans = pd.Series(minute_set)
    # The time_min column shows the time before a reply, in minutes
time = pd.DatetimeIndex(time_ans)
time_min = (time.day - 1) * 24 * 60 + time.hour * 60 + time.minute
return time_min
def sender_num(ct):
    # Convert sender names into numbers
ra = []
name_set = set(ct['name'].tolist())
name_list = list(name_set)
for each_name in ct['name']:
ra.append(name_list.index(each_name))
return ra
def numb_text(ct):
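    # Number each message by its position inside a consecutive run of messages
    # from the same sender (the counter restarts whenever the sender changes).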
sii = 1
yaa = []
x = ct['name'].tolist()
lal = 0
    # x is the list of sender numbers
    # Consider the sender number of each message
for each_name in x:
# n คือ เลขผู้ส่งข้อความที่สนใจ
# na2 คือ สมาชิกตัวที่อยู่ก่อนหน้าหน้า n
na2 = x[lal - 1]
        # If the sender number being considered equals the sender number of the previous message
if each_name == na2:
            # append the current value of sii to yaa
yaa.append(sii)
        # If the sender number being considered differs from the previous sender number
elif each_name != na2:
            # reset sii to 1 and append it to yaa
sii = 1
yaa.append(sii)
        # Update sii so it is ready to go into yaa if the next message comes from the same sender
sii = sii + 1
        # Update lal, which is used to locate the previous sender number
lal = lal + 1
return yaa
def word_separation(text):
# custom_dict = '/Users/bigmorning/Desktop/myword.txt'
sep_text = deepcut.tokenize(text)
join_sep_text = " ".join(sep_text)
return join_sep_text
def extract_value(inp_text, wit_token):
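    # Query a Wit.ai app with the tokenized text and pull out the detected intent
    # value, falling back through the response layouts the API may return.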
understanding = Wit(wit_token)
deep = understanding.message(inp_text)
try:
intent_value = deep['data'][0]['__wit__legacy_response']['entities']['intent'][0]['value']
except KeyError:
try:
intent_value = deep['entities']['intent'][0]['value']
except KeyError:
intent_value = deep['entities']
return intent_value
def show_progress(mal, l):
try:
s0 = Template('เพิ่มค่า $value ในเซต $set')
s1 = s0.substitute(value=mal, set=l)
except TypeError:
s0 = Template('เพิ่มค่า $value ในเซต $set')
s1 = s0.substitute(value=str(mal), set=l)
return print(s1)
def load_keep(extend, file_path, sv_wcs, sv_secs, sv_scs, sv_ws, sv_ts, sv_ss):
directory = os.path.join(now_here, extend, get_file_name(file_path) + '_keep.txt')
if not os.path.exists(directory):
with open(directory, "wb") as fp:
word_count_set = sv_wcs
sen_count_set = sv_secs
sub_count_set = sv_scs
who_set = sv_ws
toq_set = sv_ts
say_set = sv_ss
pickle.dump((word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set), fp)
return word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set
else:
with open(directory, "rb") as fp:
word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set = pickle.load(fp)
return word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set
def save_keep(extend, file_path, word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set, n):
directory = os.path.join(now_here, extend, get_file_name(file_path) + '_keep.txt')
if n % 5 == 0:
with open(directory, "wb") as fp:
pickle.dump((word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set), fp)
else:
pass
def initial_assignment(file_path, ct):
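    # Run the per-sentence analysis (word count, sentence count, subject presence,
    # who is mentioned, sentence type, act type) over the chat, saving checkpoints
    # so an interrupted run can resume where it stopped.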
ia = namedtuple('type', 'wordCount senCount sAppear menWho senType doType')
text = ct['text'].tolist()
word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set = load_keep('analyse',
file_path,
[], [], [], [], [], [])
for n, r in enumerate(text):
if n == len(word_count_set):
print('เริ่มวิเคราะห์ประโยคที่ {!s} : {!s}'.format(str(n), r))
sep_word = word_separation(r)
            # Count the words in the text box
word_count = len(sep_word.split())
word_count_set.append(word_count)
show_progress(word_count, 'word_count_set')
            # How many sentences the text box contains: 0, 1, or more than 1
sen_count = extract_value(sep_word, sec_key)
sen_count_set.append(sen_count)
show_progress(sen_count, 'sen_count_set')
            # Whether the sentence states its subject
sub_count = extract_value(sep_word, sub_key)
sub_count_set.append(sub_count)
show_progress(sub_count, 'sub_count_set')
            # Whether the sentence talks about the sender, the other party, both, or something else
who = extract_value(sep_word, who_key)
who_set.append(who)
show_progress(who, 'who_set')
            # Whether the sentence is a statement or a question
toq = extract_value(sep_word, toq_key)
toq_set.append(toq)
show_progress(toq, 'toq_set')
            # The act carried out by that sentence
say = extract_value(sep_word, say_key)
say_set.append(say)
show_progress(say, 'say_set')
print("----------เสร็จสิ้นแถวที่ " + str(n) + " ----------")
save_keep('analyse', file_path, word_count_set, sen_count_set, sub_count_set, who_set, toq_set, say_set, n)
df = pd.DataFrame({'name': ct['name'],
'text': ct['text'],
'wordCount': word_count_set,
'senCount': sen_count_set,
'sAppear': sub_count_set,
'menWho': who_set,
'senType': toq_set,
'doType': say_set})
export_file(file_path, df, 'analyse')
return ia(wordCount=word_count_set,
senCount=sen_count_set,
sAppear=sub_count_set,
menWho=who_set,
senType=toq_set,
doType=say_set)
def som(file_path, ct):
ia = namedtuple('type', 'wordCount senCount sAppear menWho senType doType')
    # Find out which csv file should be called up for repair
    # It is looked up among the files tagged with the 'analyse' extend
ext = 'analyse'
directory = os.path.join(now_here, ext)
    # path of the csv file that will be called up for repair
call_csv = os.path.join(directory, get_file_name(file_path) + '_{!s}.csv'.format(ext))
    # Open the file
    last_csv = pd.read_csv(call_csv, sep='\t')
#!/usr/bin/python3
import sys
import pandas as pd
import numpy as np
import os
import concurrent.futures
import functools, itertools
import sofa_time
import statistics
import multiprocessing as mp
import socket
import ipaddress
# sys.path.insert(0, '/home/st9540808/Desktop/sofa/bin')
import sofa_models, sofa_preprocess
import sofa_config
import sofa_print
colors_send = ['#14f2e0', '#41c8e5', '#6e9eeb']
colors_recv = ['#9a75f0', '#c74bf6', '#f320fa', '#fe2bcc']
color_send = itertools.cycle(colors_send)
color_recv = itertools.cycle(colors_recv)
sofa_ros2_fieldnames = [
"timestamp", # 0
"event", # 1
"duration", # 2
"deviceId", # 3
"copyKind", # 4
"payload", # 5
"bandwidth", # 6
"pkt_src", # 7
"pkt_dst", # 8
"pid", # 9
"tid", # 10
"name", # 11
"category", # 12
"unit", # 13
"msg_id"] # 14
# @profile
def extract_individual_rosmsg(df_send_, df_recv_, *df_others_):
""" Return a dictionary with topic name as key and
a list of ros message as value.
Structure of return value: {topic_name: {(guid, seqnum): log}}
where (guid, seqnum) is a msg_id
"""
# Convert timestamp to unix time
# unix_time_off = statistics.median(sofa_time.get_unix_mono_diff() for i in range(100))
# for df in (df_send, df_recv, *df_others):
# df['ts'] = df['ts'] + unix_time_off
df_send_[1]['ts'] = df_send_[1]['ts'] + df_send_[0].cpu_time_offset + df_send_[0].unix_time_off
df_recv_[1]['ts'] = df_recv_[1]['ts'] + df_recv_[0].cpu_time_offset + df_recv_[0].unix_time_off
df_others = []
for cfg_to_pass, df_other in df_others_:
df_other['ts'] = df_other['ts'] + cfg_to_pass.cpu_time_offset + cfg_to_pass.unix_time_off
df_others.append(df_other)
df_send = df_send_[1]
df_recv = df_recv_[1]
# sort by timestamp
df_send.sort_values(by=['ts'], ignore_index=True)
df_recv.sort_values(by=['ts'], ignore_index=True)
# publish side
gb_send = df_send.groupby('guid')
all_publishers_log = {guid:log for guid, log in gb_send}
# subscription side
gb_recv = df_recv.groupby('guid')
all_subscriptions_log = {guid:log for guid, log in gb_recv}
# other logs (assume there's no happen-before relations that needed to be resolved)
# every dataframe is a dictionary in `other_log_list`
gb_others = [df_other.groupby('guid') for df_other in df_others]
other_log_list = [{guid:log for guid, log in gb_other} for gb_other in gb_others]
# find guids that are in both subsciption and publisher log
interested_guids = all_subscriptions_log.keys() \
& all_publishers_log.keys()
res = {}
for guid in interested_guids:
# get a publisher from log
df = all_publishers_log[guid]
df_send_partial = all_publishers_log[guid].copy()
add_data_calls = df[~pd.isna(df['seqnum'])] # get all non-NaN seqnums in log
try:
pubaddr, = pd.unique(df['publisher']).dropna()
print(pubaddr)
except ValueError as e:
print('Find a guid that is not associated with a publisher memory address. Error: ' + str(e))
continue
# print(add_data_calls)
all_RTPSMsg_idx = ((df_send['func'] == '~RTPSMessageGroup') & (df_send['publisher'] == pubaddr))
all_RTPSMsgret_idx = ((df_send['func'] == '~RTPSMessageGroup exit') & (df_send['publisher'] == pubaddr))
all_sendSync_idx = ((df_send['func'] == 'sendSync') & (df_send['publisher'] == pubaddr))
all_nn_xpack_idx = (df['func'] == 'nn_xpack_send1')
modified_rows = []
for idx, add_data_call in add_data_calls.iterrows():
ts = add_data_call['ts']
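            # Tie the sequence number back to the closest preceding rcl-layer record
            # so higher-layer events can be matched to this message.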
rcl_idx = df.loc[(df['ts'] < ts) & (df['layer'] == 'rcl')]['ts'].idxmax()
df_send_partial.loc[rcl_idx, 'seqnum'] = add_data_call.loc['seqnum']
# For grouping RTPSMessageGroup function
try:
ts_gt = (df_send['ts'] > ts) # ts greater than that of add_data_call
RTPSMsg_idx = df_send.loc[ts_gt & all_RTPSMsg_idx]['ts'].idxmin()
modified_row = df_send.loc[RTPSMsg_idx]
modified_row.at['seqnum'] = add_data_call.loc['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
RTPSMsgret_idx = df_send.loc[ts_gt & all_RTPSMsgret_idx]['ts'].idxmin()
modified_row = df_send.loc[RTPSMsgret_idx]
modified_row.at['seqnum'] = add_data_call.loc['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
sendSync_idx = df_send.loc[ts_gt & (df_send['ts'] < df_send.loc[RTPSMsgret_idx, 'ts']) & all_sendSync_idx]
sendSync = sendSync_idx.copy()
sendSync['seqnum'] = add_data_call.loc['seqnum']
modified_rows.extend(row for _, row in sendSync.iterrows())
except ValueError as e:
pass
if 'rmw_cyclonedds_cpp' in df['implementation'].values:
try:
df_cls = other_log_list[0][guid]
seqnum = add_data_call.loc['seqnum']
max_ts = df_cls[(df_cls['layer'] == 'cls_egress') & (df_cls['seqnum'] == seqnum)]['ts'].max()
index = df.loc[(ts < df['ts']) & (df['ts'] < max_ts) & all_nn_xpack_idx].index
df_send_partial.loc[index, 'seqnum'] = seqnum
except ValueError as e:
pass
df_send_partial = pd.concat([df_send_partial, pd.DataFrame(modified_rows)])
# get a subscrption from log
df = all_subscriptions_log[guid]
df_recv_partial = all_subscriptions_log[guid].copy()
add_recvchange_calls = df[~pd.isna(df['seqnum'])] # get all not nan seqnums in log
if 'cyclonedds' in df['layer'].unique():
add_recvchange_calls = df[df['func'] == 'ddsi_udp_conn_read exit']
all_sub = pd.unique(df['subscriber']) # How many subscribers subscribe to this topic?
subs_map = {sub: (df['subscriber'] == sub) &
(df['func'] == "rmw_take_with_info exit") for sub in all_sub}
all_pid = pd.unique(df_recv['pid'])
pid_maps = {pid: (df_recv['pid'] == pid) &
(df_recv['func'] == "rmw_wait exit") for pid in all_pid}
modified_rows = []
for idx, add_recvchange_call in add_recvchange_calls.iterrows():
ts = add_recvchange_call['ts']
subaddr = add_recvchange_call.at['subscriber']
seqnum = add_recvchange_call.at['seqnum']
# Consider missing `rmw_take_with_info exit` here
try:
rmw_take_idx = df.loc[(df['ts'] > ts) & subs_map[subaddr]]['ts'].idxmin()
if 'cyclonedds' in df['layer'].unique():
free_sample = df.loc[(df['func'] == 'free_sample') & (df['seqnum'] == seqnum)]
if len(free_sample) == 0:
continue
free_sample = free_sample.iloc[0]
if free_sample['ts'] > df.at[rmw_take_idx, 'ts']:
rmw_take_idx = df.loc[(df['ts'] > free_sample['ts']) & subs_map[subaddr]]['ts'].idxmin()
# if 'cyclonedds' in df['layer'].unique():
# free_sample = df_recv.loc[(df_recv['ts'] > ts) &
# (df_recv['func'] == 'free_sample') &
# (df_recv['pid'] == df.at[rmw_take_idx, 'pid']) &
# (df_recv['seqnum'] == seqnum)]
# free_sample_idx = free_sample.idxmax()
# if len(free_sample) == 0:
# rmw_take_idx = df.loc[(df['ts'] > free_sample['ts']) & subs_map[subaddr]]['ts'].idxmin()
# print(df.loc[rmw_take_idx])
# free_sample() should be called in rmw_take, therefore
# free_sample() happened before rmw_take_with_info returns
df_recv_partial.at[rmw_take_idx, 'seqnum'] = seqnum
# TODO: Group by ip port in cls_ingress
UDPResourceReceive_idx = df.loc[(df['ts'] < ts) &
(df['func'] == 'UDPResourceReceive exit') &
(df['pid'] == add_recvchange_call.at['pid'])]['ts'].idxmax()
df_recv_partial.at[UDPResourceReceive_idx, 'seqnum'] = seqnum
except ValueError as e:
pass
try:
# Group rmw_wait exit
pid = df_recv_partial.at[rmw_take_idx, 'pid']
rmw_wait_idx = df_recv.loc[(df_recv['ts'] < df_recv_partial.at[rmw_take_idx,'ts']) &
pid_maps[pid]]['ts'].idxmax()
modified_row = df_recv.loc[rmw_wait_idx]
modified_row.at['seqnum'] = add_recvchange_call.at['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
except ValueError as e:
pass
# Doesn't need to remove duplicates for
# a = pd.DataFrame(modified_rows)
# print(a[~a.index.duplicated(keep='first')])
df_recv_partial = pd.concat([df_recv_partial, pd.DataFrame(modified_rows)])
# Merge all modified dataframes
df_merged = df_send_partial.append(df_recv_partial, ignore_index=True, sort=False)
# handle other log files
for other_log in other_log_list:
df_other = other_log[guid]
df_merged = df_merged.append(df_other, ignore_index=True, sort=False)
# Avoid `TypeError: boolean value of NA is ambiguous` when calling groupby()
df_merged['subscriber'] = df_merged['subscriber'].fillna(np.nan)
df_merged['guid'] = df_merged['guid'].fillna(np.nan)
df_merged['seqnum'] = df_merged['seqnum'].fillna(np.nan)
df_merged.sort_values(by=['ts'], inplace=True)
gb_merged = df_merged.groupby(['guid', 'seqnum'])
ros_msgs = {msg_id:log for msg_id, log in gb_merged} # msg_id: (guid, seqnum)
# get topic name from log
topic_name = df_merged['topic_name'].dropna().unique()
if len(topic_name) > 1:
raise Exception("More than one topic in a log file")
topic_name = topic_name[0]
if topic_name in res:
res[topic_name] = {**res[topic_name], **ros_msgs}
else:
res[topic_name] = ros_msgs
print('finished parsing ' + topic_name)
return res
def extract_individual_rosmsg2(df_send, df_recv, df_cls):
# unix_time_off = statistics.median(sofa_time.get_unix_mono_diff() for i in range(100))
# for df in (df_send, df_recv, df_cls):
# df['ts'] = df['ts'] + unix_time_off
df_send.sort_values(by=['ts'], ignore_index=True)
df_recv.sort_values(by=['ts'], ignore_index=True)
df_cls.sort_values(by=['ts'], ignore_index=True)
# publish side
gb_send = df_send.groupby('guid')
all_publishers_log = {guid:log for guid, log in gb_send}
# subscription side
gb_recv = df_recv.groupby('guid')
all_subscriptions_log = {guid:log for guid, log in gb_recv}
# in kernel (probably not need it)
gb_cls = df_cls.groupby('guid')
all_cls_log = {guid:log for guid, log in gb_cls}
interested_guids = all_subscriptions_log.keys() \
& all_publishers_log.keys()
res = {}
for guid in interested_guids:
# get a publisher from log
df = all_publishers_log[guid].copy()
df_send_partial = all_publishers_log[guid].copy()
add_data_calls = df[~pd.isna(df['seqnum'])] # get all non-NaN seqnums in log
try:
pubaddr, = pd.unique(df['publisher']).dropna()
except ValueError as e:
print('Find a guid that is not associated with a publisher memory address. Error: ' + str(e))
continue
print(pubaddr)
modified_rows = []
for idx, add_data_call in add_data_calls.iterrows():
seqnum = add_data_call['seqnum']
ts = add_data_call['ts']
rcl_idx = df.loc[(df['ts'] < ts) & (df['layer'] == 'rcl')]['ts'].idxmax()
df_send_partial.loc[rcl_idx, 'seqnum'] = add_data_call.loc['seqnum']
# Use the two timestamps to get a slice of dataframe
# Here we drop :~RTPSMessageGroup exit"
ts_cls = df_cls[(df_cls['guid'] == guid) &
(df_cls['seqnum'] == seqnum) &
(df_cls['layer'] == 'cls_egress')]['ts'].max() # Get ts upper bound
df_send_tgt = df_send[(ts <= df_send['ts']) &
(df_send['ts'] <= ts_cls) &
(df_send['publisher'] == pubaddr)]
modified_row = df_send_tgt.copy()
modified_row['guid'] = guid
modified_row['seqnum'] = seqnum
modified_rows.append(modified_row)
df_send_partial = df_send_partial.combine_first(pd.concat(modified_rows))
# get a subscrption from log
df = all_subscriptions_log[guid].copy()
df_recv_partial = all_subscriptions_log[guid].copy()
add_recvchange_calls = df[~pd.isna(df['seqnum'])] # get all not nan seqnums in log
all_sub = pd.unique(df['subscriber']) # How many subscribers subscribe to this topic?
subs_map = {sub: (df['subscriber'] == sub) &
(df['func'] == "rmw_take_with_info exit") for sub in all_sub}
all_pid = pd.unique(df_recv['pid'])
pid_maps = {pid: (df_recv['pid'] == pid) &
(df_recv['func'] == "rmw_wait exit") for pid in all_pid}
modified_rows = []
for idx, add_recvchange_call in add_recvchange_calls.iterrows():
ts = add_recvchange_call.at['ts']
subaddr = add_recvchange_call.at['subscriber']
seqnum = add_recvchange_call.at['seqnum']
# Use the two timestamps to get a slice of dataframe
ts_cls = df_cls[(df_cls['guid'] == guid) &
(df_cls['seqnum'] == seqnum) &
(df_cls['layer'] == 'cls_ingress')]['ts'].min()
df_recv_tgt = df_recv[(ts_cls < df_recv['ts']) & (df_recv['ts'] < ts)].copy()
# Consider missing `rmw_take_with_info exit` here
try:
rmw_take_idx = df.loc[(df['ts'] > ts) & subs_map[subaddr]]['ts'].idxmin()
df_recv_partial.at[rmw_take_idx, 'seqnum'] = seqnum
# TODO: Group by ip port in cls_ingress
UDPResourceReceive_idx = df_recv_tgt.loc[(df_recv_tgt['func'] == 'UDPResourceReceive exit') &
(df_recv_tgt['pid'] == add_recvchange_call.at['pid'])]['ts'].idxmax();
df_recv_partial.at[UDPResourceReceive_idx, 'seqnum'] = seqnum
# Group rmw_wait exit
pid = df_recv_partial.at[rmw_take_idx, 'pid']
rmw_wait_idx = df_recv.loc[(df_recv['ts'] < df_recv_partial.at[rmw_take_idx,'ts']) &
pid_maps[pid]]['ts'].idxmax()
modified_row = df_recv.loc[rmw_wait_idx]
modified_row.at['seqnum'] = add_recvchange_call.at['seqnum']
modified_row.at['guid'] = guid
modified_rows.append(modified_row)
except ValueError as e:
pass
        df_recv_partial = pd.concat([df_recv_partial, pd.DataFrame(modified_rows)])
import flask
from flask import request
from flask import jsonify
import numpy as np
import pandas as pd
app = flask.Flask(__name__)
from sklearn.feature_extraction.text import TfidfVectorizer
import hdbscan
def training(df_train, clf_params):
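    # Cluster the rows with HDBSCAN: use the precomputed 'emb' column when present,
    # otherwise build TF-IDF vectors from the raw text.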
params = {'alpha':1.0, 'min_samples':2, 'min_cluster_size':5, 'cluster_selection_epsilon':0.0}
params.update(clf_params)
alpha = params['alpha']
min_samples = params['min_samples']
min_cluster_size = params['min_cluster_size']
cluster_selection_epsilon = params['cluster_selection_epsilon']
if 'emb' in df_train.columns:
X = df_train['emb'].tolist()
else:
corpus = df_train['text'].tolist()
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(corpus)
clustering = hdbscan.HDBSCAN(alpha=alpha, min_samples=min_samples, min_cluster_size=min_cluster_size,
cluster_selection_epsilon=cluster_selection_epsilon).fit(X)
df_result = pd.DataFrame(columns=['id','cluster','probability'])
df_result['id'] = df_train['id'].copy()
df_result['cluster'] = clustering.labels_
df_result['probability'] = clustering.probabilities_
res = {}
res['df_res'] = df_result.to_dict()
return clustering, res
@app.route('/hdbscan/', methods=['POST'])
def home():
df_train = request.json['train_data']
    df_train = pd.DataFrame(df_train)
# -*- coding: utf-8 -*-
"""
@file
@brief Defines a streaming dataframe.
"""
import pickle
import os
from io import StringIO, BytesIO
from inspect import isfunction
import numpy
import numpy.random as nrandom
import pandas
from pandas.testing import assert_frame_equal
from pandas.io.json import json_normalize
from .dataframe_split import sklearn_train_test_split, sklearn_train_test_split_streaming
from .dataframe_io_helpers import enumerate_json_items, JsonIterator2Stream
class StreamingDataFrameSchemaError(Exception):
"""
Reveals an issue with inconsistant schemas.
"""
pass
class StreamingDataFrame:
"""
Defines a streaming dataframe.
The goal is to reduce the memory footprint.
The class takes a function which creates an iterator
on :epkg:`dataframe`. We assume this function can
be called multiple time. As a matter of fact, the
function is called every time the class needs to walk
through the stream with the following loop:
::
for df in self: # self is a StreamingDataFrame
# ...
The constructor cannot receive an iterator otherwise
this class would be able to walk through the data
only once. The main reason is it is impossible to
:epkg:`*py:pickle` (or :epkg:`dill`)
an iterator: it cannot be replicated.
Instead, the class takes a function which generates
an iterator on :epkg:`DataFrame`.
Most of the methods returns either a :epkg:`DataFrame`
either a @see cl StreamingDataFrame. In the second case,
methods can be chained.
By default, the object checks that the schema remains
the same between two chunks. This can be disabled
by setting *check_schema=False* in the constructor.
The user should expect the data to remain stable.
Every loop should produce the same data. However,
in some situations, it is more efficient not to keep
that constraints. Draw a random @see me sample
is one of these cases.
:param iter_creation: function which creates an iterator or an
instance of @see cl StreamingDataFrame
:param check_schema: checks that the schema is the same
for every :epkg:`dataframe`
:param stable: indicates if the :epkg:`dataframe` remains the same
whenever it is walked through
"""
def __init__(self, iter_creation, check_schema=True, stable=True):
self._delete_ = []
if isinstance(iter_creation, (pandas.DataFrame, dict,
numpy.ndarray, str)):
raise TypeError(
"Unexpected type %r for iter_creation. It must "
"be an iterator." % type(iter_creation))
if isinstance(iter_creation, StreamingDataFrame):
self.iter_creation = iter_creation.iter_creation
self.stable = iter_creation.stable
else:
self.iter_creation = iter_creation
self.stable = stable
self.check_schema = check_schema
def is_stable(self, do_check=False, n=10):
"""
Tells if the :epkg:`dataframe` is supposed to be stable.
@param do_check do not trust the value sent to the constructor
@param n number of rows used to check the stability,
None for all rows
@return boolean
*do_check=True* means the methods checks the first
*n* rows remains the same for two iterations.
"""
if do_check:
for i, (a, b) in enumerate(zip(self, self)):
if n is not None and i >= n:
break
try:
assert_frame_equal(a, b)
except AssertionError: # pragma: no cover
return False
return True
else:
return self.stable
def get_kwargs(self):
"""
Returns the parameters used to call the constructor.
"""
return dict(check_schema=self.check_schema)
def train_test_split(self, path_or_buf=None, export_method="to_csv",
names=None, streaming=True, partitions=None,
**kwargs):
"""
Randomly splits a :epkg:`dataframe` into smaller pieces.
The function returns streams of file names.
It chooses one of the options from module
:mod:`dataframe_split <pandas_streaming.df.dataframe_split>`.
@param path_or_buf a string, a list of strings or buffers, if it is a
string, it must contain ``{}`` like ``partition{}.txt``,
if None, the function returns strings.
@param export_method method used to store the partitions, by default
:epkg:`pandas:DataFrame:to_csv`, additional parameters
will be given to that function
@param names partitions names, by default ``('train', 'test')``
@param kwargs parameters for the export function and
:epkg:`sklearn:model_selection:train_test_split`.
@param streaming the function switches to a
streaming version of the algorithm.
@param partitions splitting partitions
@return outputs of the exports functions or two
@see cl StreamingDataFrame if path_or_buf is None.
The streaming version of this algorithm is implemented by function
@see fn sklearn_train_test_split_streaming. Its documentation
indicates the limitation of the streaming version and gives some
insights about the additional parameters.
"""
if streaming:
if partitions is not None:
if len(partitions) != 2:
raise NotImplementedError( # pragma: no cover
"Only train and test split is allowed, *partitions* "
"must be of length 2.")
kwargs = kwargs.copy()
kwargs['train_size'] = partitions[0]
kwargs['test_size'] = partitions[1]
return sklearn_train_test_split_streaming(self, **kwargs)
return sklearn_train_test_split(self, path_or_buf=path_or_buf,
export_method=export_method,
names=names, **kwargs)
@staticmethod
def _process_kwargs(kwargs):
"""
Filters out parameters for the constructor of this class.
"""
kw = {}
for k in ['check_schema']:
if k in kwargs:
kw[k] = kwargs[k]
del kwargs[k]
return kw
@staticmethod
def read_json(*args, chunksize=100000, flatten=False, **kwargs) -> 'StreamingDataFrame':
"""
Reads a :epkg:`json` file or buffer as an iterator
on :epkg:`DataFrame`. The signature is the same as
:epkg:`pandas:read_json`. The important parameter is
*chunksize* which defines the number
of rows to parse in a single bloc
and it must be defined to return an iterator.
If *lines* is True, the function falls back into
:epkg:`pandas:read_json`, otherwise it used
@see fn enumerate_json_items. If *lines* is ``'stream'``,
*enumerate_json_items* is called with parameter
``lines=True``.
Parameter *flatten* uses the trick described at
`Flattening JSON objects in Python
<https://towardsdatascience.com/flattening-json-objects-in-python-f5343c794b10>`_.
Examples:
.. runpython::
:showcode:
from io import BytesIO
from pandas_streaming.df import StreamingDataFrame
data = b'''{"a": 1, "b": 2}
{"a": 3, "b": 4}'''
it = StreamingDataFrame.read_json(BytesIO(data), lines=True)
dfs = list(it)
print(dfs)
.. runpython::
:showcode:
from io import BytesIO
from pandas_streaming.df import StreamingDataFrame
data = b'''[{"a": 1,
"b": 2},
{"a": 3,
"b": 4}]'''
it = StreamingDataFrame.read_json(BytesIO(data))
dfs = list(it)
print(dfs)
.. index:: IncompleteJSONError
The parsed json must have an empty line at the end otherwise
the following exception is raised:
`ijson.common.IncompleteJSONError: `
`parse error: unallowed token at this point in JSON text`.
"""
if not isinstance(chunksize, int) or chunksize <= 0:
raise ValueError( # pragma: no cover
'chunksize must be a positive integer')
kwargs_create = StreamingDataFrame._process_kwargs(kwargs)
if isinstance(args[0], (list, dict)):
if flatten:
return StreamingDataFrame.read_df(
json_normalize(args[0]), **kwargs_create)
return StreamingDataFrame.read_df(args[0], **kwargs_create)
if kwargs.get('lines', None) == 'stream':
del kwargs['lines']
def localf(a0=args[0]):
if hasattr(a0, 'seek'):
a0.seek(0)
return enumerate_json_items(
a0, encoding=kwargs.get('encoding', None), lines=True,
flatten=flatten)
st = JsonIterator2Stream(localf)
args = args[1:]
if chunksize is None:
return StreamingDataFrame(
lambda: pandas.read_json(
st, *args, chunksize=None, lines=True, **kwargs),
**kwargs_create)
def fct1(st=st, args=args, chunksize=chunksize, kw=kwargs.copy()):
st.seek(0)
for r in pandas.read_json(
st, *args, chunksize=chunksize, nrows=chunksize,
lines=True, **kw):
yield r
return StreamingDataFrame(fct1, **kwargs_create)
if kwargs.get('lines', False):
if flatten:
raise NotImplementedError(
"flatten==True is implemented with option lines='stream'")
if chunksize is None:
return StreamingDataFrame(
lambda: pandas.read_json(*args, chunksize=None, **kwargs),
**kwargs_create)
def fct2(args=args, chunksize=chunksize, kw=kwargs.copy()):
for r in pandas.read_json(
*args, chunksize=chunksize, nrows=chunksize, **kw):
yield r
return StreamingDataFrame(fct2, **kwargs_create)
st = JsonIterator2Stream(
lambda a0=args[0]: enumerate_json_items(
a0, encoding=kwargs.get('encoding', None), flatten=flatten))
args = args[1:]
if 'lines' in kwargs:
del kwargs['lines']
if chunksize is None:
return StreamingDataFrame(
lambda: pandas.read_json(
st, *args, chunksize=chunksize, lines=True, **kwargs),
**kwargs_create)
def fct3(st=st, args=args, chunksize=chunksize, kw=kwargs.copy()):
if hasattr(st, 'seek'):
st.seek(0)
for r in pandas.read_json(
st, *args, chunksize=chunksize, nrows=chunksize,
lines=True, **kw):
yield r
return StreamingDataFrame(fct3, **kwargs_create)
@staticmethod
def read_csv(*args, **kwargs) -> 'StreamingDataFrame':
"""
Reads a :epkg:`csv` file or buffer
as an iterator on :epkg:`DataFrame`.
The signature is the same as :epkg:`pandas:read_csv`.
The important parameter is *chunksize* which defines the number
of rows to parse in a single bloc. If not specified,
it will be equal to 100000.
"""
if not kwargs.get('iterator', True):
raise ValueError("If specified, iterator must be True.")
if not kwargs.get('chunksize', 100000):
raise ValueError("If specified, chunksize must not be None.")
kwargs_create = StreamingDataFrame._process_kwargs(kwargs)
kwargs['iterator'] = True
if 'chunksize' not in kwargs:
kwargs['chunksize'] = 100000
return StreamingDataFrame(lambda: pandas.read_csv(*args, **kwargs), **kwargs_create)
@staticmethod
def read_str(text, **kwargs) -> 'StreamingDataFrame':
"""
Reads a :epkg:`DataFrame` as an iterator on :epkg:`DataFrame`.
The signature is the same as :epkg:`pandas:read_csv`.
The important parameter is *chunksize* which defines the number
of rows to parse in a single bloc.
"""
if not kwargs.get('iterator', True):
raise ValueError("If specified, iterator must be True.")
if not kwargs.get('chunksize', 100000):
raise ValueError("If specified, chunksize must not be None.")
kwargs_create = StreamingDataFrame._process_kwargs(kwargs)
kwargs['iterator'] = True
if 'chunksize' not in kwargs:
kwargs['chunksize'] = 100000
if isinstance(text, str):
buffer = StringIO(text)
else:
buffer = BytesIO(text)
return StreamingDataFrame(
lambda: pandas.read_csv(buffer, **kwargs), **kwargs_create)
@staticmethod
def read_df(df, chunksize=None, check_schema=True) -> 'StreamingDataFrame':
"""
Splits a :epkg:`DataFrame` into small chunks mostly for
unit testing purposes.
@param df :epkg:`DataFrame`
@param chunksize number rows per chunks (// 10 by default)
@param check_schema check schema between two iterations
@return iterator on @see cl StreamingDataFrame
"""
if chunksize is None:
if hasattr(df, 'shape'):
chunksize = df.shape[0]
else:
raise NotImplementedError(
"Cannot retrieve size to infer chunksize for type={0}"
".".format(type(df)))
if hasattr(df, 'shape'):
size = df.shape[0]
else:
raise NotImplementedError( # pragma: no cover
"Cannot retrieve size for type={0}.".format(type(df)))
def local_iterator():
"local iterator"
for i in range(0, size, chunksize):
end = min(size, i + chunksize)
yield df[i:end].copy()
return StreamingDataFrame(local_iterator, check_schema=check_schema)
def __iter__(self):
"""
Iterator on a large file with a sliding window.
Each windows is a :epkg:`DataFrame`.
The method stores a copy of the initial iterator
and restores it after the end of the iterations.
If *check_schema* was enabled when calling the constructor,
the method checks that every :epkg:`DataFrame`
follows the same schema as the first chunck.
Even with a big chunk size, it might happen
that consecutive chunks might detect different type
for one particular column. An error message shows up
saying ``Column types are different after row``
with more information about the column which failed.
In that case, :epkg:`pandas:DataFrame.read_csv` can overwrite
the type on one column by specifying
``dtype={column_name: new_type}``. It frequently happens
when a string column has many missing values.
"""
iters = self.iter_creation()
sch = None
rows = 0
for it in iters:
if sch is None:
sch = (list(it.columns), list(it.dtypes))
elif self.check_schema:
if list(it.columns) != sch[0]: # pylint: disable=E1136
raise StreamingDataFrameSchemaError( # pragma: no cover
'Column names are different after row {0}\nFirst chunk: {1}'
'\nCurrent chunk: {2}'.format(
rows, sch[0], list(it.columns))) # pylint: disable=E1136
if list(it.dtypes) != sch[1]: # pylint: disable=E1136
errdf = pandas.DataFrame(
dict(names=sch[0], schema1=sch[1], # pylint: disable=E1136
schema2=list(it.dtypes))) # pylint: disable=E1136
tdf = StringIO()
errdf['diff'] = errdf['schema2'] != errdf['schema1']
errdf = errdf[errdf['diff']]
errdf.to_csv(tdf, sep=",", index=False)
raise StreamingDataFrameSchemaError(
'Column types are different after row {0}. You may use option '
'dtype={{"column_name": str}} to force the type on this column.'
'\n---\n{1}'.format(rows, tdf.getvalue()))
rows += it.shape[0]
yield it
@property
def shape(self):
"""
This is the kind of operations you do not want to do
when a file is large because it goes through the whole
stream just to get the number of rows.
"""
nl, nc = 0, 0
for it in self:
nc = max(it.shape[1], nc)
nl += it.shape[0]
return nl, nc
@property
def columns(self):
"""
See :epkg:`pandas:DataFrame:columns`.
"""
for it in self:
return it.columns
# The dataframe is empty.
return []
@property
def dtypes(self):
"""
See :epkg:`pandas:DataFrame:dtypes`.
"""
for it in self:
return it.dtypes
def to_csv(self, path_or_buf=None, **kwargs) -> 'StreamingDataFrame':
"""
Saves the :epkg:`DataFrame` into string.
See :epkg:`pandas:DataFrame.to_csv`.
"""
if path_or_buf is None:
st = StringIO()
close = False
elif isinstance(path_or_buf, str):
st = open( # pylint: disable=R1732
path_or_buf, "w", encoding=kwargs.get('encoding'))
close = True
else:
st = path_or_buf
close = False
for df in self:
df.to_csv(st, **kwargs)
kwargs['header'] = False
if close:
st.close()
if isinstance(st, StringIO):
return st.getvalue()
return path_or_buf
def to_dataframe(self) -> pandas.DataFrame:
"""
Converts everything into a single :epkg:`DataFrame`.
"""
return pandas.concat(self, axis=0)
def to_df(self) -> pandas.DataFrame:
"""
Converts everything into a single :epkg:`DataFrame`.
"""
return self.to_dataframe()
def iterrows(self):
"""
See :epkg:`pandas:DataFrame:iterrows`.
"""
for df in self:
for it in df.iterrows():
yield it
def head(self, n=5) -> pandas.DataFrame:
"""
Returns the first rows as a :epkg:`DataFrame`.
"""
st = []
total = 0
for df in self:
h = df.head(n=n)
total += h.shape[0]
st.append(h)
if total >= n:
break
n -= h.shape[0]
if len(st) == 1:
return st[0]
if len(st) == 0:
return None
return pandas.concat(st, axis=0)
def tail(self, n=5) -> pandas.DataFrame:
"""
Returns the last rows as a :epkg:`DataFrame`.
The size of chunks must be greater than ``n`` to
get ``n`` lines. This method is not efficient
because the whole dataset must be walked through.
"""
for df in self:
h = df.tail(n=n)
return h
def where(self, *args, **kwargs) -> 'StreamingDataFrame':
"""
Applies :epkg:`pandas:DataFrame:where`.
*inplace* must be False.
This function returns a @see cl StreamingDataFrame.
"""
kwargs['inplace'] = False
return StreamingDataFrame(
lambda: map(lambda df: df.where(*args, **kwargs), self),
**self.get_kwargs())
def sample(self, reservoir=False, cache=False, **kwargs) -> 'StreamingDataFrame':
"""
See :epkg:`pandas:DataFrame:sample`.
Only *frac* is available, otherwise choose
@see me reservoir_sampling.
This function returns a @see cl StreamingDataFrame.
@param reservoir use `reservoir sampling <https://en.wikipedia.org/wiki/Reservoir_sampling>`_
@param cache cache the sample
@param kwargs additional parameters for :epkg:`pandas:DataFrame:sample`
If *cache* is True, the sample is cached (assuming it holds in memory).
The second time an iterator walks through the
"""
if reservoir or 'n' in kwargs:
if 'frac' in kwargs:
raise ValueError(
'frac cannot be specified for reservoir sampling.')
return self._reservoir_sampling(cache=cache, n=kwargs['n'], random_state=kwargs.get('random_state'))
if cache:
sdf = self.sample(cache=False, **kwargs)
df = sdf.to_df()
return StreamingDataFrame.read_df(df, chunksize=df.shape[0])
return StreamingDataFrame(lambda: map(lambda df: df.sample(**kwargs), self), **self.get_kwargs(), stable=False)
def _reservoir_sampling(self, cache=True, n=1000, random_state=None) -> 'StreamingDataFrame':
"""
Uses the `reservoir sampling <https://en.wikipedia.org/wiki/Reservoir_sampling>`_
algorithm to draw a random sample with exactly *n* samples.
@param cache cache the sample
@param n number of observations to keep
@param random_state sets the random_state
@return @see cl StreamingDataFrame
.. warning::
The sample is split by chunks of size 1000.
This parameter is not yet exposed.
"""
if not cache:
raise ValueError(
"cache=False is not available for reservoir sampling.")
indices = []
seen = 0
for i, df in enumerate(self):
for ir, _ in enumerate(df.iterrows()):
seen += 1
if len(indices) < n:
indices.append((i, ir))
else:
x = nrandom.random() # pylint: disable=E1101
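                        # Randomly decide whether this new row replaces one of the
                        # indices already kept in the reservoir.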
if x * n < (seen - n):
k = nrandom.randint(0, len(indices) - 1)
indices[k] = (i, ir) # pylint: disable=E1126
indices = set(indices)
def reservoir_iterate(sdf, indices, chunksize):
"iterator"
buffer = []
for i, df in enumerate(self):
for ir, row in enumerate(df.iterrows()):
if (i, ir) in indices:
buffer.append(row)
if len(buffer) >= chunksize:
yield pandas.DataFrame(buffer)
buffer.clear()
if len(buffer) > 0:
yield pandas.DataFrame(buffer)
return StreamingDataFrame(
lambda: reservoir_iterate(sdf=self, indices=indices, chunksize=1000))
def apply(self, *args, **kwargs) -> 'StreamingDataFrame':
"""
Applies :epkg:`pandas:DataFrame:apply`.
This function returns a @see cl StreamingDataFrame.
"""
return StreamingDataFrame(
lambda: map(lambda df: df.apply(*args, **kwargs), self),
**self.get_kwargs())
def applymap(self, *args, **kwargs) -> 'StreamingDataFrame':
"""
Applies :epkg:`pandas:DataFrame:applymap`.
This function returns a @see cl StreamingDataFrame.
"""
return StreamingDataFrame(
lambda: map(lambda df: df.applymap(*args, **kwargs), self),
**self.get_kwargs())
def merge(self, right, **kwargs) -> 'StreamingDataFrame':
"""
Merges two @see cl StreamingDataFrame and returns @see cl StreamingDataFrame.
*right* can be either a @see cl StreamingDataFrame or simply
a :epkg:`pandas:DataFrame`. It calls :epkg:`pandas:DataFrame:merge` in
a double loop, loop on *self*, loop on *right*.
"""
if isinstance(right, pandas.DataFrame):
return self.merge(StreamingDataFrame.read_df(right, chunksize=right.shape[0]), **kwargs)
def iterator_merge(sdf1, sdf2, **kw):
"iterate on dataframes"
for df1 in sdf1:
for df2 in sdf2:
df = df1.merge(df2, **kw)
yield df
return StreamingDataFrame(
lambda: iterator_merge(self, right, **kwargs), **self.get_kwargs())
def concat(self, others, axis=0) -> 'StreamingDataFrame':
"""
Concatenates :epkg:`dataframes`. The function ensures all :epkg:`pandas:DataFrame`
or @see cl StreamingDataFrame share the same columns (name and type).
Otherwise, the function fails as it cannot guess the schema without
walking through all :epkg:`dataframes`.
:param others: list, enumeration, :epkg:`pandas:DataFrame`
:param axis: concatenate by rows (0) or by columns (1)
:return: @see cl StreamingDataFrame
"""
if axis == 1:
return self._concath(others)
if axis == 0:
return self._concatv(others)
raise ValueError("axis must be 0 or 1") # pragma: no cover
def _concath(self, others):
if not isinstance(others, list):
others = [others]
def iterateh(self, others):
cols = tuple([self] + others)
for dfs in zip(*cols):
nrows = [_.shape[0] for _ in dfs]
if min(nrows) != max(nrows):
raise RuntimeError(
"StreamingDataFram cannot merge DataFrame with different size or chunksize")
yield pandas.concat(list(dfs), axis=1)
return StreamingDataFrame(lambda: iterateh(self, others), **self.get_kwargs())
def _concatv(self, others):
def iterator_concat(this, lothers):
"iterator on dataframes"
columns = None
dtypes = None
for df in this:
if columns is None:
columns = df.columns
dtypes = df.dtypes
yield df
for obj in lothers:
check = True
for i, df in enumerate(obj):
if check:
if list(columns) != list(df.columns):
raise ValueError(
"Frame others[{0}] do not have the same column names or the same order.".format(i))
if list(dtypes) != list(df.dtypes):
raise ValueError(
"Frame others[{0}] do not have the same column types.".format(i))
check = False
yield df
if isinstance(others, pandas.DataFrame):
others = [others]
elif isinstance(others, StreamingDataFrame):
others = [others]
def change_type(obj):
"change column type"
if isinstance(obj, pandas.DataFrame):
return StreamingDataFrame.read_df(obj, obj.shape[0])
else:
return obj
others = list(map(change_type, others))
return StreamingDataFrame(
lambda: iterator_concat(self, others), **self.get_kwargs())
def groupby(self, by=None, lambda_agg=None, lambda_agg_agg=None,
in_memory=True, **kwargs) -> pandas.DataFrame:
"""
Implements the streaming :epkg:`pandas:DataFrame:groupby`.
We assume the result holds in memory. The out-of-memory is
not implemented yet.
@param by see :epkg:`pandas:DataFrame:groupby`
@param in_memory in-memory algorithm
@param lambda_agg aggregation function, *sum* by default
@param lambda_agg_agg to aggregate the aggregations, *sum* by default
@param kwargs additional parameters for :epkg:`pandas:DataFrame:groupby`
@return :epkg:`pandas:DataFrame`
As the input @see cl StreamingDataFrame does not necessarily hold
in memory, the aggregation must be done at every iteration.
There are two levels of aggregation: one to reduce every iterated
:epkg:`dataframe`, another one to combine all the reduced :epkg:`dataframes`.
This second one is always a **sum**.
As a consequence, this function should not compute any *mean* or *count*,
only *sum* because we do not know the size of each iterated
:epkg:`dataframe`. To compute an average, sum and weights must be
aggregated.
Parameter *lambda_agg* is ``lambda gr: gr.sum()`` by default.
It could also be ``lambda gr: gr.max()`` or
``lambda gr: gr.min()`` but not ``lambda gr: gr.mean()``
as it would lead to incoherent results.
.. exref::
:title: StreamingDataFrame and groupby
:tag: streaming
Here is an example which shows how to write a simple *groupby*
with :epkg:`pandas` and @see cl StreamingDataFrame.
.. runpython::
:showcode:
from pandas import DataFrame
from pandas_streaming.df import StreamingDataFrame
df = DataFrame(dict(A=[3, 4, 3], B=[5,6, 7]))
sdf = StreamingDataFrame.read_df(df)
# The following:
print(sdf.groupby("A", lambda gr: gr.sum()))
# Is equivalent to:
print(df.groupby("A").sum())
"""
if not in_memory:
raise NotImplementedError(
"Out-of-memory group by is not implemented.")
if lambda_agg is None:
def lambda_agg_(gr):
"sum"
return gr.sum()
lambda_agg = lambda_agg_
if lambda_agg_agg is None:
def lambda_agg_agg_(gr):
"sum"
return gr.sum()
lambda_agg_agg = lambda_agg_agg_
ckw = kwargs.copy()
ckw["as_index"] = False
agg = []
for df in self:
gr = df.groupby(by=by, **ckw)
agg.append(lambda_agg(gr))
conc = pandas.concat(agg, sort=False)
return lambda_agg_agg(conc.groupby(by=by, **kwargs))
def groupby_streaming(self, by=None, lambda_agg=None, lambda_agg_agg=None, in_memory=True,
strategy='cum', **kwargs) -> pandas.DataFrame:
"""
Implements the streaming :epkg:`pandas:DataFrame:groupby`.
We assume the result holds in memory. The out-of-memory is
not implemented yet.
:param by: see :epkg:`pandas:DataFrame:groupby`
:param in_memory: in-memory algorithm
:param lambda_agg: aggregation function, *sum* by default
:param lambda_agg_agg: to aggregate the aggregations, *sum* by default
:param kwargs: additional parameters for :epkg:`pandas:DataFrame:groupby`
:param strategy: ``'cum'``, or ``'streaming'``, see below
:return: :epkg:`pandas:DataFrame`
As the input @see cl StreamingDataFrame does not necessarily hold
in memory, the aggregation must be done at every iteration.
There are two levels of aggregation: one to reduce every iterated
:epkg:`dataframe`, another one to combine all the reduced :epkg:`dataframes`.
This second one is always a **sum**.
As a consequence, this function should not compute any *mean* or *count*,
only *sum* because we do not know the size of each iterated
:epkg:`dataframe`. To compute an average, sum and weights must be
aggregated.
Parameter *lambda_agg* is ``lambda gr: gr.sum()`` by default.
It could also be ``lambda gr: gr.max()`` or
``lambda gr: gr.min()`` but not ``lambda gr: gr.mean()``
as it would lead to incoherent results.
Parameter *strategy* allows three scenarios.
First one if ``strategy is None`` goes through
the whole datasets to produce a final :epkg:`DataFrame`.
Second if ``strategy=='cum'`` returns a
@see cl StreamingDataFrame, each iteration produces
the current status of the *group by*. Last case,
``strategy=='streaming'`` produces :epkg:`DataFrame`
which must be concatenated into a single :epkg:`DataFrame`
and grouped again to get the results.
.. exref::
:title: StreamingDataFrame and groupby
:tag: streaming
Here is an example which shows how to write a simple *groupby*
with :epkg:`pandas` and @see cl StreamingDataFrame.
.. runpython::
:showcode:
from pandas import DataFrame
from pandas_streaming.df import StreamingDataFrame
from pandas_streaming.data import dummy_streaming_dataframe
df20 = dummy_streaming_dataframe(20).to_dataframe()
df20["key"] = df20["cint"].apply(lambda i: i % 3 == 0)
sdf20 = StreamingDataFrame.read_df(df20, chunksize=5)
sgr = sdf20.groupby_streaming("key", lambda gr: gr.sum(),
strategy='cum', as_index=False)
for gr in sgr:
print()
print(gr)
"""
if not in_memory:
raise NotImplementedError(
"Out-of-memory group by is not implemented.")
if lambda_agg is None:
def lambda_agg_(gr):
"sum"
return gr.sum()
lambda_agg = lambda_agg_
if lambda_agg_agg is None:
def lambda_agg_agg_(gr):
"sum"
return gr.sum()
lambda_agg_agg = lambda_agg_agg_
ckw = kwargs.copy()
ckw["as_index"] = False
if strategy == 'cum':
def iterate_cum():
agg = None
for df in self:
gr = df.groupby(by=by, **ckw)
gragg = lambda_agg(gr)
if agg is None:
yield lambda_agg_agg(gragg.groupby(by=by, **kwargs))
agg = gragg
else:
lagg = pandas.concat([agg, gragg], sort=False)
yield lambda_agg_agg(lagg.groupby(by=by, **kwargs))
agg = lagg
return StreamingDataFrame(lambda: iterate_cum(), **self.get_kwargs())
if strategy == 'streaming':
def iterate_streaming():
for df in self:
gr = df.groupby(by=by, **ckw)
gragg = lambda_agg(gr)
yield lambda_agg(gragg.groupby(by=by, **kwargs))
return StreamingDataFrame(lambda: iterate_streaming(), **self.get_kwargs())
raise ValueError( # pragma: no cover
"Unknown strategy '{0}'".format(strategy))
def ensure_dtype(self, df, dtypes):
"""
Ensures the :epkg:`dataframe` *df* has types indicated in dtypes.
Changes it if not.
:param df: dataframe
:param dtypes: list of types
:return: updated?
"""
ch = False
cols = df.columns
for i, (has, exp) in enumerate(zip(df.dtypes, dtypes)):
if has != exp:
name = cols[i]
df[name] = df[name].astype(exp)
ch = True
return ch
def __getitem__(self, *args):
"""
Implements some of the functionalities :epkg:`pandas`
offers for the operator ``[]``.
"""
if len(args) != 1:
raise NotImplementedError("Only a list of columns is supported.")
cols = args[0]
if isinstance(cols, str):
# One column.
iter_creation = self.iter_creation
def iterate_col():
"iterate on one column"
one_col = [cols]
for df in iter_creation():
yield df[one_col]
return StreamingSeries(iterate_col, **self.get_kwargs())
if not isinstance(cols, list):
raise NotImplementedError("Only a list of columns is supported.")
def iterate_cols(sdf):
"""Iterate on columns."""
for df in sdf:
yield df[cols]
return StreamingDataFrame(lambda: iterate_cols(self), **self.get_kwargs())
def __setitem__(self, index, value):
"""
Limited set of operators are supported.
"""
if not isinstance(index, str):
raise ValueError(
"Only column affected are supported but index=%r." % index)
if isinstance(value, (int, float, numpy.number, str)):
# Is is equivalent to add_column.
iter_creation = self.iter_creation
def iterate_fct():
"iterate on rows"
iters = iter_creation()
for df in iters:
dfc = df.copy()
dfc[index] = value
yield dfc
self.iter_creation = iterate_fct
elif isinstance(value, StreamingSeries):
iter_creation = self.iter_creation
def iterate_fct():
"iterate on rows"
iters = iter_creation()
for df, dfs in zip(iters, value):
if df.shape[0] != dfs.shape[0]:
raise RuntimeError(
"Chunksize or shape are different when "
"iterating on two StreamDataFrame at the same "
"time: %r != %r." % (df.shape[0], dfs.shape[0]))
dfc = df.copy()
dfc[index] = dfs
yield dfc
self.iter_creation = iterate_fct
else:
raise NotImplementedError(
"Not implemented for type(index)=%r and type(value)=%r." % (
type(index), type(value)))
def add_column(self, col, value):
"""
Implements some of the functionalities :epkg:`pandas`
offers for the operator ``[]``.
@param col new column
@param value @see cl StreamingDataFrame or a lambda function
@return @see cl StreamingDataFrame
..note::
If value is a @see cl StreamingDataFrame,
*chunksize* must be the same for both.
.. exref::
:title: Add a new column to a StreamingDataFrame
:tag: streaming
.. runpython::
:showcode:
from pandas import DataFrame
from pandas_streaming.df import StreamingDataFrame
df = DataFrame(data=dict(X=[4.5, 6, 7], Y=["a", "b", "c"]))
sdf = StreamingDataFrame.read_df(df)
sdf2 = sdf.add_column("d", lambda row: int(1))
print(sdf2.to_dataframe())
sdf2 = sdf.add_column("d", lambda row: int(1))
print(sdf2.to_dataframe())
"""
if not isinstance(col, str):
raise NotImplementedError(
"Only a column as a string is supported.")
if isfunction(value):
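# value is a callable: it is applied row by row (axis=1) on every chunk
# and the result is appended as a new last column.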
def iterate_fct(self, value, col):
"iterate on rows"
for df in self:
dfc = df.copy()
dfc.insert(dfc.shape[1], col, dfc.apply(value, axis=1))
yield dfc
return StreamingDataFrame(lambda: iterate_fct(self, value, col), **self.get_kwargs())
if isinstance(value, (pandas.Series, pandas.DataFrame, StreamingDataFrame)):
raise NotImplementedError(
"Unable to set a new column based on a dataframe.")
def iterate_cst(self, value, col):
"iterate on rows"
for df in self:
dfc = df.copy()
dfc[col] = value
yield dfc
return StreamingDataFrame(
lambda: iterate_cst(self, value, col), **self.get_kwargs())
def fillna(self, **kwargs):
"""
Replaces the missing values, calls
:epkg:`pandas:DataFrame:fillna`.
@param kwargs see :epkg:`pandas:DataFrame:fillna`
@return @see cl StreamingDataFrame
.. warning::
The function does not check what happens at the
boundary between two chunks of data. Anything but a constant
value will probably behave inconsistently across chunks.
"""
def iterate_na(self, **kwargs):
"iterate on rows"
if kwargs.get('inplace', True):
kwargs['inplace'] = True
for df in self:
df.fillna(**kwargs)
yield df
else:
for df in self:
yield df.fillna(**kwargs)
return StreamingDataFrame(
lambda: iterate_na(self, **kwargs), **self.get_kwargs())
def describe(self, percentiles=None, include=None, exclude=None,
datetime_is_numeric=False):
"""
Calls :epkg:`pandas:DataFrame:describe` on every piece
of the dataset. *percentiles* are not exact, they are
computed per chunk and only give an indication.
:param percentiles: see :epkg:`pandas:DataFrame:describe`
:param include: see :epkg:`pandas:DataFrame:describe`
:param exclude: see :epkg:`pandas:DataFrame:describe`
:param datetime_is_numeric: see :epkg:`pandas:DataFrame:describe`
:return: :epkg:`pandas:DataFrame:describe`
"""
merged = None
stack = []
notper = ['count', 'mean', 'std']
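# 'count', 'mean' and 'std' are combined across chunks: the 'mean' row
# temporarily accumulates sum(x) = mean * count and the 'std' row
# accumulates sum(x^2) = (std^2 + mean^2) * count; both are normalised
# after the loop with Var(X) = E[X^2] - E[X]^2 (Bessel's correction is
# ignored, so std is a close approximation). The remaining rows (min,
# max, percentiles) are summarised from the stacked per-chunk describes.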
for df in self:
desc = df.describe(
percentiles=percentiles, include=include, exclude=exclude,
datetime_is_numeric=datetime_is_numeric)
count = desc.loc['count', :]
rows = [name for name in desc.index if name not in notper]
stack.append(desc.loc[rows, :])
if merged is None:
merged = desc
merged.loc['std', :] = (
merged.loc['std', :] ** 2 + merged.loc['mean', :] ** 2) * count
merged.loc['mean', :] *= count
else:
merged.loc['count', :] += desc.loc['count', :]
merged.loc['mean', :] += desc.loc['mean', :] * count
merged.loc['std', :] += (
desc.loc['std', :] ** 2 + desc.loc['mean', :] ** 2) * count
merged.loc['max', :] = numpy.maximum(
merged.loc['max', :], desc.loc['max', :])
merged.loc['min', :] = numpy.minimum(
merged.loc['min', :], desc.loc['min', :])
merged.loc['mean', :] /= merged.loc['count', :]
merged.loc['std', :] = (
merged.loc['std', :] / merged.loc['count', :] -
merged.loc['mean', :] ** 2) ** 0.5
values = pandas.concat(stack)
summary = values.describe(percentiles=percentiles,
datetime_is_numeric=datetime_is_numeric)
merged = merged.loc[notper, :]
rows = [name for name in summary.index if name not in notper]
summary = summary.loc[rows, :]
return | pandas.concat([merged, summary]) | pandas.concat |
import pandas as pd
import numpy as np
import os
from sim.Bus import Bus
from sim.Route import Route
from sim.Busstop import Bus_stop
from sim.Passenger import Passenger
import matplotlib.pyplot as plt
pd.options.mode.chained_assignment = None
def getBusRoute(data):
my_path = os.path.abspath(os.path.dirname(__file__))
path = my_path + "/data/" + data + "/"
_path_trips = path + 'trips.txt'
_path_st = path + 'stop_times.txt'
trips = pd.DataFrame(pd.read_csv(_path_trips))
stop_times = pd.DataFrame(pd.read_csv(_path_st))
stop_times.dropna(subset=['arrival_time'], inplace=True)
bus_routes = {}
trip_ids = set(stop_times['trip_id'])
try:
service_id = trips.iloc[np.random.randint(0, trips.shape[0])]['service_id']
trips = trips[trips['service_id'] == service_id]
except:
pass
# each route_id may correspond to multiple trip_id
for trip_id in trip_ids:
# Identical trips share the same shape_id in the trips file, but this field is not always provided in open data
try:
if 'shape_id' in trips.columns:
route_id = str(trips[trips['trip_id'] == trip_id].iloc[0]['shape_id'])
block_id = ''
dir = ''
else:
route_id = str(trips[trips['trip_id'] == trip_id].iloc[0]['route_id'])
block_id = str(trips[trips['trip_id'] == trip_id].iloc[0]['block_id'])
dir = str(trips[trips['trip_id'] == trip_id].iloc[0]['trip_headsign'])
except:
continue
# Identifies a set of dates when service is available for one or more routes.
trip = stop_times[stop_times['trip_id'] == trip_id]
try:
trip['arrival_time'] = pd.to_datetime(trip['arrival_time'], format='%H:%M:%S')
except:
trip['arrival_time'] = pd.to_datetime(trip['arrival_time'], format="%Y-%m-%d %H:%M:%S")
trip = trip.sort_values(by='arrival_time')
trip_dist = trip.iloc[:]['shape_dist_traveled'].to_list()
if len(trip_dist) <= 0 or np.isnan(trip_dist[0]):
continue
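# arrival times expressed as seconds since midnight
# (hour * 3600 + minute * 60 + second)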
schedule = ((trip.iloc[:]['arrival_time'].dt.hour * 60 + trip.iloc[:]['arrival_time'].dt.minute) * 60 +
trip.iloc[:]['arrival_time'].dt.second).to_list()
if len(schedule) <= 2 or np.isnan(schedule[0]):
continue
b = Bus(id=trip_id, route_id=route_id, stop_list=trip.iloc[:]['stop_id'].to_list(),
dispatch_time=schedule[0], block_id=block_id, dir=dir)
b.left_stop = []
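# rough average speed estimated from the first two stops:
# delta of shape_dist_traveled divided by delta of schedule (seconds)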
b.speed = (trip_dist[1] - trip_dist[0]) / (schedule[1] - schedule[0])
b.c_speed = b.speed
for i in range(len(trip_dist)):
if str(b.stop_list[i]) in b.stop_dist:
b.left_stop.append(str(b.stop_list[i]) + '_' + str(i))
b.stop_dist[str(b.stop_list[i]) + '_' + str(i)] = trip_dist[i]
b.schedule[str(b.stop_list[i]) + '_' + str(i)] = schedule[i]
else:
b.left_stop.append(str(b.stop_list[i]))
b.stop_dist[str(b.stop_list[i])] = trip_dist[i]
b.schedule[str(b.stop_list[i])] = schedule[i]
b.stop_list = b.left_stop[:]
b.set()
if route_id in bus_routes:
bus_routes[route_id].append(b)
else:
bus_routes[route_id] = [b]
# Do not consider routes with only 1 trip
bus_routes_ = {}
for k, v in bus_routes.items():
if len(v) > 1:
bus_routes_[k] = v
return bus_routes_
def getStopList(data, read=0):
my_path = os.path.abspath(os.path.dirname(__file__))
path = my_path + "/data/" + data + "/"
_path_stops = path + 'stops.txt'
_path_st = path + 'stop_times.txt'
_path_trips = path + 'trips.txt'
stops = pd.DataFrame(pd.read_csv(_path_stops))
stop_times = pd.DataFrame(pd.read_csv(_path_st))
trips = pd.DataFrame(pd.read_csv(_path_trips))
stop_list = {}
select_stops = | pd.merge(stops, stop_times, on=['stop_id'], how='left') | pandas.merge |
# ----------------------------------------------------------------------------
# Copyright (c) 2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from qiime2.plugin.testing import TestPluginBase
from qiime2.plugins import rescript
import qiime2
import pandas as pd
import pandas.util.testing as pdt
from rescript.dereplicate import _backfill_taxonomy
import_data = qiime2.Artifact.import_data
class TestDerep(TestPluginBase):
package = 'rescript.tests'
def setUp(self):
super().setUp()
self.dereplicate = rescript.actions.dereplicate
self.seqs = import_data(
'FeatureData[Sequence]', self.get_data_path('derep-test.fasta'))
self.taxa = import_data(
'FeatureData[Taxonomy]', self.get_data_path('derep-taxa.tsv'))
self.seqsnumericids = import_data(
'FeatureData[Sequence]', self.get_data_path(
'derep-test-numericIDs.fasta'))
self.taxanumericids = import_data(
'FeatureData[Taxonomy]', self.get_data_path(
'derep-taxa-numericIDs.tsv'))
def test_dereplicate_uniq(self):
seqs, taxa, = self.dereplicate(
self.seqs, self.taxa, mode='uniq', rank_handles='disable')
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__chondroitinus',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus; s__brevis',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Pediococcus; s__damnosus',
'B1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__vaginalis',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei',
'C1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Pediococcus; s__acidilacti',
'A3': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__alvei',
'B2': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus; s__casei',
'C1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
# use derep_prefix=True; should still obtain same result if the prefix
# seqs bear unique taxonomic labels, as seen in this test case
seqs, taxa, = self.dereplicate(self.seqs, self.taxa, mode='uniq',
derep_prefix=True,
rank_handles='disable')
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
def test_dereplicate_uniq_99_perc(self):
seqs, taxa, = self.dereplicate(self.seqs, self.taxa, mode='uniq',
perc_identity=0.99,
rank_handles='disable')
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__chondroitinus',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus; s__brevis',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Pediococcus; s__damnosus',
'B1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__vaginalis',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei',
'C1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Pediococcus; s__acidilacti',
'A3': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__alvei',
'B2': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus; s__casei',
'C1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
# use derep_prefix=True; should still obtain same result if the prefix
# seqs bear unique taxonomic labels, as seen in this test case
seqs, taxa, = self.dereplicate(self.seqs, self.taxa, mode='uniq',
perc_identity=0.99, derep_prefix=True,
rank_handles='disable')
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
def test_dereplicate_lca(self):
seqs, taxa, = self.dereplicate(
self.seqs, self.taxa, mode='lca', rank_handles='disable')
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Pediococcus; s__damnosus',
'B1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__vaginalis',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei',
'C1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
def test_dereplicate_super_lca_majority(self):
seqs, taxa, = self.dereplicate(
self.seqs, self.taxa, mode='super', rank_handles='disable')
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__alvei',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus; s__casei',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Pediococcus; s__damnosus',
'B1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__vaginalis',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei',
'C1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Pediococcus; s__acidilacti'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
def test_dereplicate_super_lca_majority_perc99(self):
seqs, taxa, = self.dereplicate(self.seqs, self.taxa, mode='super',
perc_identity=0.99,
rank_handles='disable')
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__alvei',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus; s__casei',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Pediococcus; s__acidilacti',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
# test that LCA taxonomy assignment works when derep_prefix=True
# here derep_prefix + LCA leads to collapsed C-group seqs + LCA taxonomy
def test_dereplicate_prefix_lca(self):
seqs, taxa, = self.dereplicate(self.seqs, self.taxa, mode='lca',
derep_prefix=True,
rank_handles='disable')
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae',
'B1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__vaginalis',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
def test_dereplicate_lca_99_perc(self):
seqs, taxa, = self.dereplicate(self.seqs, self.taxa, mode='lca',
perc_identity=0.99,
rank_handles='disable')
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
def test_dereplicate_majority(self):
seqs, taxa, = self.dereplicate(
self.seqs, self.taxa, mode='majority', rank_handles='disable')
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__alvei',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus; s__casei',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Pediococcus; s__damnosus',
'B1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__vaginalis',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei',
'C1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Pediococcus; s__acidilacti'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
# test that majority taxonomy assignment works when derep_prefix=True
# all C-group seqs should be merged, and P. acidilacti is the majority
def test_dereplicate_prefix_majority(self):
seqs, taxa, = self.dereplicate(self.seqs, self.taxa, mode='majority',
derep_prefix=True,
rank_handles='disable')
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__alvei',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus; s__casei',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Pediococcus; s__acidilacti',
'B1a': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__vaginalis',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
def test_dereplicate_majority_perc99(self):
seqs, taxa, = self.dereplicate(self.seqs, self.taxa, mode='majority',
perc_identity=0.99,
rank_handles='disable')
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__alvei',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus; s__casei',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Pediococcus; s__acidilacti',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
# the above tests check actual derep functionality; this test just makes
# sure that the same tests/modes above operate on numeric seq IDs, using
# the same test data above (with numeric IDs).
# See https://github.com/bokulich-lab/RESCRIPt/issues/49
def test_dereplicate_numericIDs(self):
self.dereplicate(self.seqsnumericids, self.taxanumericids, mode='uniq')
self.assertTrue(True)
self.dereplicate(self.seqsnumericids, self.taxanumericids, mode='lca')
self.assertTrue(True)
self.dereplicate(self.seqsnumericids, self.taxanumericids,
mode='majority')
self.assertTrue(True)
# Now test with backfilling. These parameters were chosen to set up a
# variety of backfill levels.
def test_dereplicate_lca_99_perc_backfill(self):
# note backfills SILVA-style rank handles by default, so we use default
seqs, taxa, = self.dereplicate(self.seqs, self.taxa, mode='lca',
perc_identity=0.99)
exp_taxa = pd.DataFrame({'Taxon': {
'A1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Bacillales; '
'f__Paenibacillaceae; g__Paenibacillus; s__',
'B1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__Lactobacillus; s__',
'C1': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales;'
' f__Lactobacillaceae; g__; s__',
'B1b': 'k__Bacteria; p__Firmicutes; c__Bacilli; o__Lactobacillales'
'; f__Lactobacillaceae; g__Lactobacillus; s__pseudocasei'}})
pdt.assert_frame_equal(taxa.view(pd.DataFrame).sort_index(),
exp_taxa.sort_index(), check_names=False)
pdt.assert_index_equal(seqs.view(pd.Series).sort_index().index,
exp_taxa.sort_index().index, check_names=False)
def test_backfill_taxonomy(self):
default_rank_handle = "d__; p__; c__; o__; f__; g__; s__"
def _backfill_series(series, rank_handles=default_rank_handle):
rank_handles = rank_handles.split(';')
return series.apply(_backfill_taxonomy, args=([rank_handles]))
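# _backfill_taxonomy pads a taxonomy that has fewer ranks than the rank
# handles with the trailing handles (e.g. appending '; g__; s__'), and
# leaves it untouched when the handles are shorter than the taxonomy.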
taxa = self.taxa.view(pd.Series).sort_index()
exp_taxa = taxa.copy()
# note: taxonomy is unchanged if rank handles are shorter than taxon
backfilled_taxa = _backfill_series(taxa, "my;taxonomy;is;too;short")
pdt.assert_series_equal(backfilled_taxa, exp_taxa, check_names=False)
# manually backfill to match expected
exp_taxa.loc['C1b'] += '; g__; s__'
# backfill with defaults
backfilled_taxa = _backfill_series(taxa)
pdt.assert_series_equal(backfilled_taxa, exp_taxa, check_names=False)
# trim back arbitrarily to backfill again
trimmed_taxa = backfilled_taxa.apply(
lambda x: ';'.join(x.split(';')[:3]))
# manually backfill
exp_taxa = trimmed_taxa.apply(lambda x: x + '; o__; f__; g__; s__')
backfilled_taxa = _backfill_series(trimmed_taxa)
pdt.assert_series_equal(backfilled_taxa, exp_taxa, check_names=False)
# backfill to root
# note: taxon labels can never be empty, so this test covers cases
# where there is no classification beyond root/domain/kingdom
backfilled_taxa = _backfill_series(trimmed_taxa.apply(lambda x: 'd__'))
exp_taxa = trimmed_taxa.apply(lambda x: default_rank_handle)
pdt.assert_series_equal(backfilled_taxa, exp_taxa, check_names=False)
# backfill custom labels
custom_rank_handles = "p;e;a;n;u;t;s"
exp_taxa = trimmed_taxa.apply(lambda x: x + ';n;u;t;s')
backfilled_taxa = _backfill_series(trimmed_taxa, custom_rank_handles)
| pdt.assert_series_equal(backfilled_taxa, exp_taxa, check_names=False) | pandas.util.testing.assert_series_equal |
import unittest
from abc import ABC
import numpy as np
import pandas as pd
from toolbox.ml.ml_factor_calculation import ModelWrapper, calc_ml_factor, generate_indexes
from toolbox.utils.slice_holder import SliceHolder
class MyTestCase(unittest.TestCase):
def examples(self):
# index includes non trading days
# exactly 60 occurrences of each ticker
first = pd.Timestamp(year=2010, month=1, day=1)
self.date_index = pd.MultiIndex.from_product(
[pd.date_range(start=first, end=pd.Timestamp(year=2010, month=3, day=1)),
['BOB', 'JEFF', 'CARL']], names=['date', 'symbol'])
self.expected_index_e5_10_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=40), first + pd.Timedelta(days=44))),
(SliceHolder(first, first + pd.Timedelta(days=34)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=49))),
(SliceHolder(first, first + pd.Timedelta(days=39)),
SliceHolder(first + pd.Timedelta(days=50), first + pd.Timedelta(days=54))),
(SliceHolder(first, first + pd.Timedelta(days=44)),
SliceHolder(first + pd.Timedelta(days=55), first + pd.Timedelta(days=59)))
]
self.expected_index_e7_8_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=37), first + pd.Timedelta(days=44))),
(SliceHolder(first, first + pd.Timedelta(days=37)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=52))),
(SliceHolder(first, first + pd.Timedelta(days=45)),
SliceHolder(first + pd.Timedelta(days=53), first + pd.Timedelta(days=59))),
]
self.expected_index_e5_10_30 = self.turn_to_datetime64(self.expected_index_e5_10_30)
self.expected_index_e7_8_30 = self.turn_to_datetime64(self.expected_index_e7_8_30)
self.expected_index_r5_10_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=40), first + pd.Timedelta(days=44))),
(SliceHolder(first + pd.Timedelta(days=5), first + pd.Timedelta(days=34)),
SliceHolder(first + pd.Timedelta(days=45), first + pd.Timedelta(days=49))),
(SliceHolder(first + pd.Timedelta(days=10), first + pd.Timedelta(days=39)),
SliceHolder(first + pd.Timedelta(days=50), first + pd.Timedelta(days=54))),
(SliceHolder(first + pd.Timedelta(days=15), first + pd.Timedelta(days=44)),
SliceHolder(first + pd.Timedelta(days=55), first + pd.Timedelta(days=59)))
]
self.expected_index_r7_8_30 = [
(SliceHolder(first, first + pd.Timedelta(days=29)),
SliceHolder(first + pd.Timedelta(days=37), first + pd.Timedelta(days=44))),
(SliceHolder(first + pd.Timedelta(days=8), first + pd.Timedelta(days=37)),
SliceHolder(first + | pd.Timedelta(days=45) | pandas.Timedelta |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
import datetime as dt
import re
import cupy as cp
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from pandas.util.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
import cudf
from cudf.core import DataFrame, Series
from cudf.core.index import DatetimeIndex
from cudf.tests.utils import NUMERIC_TYPES, assert_eq
def data1():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def data2():
return pd.date_range("20010101", "20020215", freq="400h", name="times")
def timeseries_us_data():
return pd.date_range(
"2019-07-16 00:00:00",
"2019-07-16 00:00:01",
freq="5555us",
name="times",
)
def timestamp_ms_data():
return pd.Series(
[
"2019-07-16 00:00:00.333",
"2019-07-16 00:00:00.666",
"2019-07-16 00:00:00.888",
]
)
def timestamp_us_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333",
"2019-07-16 00:00:00.666666",
"2019-07-16 00:00:00.888888",
]
)
def timestamp_ns_data():
return pd.Series(
[
"2019-07-16 00:00:00.333333333",
"2019-07-16 00:00:00.666666666",
"2019-07-16 00:00:00.888888888",
]
)
def numerical_data():
return np.arange(1, 10)
fields = ["year", "month", "day", "hour", "minute", "second", "weekday"]
@pytest.mark.parametrize("data", [data1(), data2()])
def test_series(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
assert_eq(pd_data, gdf_data)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_pandas(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
assert_eq(pd_data_1, gdf_data_1.astype("datetime64[ns]"))
assert_eq(pd_data_2, gdf_data_2.astype("datetime64[ns]"))
assert_eq(pd_data_1 < pd_data_2, gdf_data_1 < gdf_data_2)
assert_eq(pd_data_1 > pd_data_2, gdf_data_1 > gdf_data_2)
assert_eq(pd_data_1 == pd_data_2, gdf_data_1 == gdf_data_2)
assert_eq(pd_data_1 <= pd_data_2, gdf_data_1 <= gdf_data_2)
assert_eq(pd_data_1 >= pd_data_2, gdf_data_1 >= gdf_data_2)
@pytest.mark.parametrize(
"lhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"rhs_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_series_binops_numpy(lhs_dtype, rhs_dtype):
pd_data_1 = pd.Series(
pd.date_range("20010101", "20020215", freq="400h", name="times")
)
pd_data_2 = pd.Series(
pd.date_range("20010101", "20020215", freq="401h", name="times")
)
gdf_data_1 = Series(pd_data_1).astype(lhs_dtype)
gdf_data_2 = Series(pd_data_2).astype(rhs_dtype)
np_data_1 = np.array(pd_data_1).astype(lhs_dtype)
np_data_2 = np.array(pd_data_2).astype(rhs_dtype)
np.testing.assert_equal(np_data_1, gdf_data_1.to_array())
np.testing.assert_equal(np_data_2, gdf_data_2.to_array())
np.testing.assert_equal(
np.less(np_data_1, np_data_2), (gdf_data_1 < gdf_data_2).to_array()
)
np.testing.assert_equal(
np.greater(np_data_1, np_data_2), (gdf_data_1 > gdf_data_2).to_array()
)
np.testing.assert_equal(
np.equal(np_data_1, np_data_2), (gdf_data_1 == gdf_data_2).to_array()
)
np.testing.assert_equal(
np.less_equal(np_data_1, np_data_2),
(gdf_data_1 <= gdf_data_2).to_array(),
)
np.testing.assert_equal(
np.greater_equal(np_data_1, np_data_2),
(gdf_data_1 >= gdf_data_2).to_array(),
)
@pytest.mark.parametrize("data", [data1(), data2()])
def test_dt_ops(data):
pd_data = pd.Series(data.copy())
gdf_data = Series(data.copy())
assert_eq(pd_data == pd_data, gdf_data == gdf_data)
assert_eq(pd_data < pd_data, gdf_data < gdf_data)
assert_eq(pd_data > pd_data, gdf_data > gdf_data)
# libgdf doesn't respect timezones
@pytest.mark.parametrize("data", [data1()])
@pytest.mark.parametrize("field", fields)
def test_dt_series(data, field):
pd_data = pd.Series(data.copy())
gdf_data = Series(pd_data)
base = getattr(pd_data.dt, field)
test = getattr(gdf_data.dt, field).to_pandas().astype("int64")
assert_series_equal(base, test)
@pytest.mark.parametrize("data", [data1()])
@pytest.mark.parametrize("field", fields)
def test_dt_index(data, field):
pd_data = data.copy()
gdf_data = DatetimeIndex(pd_data)
assert_index_equal(
getattr(gdf_data, field).to_pandas(), getattr(pd_data, field)
)
def test_setitem_datetime():
df = DataFrame()
df["date"] = pd.date_range("20010101", "20010105").values
assert np.issubdtype(df.date.dtype, np.datetime64)
def test_sort_datetime():
df = pd.DataFrame()
df["date"] = np.array(
[
np.datetime64("2016-11-20"),
np.datetime64("2020-11-20"),
np.datetime64("2019-11-20"),
np.datetime64("1918-11-20"),
np.datetime64("2118-11-20"),
]
)
df["vals"] = np.random.sample(len(df["date"]))
gdf = cudf.from_pandas(df)
s_df = df.sort_values(by="date")
s_gdf = gdf.sort_values(by="date")
assert_eq(s_df, s_gdf)
def test_issue_165():
df_pandas = pd.DataFrame()
start_date = dt.datetime.strptime("2000-10-21", "%Y-%m-%d")
data = [(start_date + dt.timedelta(days=x)) for x in range(6)]
df_pandas["dates"] = data
df_pandas["num"] = [1, 2, 3, 4, 5, 6]
df_cudf = DataFrame.from_pandas(df_pandas)
base = df_pandas.query("dates==@start_date")
test = df_cudf.query("dates==@start_date")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date
base_mask = df_pandas.dates == start_date
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
start_date_ts = pd.Timestamp(start_date)
test = df_cudf.query("dates==@start_date_ts")
base = df_pandas.query("dates==@start_date_ts")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date_ts
base_mask = df_pandas.dates == start_date_ts
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
start_date_np = np.datetime64(start_date_ts, "ns")
test = df_cudf.query("dates==@start_date_np")
base = df_pandas.query("dates==@start_date_np")
assert_frame_equal(base, test.to_pandas())
assert len(test) > 0
mask = df_cudf.dates == start_date_np
base_mask = df_pandas.dates == start_date_np
assert_series_equal(mask.to_pandas(), base_mask, check_names=False)
assert mask.to_pandas().sum() > 0
@pytest.mark.parametrize("data", [data1(), data2()])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
def test_typecast_from_datetime(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data)
gdf_data = Series(pd_data)
np_casted = np_data.astype(dtype)
gdf_casted = gdf_data.astype(dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [data1(), data2()])
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_from_datetime_to_int64_to_datetime(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data)
gdf_data = Series(pd_data)
np_casted = np_data.astype(np.int64).astype(dtype)
gdf_casted = gdf_data.astype(np.int64).astype(dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [timeseries_us_data()])
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_different_datetime_resolutions(data, dtype):
pd_data = pd.Series(data.copy())
np_data = np.array(pd_data).astype(dtype)
gdf_series = Series(pd_data).astype(dtype)
np.testing.assert_equal(np_data, gdf_series.to_array())
@pytest.mark.parametrize(
"data", [timestamp_ms_data(), timestamp_us_data(), timestamp_ns_data()]
)
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_string_timstamp_typecast_to_different_datetime_resolutions(
data, dtype
):
pd_sr = data
gdf_sr = cudf.Series.from_pandas(pd_sr)
expect = pd_sr.values.astype(dtype)
got = gdf_sr.astype(dtype).values_host
np.testing.assert_equal(expect, got)
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("from_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_data = Series(np_data)
np_casted = np_data.astype(to_dtype)
gdf_casted = gdf_data.astype(to_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("from_dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_to_from_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_data = Series(np_data)
np_casted = np_data.astype(to_dtype).astype(from_dtype)
gdf_casted = gdf_data.astype(to_dtype).astype(from_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize(
"from_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
@pytest.mark.parametrize(
"to_dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_typecast_from_datetime_to_datetime(data, from_dtype, to_dtype):
np_data = data.astype(from_dtype)
gdf_col = Series(np_data)._column
np_casted = np_data.astype(to_dtype)
gdf_casted = gdf_col.astype(to_dtype)
np.testing.assert_equal(np_casted, gdf_casted.to_array())
@pytest.mark.parametrize("data", [numerical_data()])
@pytest.mark.parametrize("nulls", ["some", "all"])
def test_to_from_pandas_nulls(data, nulls):
pd_data = pd.Series(data.copy().astype("datetime64[ns]"))
if nulls == "some":
# Fill half the values with NaT
pd_data[list(range(0, len(pd_data), 2))] = np.datetime64("nat", "ns")
elif nulls == "all":
# Fill all the values with NaT
pd_data[:] = np.datetime64("nat", "ns")
gdf_data = Series.from_pandas(pd_data)
expect = pd_data
got = gdf_data.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize(
"dtype",
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"],
)
def test_datetime_to_arrow(dtype):
timestamp = (
cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={}
)
.reset_index()["timestamp"]
.reset_index(drop=True)
)
gdf = DataFrame({"timestamp": timestamp.astype(dtype)})
assert_eq(gdf, DataFrame.from_arrow(gdf.to_arrow(preserve_index=False)))
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(pd.date_range("2010-01-01", "2010-02-01")),
pd.Series([None, None], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize(
"nulls", ["none", pytest.param("some", marks=pytest.mark.xfail)]
)
def test_datetime_unique(data, nulls):
psr = pd.Series(data)
print(data)
print(nulls)
if len(data) > 0:
if nulls == "some":
p = np.random.randint(0, len(data), 2)
psr[p] = None
gsr = cudf.from_pandas(psr)
expected = psr.unique()
got = gsr.unique()
assert_eq(pd.Series(expected), got.to_pandas())
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(pd.date_range("2010-01-01", "2010-02-01")),
pd.Series([None, None], dtype="datetime64[ns]"),
],
)
@pytest.mark.parametrize("nulls", ["none", "some"])
def test_datetime_nunique(data, nulls):
psr = pd.Series(data)
if len(data) > 0:
if nulls == "some":
p = np.random.randint(0, len(data), 2)
psr[p] = None
gsr = cudf.from_pandas(psr)
expected = psr.nunique()
got = gsr.nunique()
assert_eq(got, expected)
testdata = [
(
Series(
["2018-01-01", None, "2019-01-31", None, "2018-01-01"],
dtype="datetime64[ms]",
),
True,
),
(
Series(
[
"2018-01-01",
"2018-01-02",
"2019-01-31",
"2018-03-01",
"2018-01-01",
],
dtype="datetime64[ms]",
),
False,
),
(
Series(
np.array(
["2018-01-01", None, "2019-12-30"], dtype="datetime64[ms]"
)
),
True,
),
]
@pytest.mark.parametrize("data, expected", testdata)
def test_datetime_has_null_test(data, expected):
pd_data = data.to_pandas()
count = pd_data.notna().value_counts()
expected_count = 0
if False in count.keys():
expected_count = count[False]
assert_eq(expected, data.has_nulls)
assert_eq(expected_count, data.null_count)
def test_datetime_has_null_test_pyarrow():
data = Series(
pa.array(
[0, np.iinfo("int64").min, np.iinfo("int64").max, None],
type=pa.timestamp("ns"),
)
)
expected = True
expected_count = 1
assert_eq(expected, data.has_nulls)
assert_eq(expected_count, data.null_count)
def test_datetime_dataframe():
data = {
"timearray": np.array(
[0, 1, None, 2, 20, None, 897], dtype="datetime64[ms]"
)
}
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame(data)
assert_eq(pdf, gdf)
assert_eq(pdf.dropna(), gdf.dropna())
assert_eq(pdf.isnull(), gdf.isnull())
data = np.array([0, 1, None, 2, 20, None, 897], dtype="datetime64[ms]")
gs = cudf.Series(data)
ps = pd.Series(data)
assert_eq(ps, gs)
assert_eq(ps.dropna(), gs.dropna())
assert_eq(ps.isnull(), gs.isnull())
@pytest.mark.parametrize(
"data",
[
None,
[],
pd.Series([]),
pd.Index([]),
pd.Series([1, 2, 3]),
pd.Series([0, 1, -1]),
pd.Series([0, 1, -1, 100.3, 200, 47637289]),
pd.Series(["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"]),
[1, 2, 3, 100, -123, -1, 0, 1000000000000679367],
pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}),
pd.DataFrame(
{"year": ["2015", "2016"], "month": ["2", "3"], "day": [4, 5]}
),
pd.DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"minute": [1, 100],
"second": [90, 10],
"hour": [1, 0.5],
},
index=["a", "b"],
),
pd.DataFrame(
{
"year": [],
"month": [],
"day": [],
"minute": [],
"second": [],
"hour": [],
},
),
["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"],
pd.Index([1, 2, 3, 4]),
pd.DatetimeIndex(
["1970-01-01 00:00:00.000000001", "1970-01-01 00:00:00.000000002"],
dtype="datetime64[ns]",
freq=None,
),
pd.DatetimeIndex([], dtype="datetime64[ns]", freq=None,),
pd.Series([1, 2, 3]).astype("datetime64[ns]"),
pd.Series([1, 2, 3]).astype("datetime64[us]"),
pd.Series([1, 2, 3]).astype("datetime64[ms]"),
pd.Series([1, 2, 3]).astype("datetime64[s]"),
pd.Series([1, 2, 3]).astype("datetime64[D]"),
1,
100,
17,
53.638435454,
np.array([1, 10, 15, 478925, 2327623467]),
np.array([0.3474673, -10, 15, 478925.34345, 2327623467]),
],
)
@pytest.mark.parametrize("dayfirst", [True, False])
@pytest.mark.parametrize("infer_datetime_format", [True, False])
def test_cudf_to_datetime(data, dayfirst, infer_datetime_format):
pd_data = data
if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)):
gd_data = cudf.from_pandas(pd_data)
else:
if type(pd_data).__module__ == np.__name__:
gd_data = cp.array(pd_data)
else:
gd_data = pd_data
expected = pd.to_datetime(
pd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format
)
actual = cudf.to_datetime(
gd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format
)
assert_eq(actual, expected)
@pytest.mark.parametrize(
"data",
[
"2",
["1", "2", "3"],
["1/1/1", "2/2/2", "1"],
pd.DataFrame(
{
"year": [2015, 2016],
"month": [2, 3],
"day": [4, 5],
"minute": [1, 100],
"second": [90, 10],
"hour": [1, 0],
"blablacol": [1, 1],
}
),
pd.DataFrame(
{
"month": [2, 3],
"day": [4, 5],
"minute": [1, 100],
"second": [90, 10],
"hour": [1, 0],
}
),
],
)
def test_to_datetime_errors(data):
pd_data = data
if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)):
gd_data = cudf.from_pandas(pd_data)
else:
gd_data = pd_data
try:
pd.to_datetime(pd_data)
except Exception as e:
with pytest.raises(type(e), match=re.escape(str(e))):
cudf.to_datetime(gd_data)
else:
raise AssertionError("Was expecting `pd.to_datetime` to fail")
def test_to_datetime_not_implemented():
with pytest.raises(NotImplementedError):
cudf.to_datetime([], exact=False)
with pytest.raises(NotImplementedError):
cudf.to_datetime([], origin="julian")
with pytest.raises(NotImplementedError):
cudf.to_datetime([], yearfirst=True)
@pytest.mark.parametrize(
"data",
[
1,
[],
pd.Series([]),
pd.Index([]),
pd.Series([1, 2, 3]),
pd.Series([1, 2.4, 3]),
pd.Series([0, 1, -1]),
pd.Series([0, 1, -1, 100, 200, 47637]),
[10, 12, 1200, 15003],
pd.DatetimeIndex([], dtype="datetime64[ns]", freq=None,),
pd.Index([1, 2, 3, 4]),
],
)
@pytest.mark.parametrize("unit", ["D", "s", "ms", "us", "ns"])
def test_to_datetime_units(data, unit):
pd_data = data
if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)):
gd_data = cudf.from_pandas(pd_data)
else:
gd_data = pd_data
expected = | pd.to_datetime(pd_data, unit=unit) | pandas.to_datetime |
import datetime
import random
import pandas as pd
from cryptofeed_werks.controllers.aggregators.trades.lib import (
aggregate_threshold,
aggregate_trades,
)
from .utils import get_data_frame, get_trade
def get_samples(trades):
return aggregate_trades(get_data_frame(trades))
def test_equal_symbols_and_timestamps_and_ticks():
trades = [{"symbol": "A", "is_equal_timestamp": True, "ticks": [1, 1]}]
samples = get_samples(trades)
assert len(samples) == 1
def test_equal_symbols_and_timestamps_and_not_equal_ticks():
trades = [{"symbol": "A", "is_equal_timestamp": True, "ticks": [1, -1]}]
samples = get_samples(trades)
assert len(samples) == 2
def test_not_equal_symbols_and_equal_timestamps_and_ticks():
trades = [
{"symbol": "A", "is_equal_timestamp": True, "ticks": [1, 1]},
{"symbol": "B", "is_equal_timestamp": True, "ticks": [1, 1]},
]
samples = get_samples(trades)
assert len(samples) == 2
def test_not_equal_symbols_and_timestamps_and_equal_ticks():
trades = [
{"symbol": "A", "is_equal_timestamp": True, "ticks": [1, 1]},
{"symbol": "A", "is_equal_timestamp": False, "ticks": [-1]},
{"symbol": "B", "is_equal_timestamp": True, "ticks": [1, 1]},
{"symbol": "B", "is_equal_timestamp": False, "ticks": [-1]},
]
samples = get_samples(trades)
assert len(samples) == 4
def test_equal_ticks_and_equal_timestamp():
trades = [{"ticks": [1, 1], "is_equal_timestamp": True}]
samples = get_samples(trades)
assert len(samples) == 1
def test_equal_ticks_and_not_equal_timestamp():
trades = [{"ticks": [1, 1], "is_equal_timestamp": False}]
samples = get_samples(trades)
assert len(samples) == 2
def test_equal_ticks_and_equal_nanoseconds():
trades = [
{
"ticks": [1, 1],
"is_equal_timestamp": True,
"nanoseconds": random.random() * 100,
}
]
samples = get_samples(trades)
assert len(samples) == 1
def test_equal_ticks_and_not_equal_nanoseconds():
trades = []
nanoseconds = random.random() * 100
for index, tick in enumerate([1, 1]):
nanoseconds += index
trade = {
"ticks": [tick],
"is_equal_timestamp": True,
"nanoseconds": nanoseconds,
}
trades.append(trade)
samples = get_samples(trades)
assert len(samples) == 2
def test_aggregate_threshold_four():
now = datetime.datetime.utcnow()
trades = [
get_trade(timestamp=now, price=1, notional=1, tick_rule=-1),
get_trade(
timestamp=now + pd.Timedelta("1s"), price=1, notional=1, tick_rule=-1
),
get_trade(
timestamp=now + pd.Timedelta("2s"), price=1, notional=1, tick_rule=1
),
get_trade(
timestamp=now + pd.Timedelta("2s"), price=1, notional=1, tick_rule=1
),
]
data_frame = pd.DataFrame(trades)
df = aggregate_trades(data_frame)
data = aggregate_threshold(df, attr="volume", value=2)
assert len(data) == 1
assert_significant(data[0])
assert_insignificant(data[0], buy=2, total=4)
def test_aggregate_threshold_eight():
now = datetime.datetime.utcnow()
trades = [
get_trade(timestamp=now, price=1, notional=1, tick_rule=1),
get_trade(timestamp=now, price=1, notional=1, tick_rule=1),
get_trade(
timestamp=now + | pd.Timedelta("1s") | pandas.Timedelta |
# -*- coding: utf-8 -*-
"""Script for formatting the Fantasia Database
The database consists of twenty young and twenty elderly healthy subjects. All subjects remained in a resting state in sinus rhythm while watching the movie Fantasia (Disney, 1940) to help maintain wakefulness. The continuous ECG signals were digitized at 250 Hz. Each heartbeat was annotated using an automated arrhythmia detection algorithm, and each beat annotation was verified by visual inspection.
Steps:
1. Download the ZIP database from https://physionet.org/content/fantasia/1.0.0/
2. Open it with a zip-opener (WinZip, 7zip).
3. Extract the folder of the same name (named 'fantasia-database-1.0.0') to the same folder as this script.
4. Run this script.
"""
import pandas as pd
import numpy as np
import wfdb
import os
files = os.listdir("./fantasia-database-1.0.0/")
files = [s.replace('.dat', '') for s in files if ".dat" in s]
dfs_ecg = []
dfs_rpeaks = []
for i, participant in enumerate(files):
data, info = wfdb.rdsamp("./fantasia-database-1.0.0/" + participant)
# Get signal
data = pd.DataFrame(data, columns=info["sig_name"])
data = data[["ECG"]]
data["Participant"] = "Fantasia_" + participant
data["Sample"] = range(len(data))
data["Sampling_Rate"] = info['fs']
data["Database"] = "Fantasia"
# Get annotations
anno = wfdb.rdann("./fantasia-database-1.0.0/" + participant, 'ecg')
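# keep only beats annotated as normal ('N'); their sample indices give
# the R-peak locations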
anno = anno.sample[np.where(np.array(anno.symbol) == "N")[0]]
anno = pd.DataFrame({"Rpeaks": anno})
anno["Participant"] = "Fantasia_" + participant
anno["Sampling_Rate"] = info['fs']
anno["Database"] = "Fantasia"
# Store with the rest
dfs_ecg.append(data)
dfs_rpeaks.append(anno)
# Save
df_ecg = | pd.concat(dfs_ecg) | pandas.concat |
# -*- coding: utf-8 -*-
# author: ysoftman
# python version : 3.x
# desc : pandas test
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# pandas provides the Timestamp, DatetimeIndex, Period and PeriodIndex classes.
# timestamp formats
print(pd.Timestamp('2/15/2019 07:20PM'))
print(pd.Timestamp('2019-02-15 07:20PM'))
# add one month
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(months=1))
# set the month to January
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(month=1))
# add 10 days
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(days=10))
# set the day to the 10th
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(day=10))
# convert to a datetime object
dt = pd.Timestamp('2019-02-15 07:20PM').to_pydatetime()
print(dt.year)
print(dt.month)
print(dt.day)
print(dt.hour)
print(dt.minute)
print(dt.second)
print()
# Period represents a span of time, such as a specific day or month.
# 'M' denotes a month.
print(pd.Period('02/2019'))
# 'D' denotes a day.
print(pd.Period('02/15/2019'))
print()
# create a Series indexed by Timestamps
t1 = pd.Series(list('abc'), [pd.Timestamp(
'2016-09-01'), | pd.Timestamp('2016-09-02') | pandas.Timestamp |
from typing import List
import pandas as pd
import geopandas as gpd
from pandas import ExcelFile
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def main():
excel_data: ExcelFile = ExcelFile("FMDdata.xlsx")
orig_report_years: List[pd.DataFrame] = []
report_years: List[pd.DataFrame] = []
for year in range(7, 17):
orig_report_years.append(pd.read_excel(excel_data, sheet_name=f'20{year:02}'))
year = 2007
for yearly_report in orig_report_years:
yearly_report = yearly_report.rename(columns={"Country name": "NAME"})
yearly_report = yearly_report.groupby(["NAME"], as_index=False).sum()
yearly_report['Year'] = year
report_years.append(yearly_report)
year += 1
world = gpd.read_file("natural_earth_vector/10m_cultural/ne_10m_admin_0_countries.shp")
world_fmd_report: List[gpd.GeoDataFrame] = []
for report in report_years:
report["NAME"].replace(to_replace="China (People's Rep. of)", value="China", inplace=True)
report["NAME"].replace(to_replace="Congo (Dem. Rep. of the)", value="Dem. Rep. Congo", inplace=True)
report["NAME"].replace(to_replace="Korea (Dem. People's Rep.)", value="North Korea", inplace=True)
report["NAME"].replace(to_replace="Palestinian Auton. Territories", value="Palestine", inplace=True)
report["NAME"].replace(to_replace="Chinese Taipei", value="Taiwan", inplace=True)
report["NAME"].replace(to_replace="Hong Kong (SAR - PRC)", value="Hong Kong", inplace=True)
report["NAME"].replace(to_replace="Korea (Rep. of)", value="South Korea", inplace=True)
map: gpd.GeoDataFrame = world.copy()
map_disease: gpd.GeoDataFrame = | pd.merge(left=map, right=report, how="outer", on="NAME") | pandas.merge |
"""The noisemodels module contains all noisemodels available in Pastas.
Author: <NAME>, 2017
"""
from abc import ABC
from logging import getLogger
import numpy as np
import pandas as pd
from .decorators import set_parameter
logger = getLogger(__name__)
__all__ = ["NoiseModel", "NoiseModel2"]
class NoiseModelBase(ABC):
_name = "NoiseModelBase"
def __init__(self):
self.nparam = 0
self.name = "noise"
self.parameters = pd.DataFrame(
columns=["initial", "pmin", "pmax", "vary", "name"])
def set_init_parameters(self, oseries=None):
if oseries is not None:
pinit = oseries.index.to_series().diff() / pd.Timedelta(1, "d")
pinit = pinit.median()
else:
pinit = 14.0
self.parameters.loc["noise_alpha"] = (pinit, 0, 5000, True, "noise")
@set_parameter
def set_initial(self, name, value):
"""Internal method to set the initial parameter value
Notes
-----
The preferred method for parameter setting is through the model.
"""
if name in self.parameters.index:
self.parameters.loc[name, "initial"] = value
else:
print("Warning:", name, "does not exist")
@set_parameter
def set_pmin(self, name, value):
"""Internal method to set the minimum value of the noisemodel.
Notes
-----
The preferred method for parameter setting is through the model.
"""
if name in self.parameters.index:
self.parameters.loc[name, "pmin"] = value
else:
print("Warning:", name, "does not exist")
@set_parameter
def set_pmax(self, name, value):
"""Internal method to set the maximum parameter values.
Notes
-----
The preferred method for parameter setting is through the model.
"""
if name in self.parameters.index:
self.parameters.loc[name, "pmax"] = value
else:
print("Warning:", name, "does not exist")
@set_parameter
def set_vary(self, name, value):
"""Internal method to set if the parameter is varied during
optimization.
Notes
-----
The preferred method for parameter setting is through the model.
"""
self.parameters.loc[name, "vary"] = value
def to_dict(self):
return {"type": self._name}
class NoiseModel(NoiseModelBase):
"""Noise model with exponential decay of the residual and
weighting with the time step between observations.
Notes
-----
Calculates the noise [1]_ according to:
.. math::
v(t1) = r(t1) - r(t0) * exp(- (t1 - t0) / alpha)
Note that in the referenced paper, alpha is defined as the inverse of
alpha used in Pastas. The unit of the alpha parameter is always in days.
Examples
--------
It can happen that the noisemodel is used during model calibration
to explain most of the variation in the data. A recommended solution is to
scale the initial parameter with the model timestep, E.g.::
>>> n = NoiseModel()
>>> n.set_initial("noise_alpha", 1.0 * ml.get_dt(ml.freq))
References
----------
.. [1] <NAME>, <NAME>., and <NAME> (2005), Modeling irregularly spaced residual series as a continuous stochastic process, Water Resour. Res., 41, W12404, doi:10.1029/2004WR003726.
"""
_name = "NoiseModel"
def __init__(self):
NoiseModelBase.__init__(self)
self.nparam = 1
self.set_init_parameters()
def simulate(self, res, parameters):
"""
Parameters
----------
res : pandas.Series
The residual series.
parameters : array-like, optional
Alpha parameters used by the noisemodel.
Returns
-------
noise: pandas.Series
Series of the noise.
"""
alpha = parameters[0]
odelt = (res.index[1:] - res.index[:-1]).values / pd.Timedelta("1d")
# res.values is needed else it gets messed up with the dates
v = res.values[1:] - np.exp(-odelt / alpha) * res.values[:-1]
res.iloc[1:] = v * self.weights(alpha, odelt)
res.iloc[0] = 0
res.name = "Noise"
return res
@staticmethod
def weights(alpha, odelt):
"""Method to calculate the weights for the noise based on the
sum of weighted squared noise (SWSI) method.
Parameters
----------
alpha: float
    Decay parameter of the noise model, in days.
odelt: numpy.ndarray
    Time steps between the observations, in days.
Returns
-------
w: numpy.ndarray
    Weights applied to the noise values.
"""
# divide power by 2 as nu / sigma is returned
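# The weight computed below is, roughly,
#   w_i = exp((1 / (2 * n)) * sum_j(log(1 - exp(-2 * dt_j / alpha))))
#         / sqrt(1 - exp(-2 * dt_i / alpha))
# i.e. the geometric mean of sqrt(1 - exp(-2 * dt / alpha)) over all time
# steps divided by the value for the current time step.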
power = 1.0 / (2.0 * odelt.size)
exp = np.exp(-2.0 / alpha * odelt) # Twice as fast as 2*odelt/alpha
w = np.exp(power * np.sum(np.log(1.0 - exp))) / np.sqrt(1.0 - exp)
return w
class NoiseModel2(NoiseModelBase):
"""
Noise model with exponential decay of the residual.
Notes
-----
Calculates the noise according to:
.. math::
v(t1) = r(t1) - r(t0) * exp(- (t1 - t0) / alpha)
The unit of the alpha parameter is always in days.
Examples
--------
It can happen that the noisemodel is used during model calibration
to explain most of the variation in the data. A recommended solution is to
scale the initial parameter with the model timestep, E.g.::
>>> n = NoiseModel()
>>> n.set_initial("noise_alpha", 1.0 * ml.get_dt(ml.freq))
"""
_name = "NoiseModel2"
def __init__(self):
NoiseModelBase.__init__(self)
self.nparam = 1
self.set_init_parameters()
@staticmethod
def simulate(res, parameters):
"""
Parameters
----------
res : pandas.Series
The residual series.
parameters : array_like, optional
Alpha parameters used by the noisemodel.
Returns
-------
noise: pandas.Series
Series of the noise.
"""
alpha = parameters[0]
odelt = (res.index[1:] - res.index[:-1]).values / | pd.Timedelta("1d") | pandas.Timedelta |
import sys
import numpy as np
import pandas as pd
import json
import os
from joblib import Parallel, delayed
from gtad_lib import opts
def load_json(file):
with open(file) as json_file:
data = json.load(json_file)
return data
def get_infer_dict(opt):
df = | pd.read_csv(opt["video_info"]) | pandas.read_csv |
import os
import glob
import csv
import pandas as pd
import numpy as np
def save_as_csv():
"""
Saves all raw txt files as separate csvs to later merge.
"""
# Biomedical data:
read_aliquot = pd.read_csv(
r"./00_raw_data/gdc_download_biomedical_ov/nationwidechildrens.org_biospecimen_aliquot_ov.txt",
sep="\t",
)
read_aliquot.drop(0, 0, inplace=True)
read_aliquot.to_csv(r"./01_csv_data/biomed_aliquot.csv", index=None)
no_duplicates_df = read_aliquot.drop_duplicates(subset=["bcr_patient_uuid"])
no_duplicates_df.to_csv("./01_csv_data/biomed_aliquot_short.csv", index=None)
read_analyte = pd.read_csv(
r"./00_raw_data/gdc_download_biomedical_ov/nationwidechildrens.org_biospecimen_analyte_ov.txt",
sep="\t",
)
read_analyte.drop(0, 0, inplace=True)
read_analyte.to_csv(r"./01_csv_data/biomed_analyte.csv", index=None)
read_diag_slides = pd.read_csv(
r"./00_raw_data/gdc_download_biomedical_ov/nationwidechildrens.org_biospecimen_diagnostic_slides_ov.txt",
sep="\t",
)
read_diag_slides.drop(0, 0, inplace=True)
read_diag_slides.to_csv(r"./01_csv_data/biomed_diag_slides.csv", index=None)
read_portion = pd.read_csv(
r"./00_raw_data/gdc_download_biomedical_ov/nationwidechildrens.org_biospecimen_portion_ov.txt",
sep="\t",
)
read_portion.drop(0, 0, inplace=True)
read_portion.to_csv(r"./01_csv_data/biomed_portion.csv", index=None)
read_protocol = pd.read_csv(
r"./00_raw_data/gdc_download_biomedical_ov/nationwidechildrens.org_biospecimen_protocol_ov.txt",
sep="\t",
)
read_protocol.drop(0, 0, inplace=True)
read_protocol.to_csv(r"./01_csv_data/biomed_protocol.csv", index=None)
read_sample = pd.read_csv(
r"./00_raw_data/gdc_download_biomedical_ov/nationwidechildrens.org_biospecimen_sample_ov.txt",
sep="\t",
)
read_sample.drop(0, 0, inplace=True)
read_sample.to_csv(r"./01_csv_data/biomed_sample.csv", index=None)
read_shipment_portion = pd.read_csv(
r"./00_raw_data/gdc_download_biomedical_ov/nationwidechildrens.org_biospecimen_shipment_portion_ov.txt",
sep="\t",
)
read_shipment_portion.drop(0, 0, inplace=True)
read_shipment_portion.to_csv(
r"./01_csv_data/biomed_shipment_portion.csv", index=None
)
read_slide = pd.read_csv(
r"./00_raw_data/gdc_download_biomedical_ov/nationwidechildrens.org_biospecimen_slide_ov.txt",
sep="\t",
)
read_slide.drop(0, 0, inplace=True)
read_slide.to_csv(r"./01_csv_data/biomed_slide.csv", index=None)
read_ssf_norm = pd.read_csv(
r"./00_raw_data/gdc_download_biomedical_ov/nationwidechildrens.org_ssf_normal_controls_ov.txt",
sep="\t",
)
read_ssf_norm.drop([0, 1], 0, inplace=True)
read_ssf_norm.to_csv(r"./01_csv_data/biomed_ssf_norm.csv", index=None)
read_ssf_tumor = pd.read_csv(
r"./00_raw_data/gdc_download_biomedical_ov/nationwidechildrens.org_ssf_tumor_samples_ov.txt",
sep="\t",
)
read_ssf_tumor.drop([0, 1], 0, inplace=True)
read_ssf_tumor.to_csv(r"./01_csv_data/biomed_ssf_tumor.csv", index=None)
# Clinical data:
read_drug = pd.read_csv(
r"./00_raw_data/gdc_download_clinical_ov/nationwidechildrens.org_clinical_drug_ov.txt",
sep="\t",
)
read_drug.drop([0, 1], 0, inplace=True)
read_drug.to_csv(r"./01_csv_data/clinical_drug.csv", index=None)
read_v1_nte = pd.read_csv(
r"./00_raw_data/gdc_download_clinical_ov/nationwidechildrens.org_clinical_follow_up_v1.0_nte_ov.txt",
sep="\t",
)
read_v1_nte.drop([0, 1], 0, inplace=True)
read_v1_nte.to_csv(r"./01_csv_data/clinical_v1_nte.csv", index=None)
read_v1 = pd.read_csv(
r"./00_raw_data/gdc_download_clinical_ov/nationwidechildrens.org_clinical_follow_up_v1.0_ov.txt",
sep="\t",
)
read_v1.drop([0, 1], 0, inplace=True)
read_v1.to_csv(r"./01_csv_data/clinical_v1.csv", index=None)
read_nte = pd.read_csv(
r"./00_raw_data/gdc_download_clinical_ov/nationwidechildrens.org_clinical_nte_ov.txt",
sep="\t",
)
read_nte.drop([0, 1], 0, inplace=True)
read_nte.to_csv(r"./01_csv_data/clinical_nte.csv", index=None)
read_omf_v4 = pd.read_csv(
r"./00_raw_data/gdc_download_clinical_ov/nationwidechildrens.org_clinical_omf_v4.0_ov.txt",
sep="\t",
)
read_omf_v4.drop([0, 1], 0, inplace=True)
read_omf_v4.to_csv(r"./01_csv_data/clinical_omf_v4.csv", index=None)
read_patient = pd.read_csv(
r"./00_raw_data/gdc_download_clinical_ov/nationwidechildrens.org_clinical_patient_ov.txt",
sep="\t",
)
read_patient.drop([0, 1], 0, inplace=True)
read_patient.to_csv(r"./01_csv_data/clinical_patient.csv", index=None)
read_radiation = pd.read_csv(
r"./00_raw_data/gdc_download_clinical_ov/nationwidechildrens.org_clinical_radiation_ov.txt",
sep="\t",
)
read_radiation.drop([0, 1], 0, inplace=True)
read_radiation.to_csv(r"./01_csv_data/clinical_radiation.csv", index=None)
def merge_csv():
"""
Concatenates all csvs into a single merged_bioclin_data.csv
"""
path = "./01_csv_data"
all_files = glob.glob(os.path.join(path, "*.csv"))
all_df = []
for f in all_files:
df = | pd.read_csv(f, sep=",") | pandas.read_csv |
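# --- Hedged refactor sketch (added; not part of the original source) ---
# save_as_csv() above repeats the same read/drop/write pattern many times; the
# same work can be expressed as a loop over (input file, output name, rows to
# drop). Only two of the files already used above are listed as examples.
import os
import pandas as pd

BIOMED_DIR = "./00_raw_data/gdc_download_biomedical_ov"
TABLES = [
    ("nationwidechildrens.org_biospecimen_analyte_ov.txt", "biomed_analyte.csv", [0]),
    ("nationwidechildrens.org_ssf_normal_controls_ov.txt", "biomed_ssf_norm.csv", [0, 1]),
]

def convert_tables(tables, in_dir=BIOMED_DIR, out_dir="./01_csv_data"):
    # read each raw tab-separated file, drop the header/annotation rows, write csv
    for in_name, out_name, drop_rows in tables:
        df = pd.read_csv(os.path.join(in_dir, in_name), sep="\t")
        df = df.drop(index=drop_rows)
        df.to_csv(os.path.join(out_dir, out_name), index=False)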
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx._ndarray_values)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx._ndarray_values))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="A"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "M"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq="D")
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = pd.date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
data = pd.Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
msg = "specified freq and dtype are different"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "M"
with pytest.raises(ValueError, match="freq not specified"):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array([Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
[pd.NaT, pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
[
pd.NaT,
pd.NaT,
Period("2011-01", freq="M"),
Period("2011-01", freq="M"),
]
)
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([pd.NaT, pd.NaT])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array(["NaT", "NaT"]))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
)
# first element is pd.NaT
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
)
def test_constructor_mixed(self):
idx = PeriodIndex(["2011-01", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(["NaT", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period("2011-01-01", freq="D"), pd.NaT, "2012-01-01"])
exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D")
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range("2007-01", name="p", periods=2, freq="M")
result = idx._simple_new(idx, name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype("i8"), name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq="M", name="p")
result = idx._simple_new(idx, name="p", freq="M")
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
msg = r"PeriodIndex\._simple_new does not accept floats"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex._simple_new(floats, freq="M")
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex(floats, freq="M")
def test_constructor_nat(self):
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start="NaT", end="2011-01-01", freq="M")
with pytest.raises(ValueError, match=msg):
period_range(start="2011-01-01", end="NaT", freq="M")
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ["%dQ%d" % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
@pytest.mark.parametrize(
"func, warning", [(PeriodIndex, FutureWarning), (period_range, None)]
)
def test_constructor_freq_mult(self, func, warning):
# GH #7811
with tm.assert_produces_warning(warning):
# must be the same, but for sure...
pidx = func(start="2014-01", freq="2M", periods=4)
expected = PeriodIndex(["2014-01", "2014-03", "2014-05", "2014-07"], freq="2M")
tm.assert_index_equal(pidx, expected)
with | tm.assert_produces_warning(warning) | pandas.util.testing.assert_produces_warning |
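# --- Illustrative sketch (added; not part of the original tests) ---
# The "2M" frequency exercised above means each period spans two months, so
# four periods starting at 2014-01 are 2014-01, 2014-03, 2014-05 and 2014-07,
# matching the expected index in test_constructor_freq_mult.
import pandas as pd

pidx = pd.period_range(start="2014-01", freq="2M", periods=4)
print(pidx)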
import torch
from tqdm import tqdm # for displaying progress bar
import os
import pandas as pd
from models import EmissionModel, TransitionModel, HMM
import numpy as np
class Trainer:
def __init__(self, model, config, lr):
self.model = model
self.config = config
self.lr = lr
self.optimizer = torch.optim.Adam(model.parameters(), lr=self.lr, weight_decay=0.00001)
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')
self.train_df = pd.DataFrame(columns=["loss","lr"])
self.valid_df = | pd.DataFrame(columns=["loss","lr"]) | pandas.DataFrame |
import pandas as pd
from flask import Flask
from flask_restful import Resource, Api
import sqlite3
import json
import os
app = Flask(__name__)
api = Api(app)
class AppGenreController(Resource):
def get(self):
data = pd.read_csv(".\DataFiles\AppleStore.csv")
data_music_and_book = data.loc[(data['prime_genre']=='Music') | (data['prime_genre']=='Book')].sort_values(['rating_count_tot'],ascending=False).head(n=10)
db = sqlite3.connect('.\DataFiles\db')
db.text_factory = str
df = pd.DataFrame({'id':data_music_and_book.id,
'track_name': data_music_and_book.track_name,
'n_citacoes':data_music_and_book.rating_count_tot,
'size_bytes':data_music_and_book.size_bytes,
'price':data_music_and_book.price,
'prime_genre':data_music_and_book.prime_genre
}
)
df.to_csv(os.path.join('.\DataFiles', 'rating_genre.csv') , sep=',', encoding='utf-8',index=False)
df.to_sql("rating_genre", db, if_exists="replace")
dfreader = | pd.read_sql("select id,track_name,n_citacoes,size_bytes,price,prime_genre from rating_genre", db) | pandas.read_sql |
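# --- Hedged usage sketch (added; not part of the original source) ---
# The resource above is only defined; typical flask_restful wiring registers it
# on a route and starts the development server. The route path below is an
# assumption for illustration.
if __name__ == "__main__":
    api.add_resource(AppGenreController, "/top-music-and-books")
    app.run(debug=True)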
import numpy as np
import pytest
import pandas as pd
from pandas.core.sorting import nargsort
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseMethodsTests(BaseExtensionTests):
"""Various Series and DataFrame methods."""
@pytest.mark.parametrize('dropna', [True, False])
def test_value_counts(self, all_data, dropna):
all_data = all_data[:10]
if dropna:
other = np.array(all_data[~all_data.isna()])
else:
other = all_data
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
expected = pd.Series(other).value_counts(
dropna=dropna).sort_index()
self.assert_series_equal(result, expected)
def test_count(self, data_missing):
df = pd.DataFrame({"A": data_missing})
result = df.count(axis='columns')
expected = pd.Series([0, 1])
self.assert_series_equal(result, expected)
def test_series_count(self, data_missing):
# GH#26835
ser = pd.Series(data_missing)
result = ser.count()
expected = 1
assert result == expected
def test_apply_simple_series(self, data):
result = pd.Series(data).apply(id)
assert isinstance(result, pd.Series)
def test_argsort(self, data_for_sorting):
result = pd.Series(data_for_sorting).argsort()
expected = pd.Series(np.array([2, 0, 1], dtype=np.int64))
self.assert_series_equal(result, expected)
def test_argsort_missing(self, data_missing_for_sorting):
result = pd.Series(data_missing_for_sorting).argsort()
expected = pd.Series(np.array([1, -1, 0], dtype=np.int64))
self.assert_series_equal(result, expected)
@pytest.mark.parametrize('na_position, expected', [
('last', np.array([2, 0, 1], dtype=np.dtype('intp'))),
('first', np.array([1, 2, 0], dtype=np.dtype('intp')))
])
def test_nargsort(self, data_missing_for_sorting, na_position, expected):
# GH 25439
result = nargsort(data_missing_for_sorting, na_position=na_position)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values(self, data_for_sorting, ascending):
ser = pd.Series(data_for_sorting)
result = ser.sort_values(ascending=ascending)
expected = ser.iloc[[2, 0, 1]]
if not ascending:
expected = expected[::-1]
self.assert_series_equal(result, expected)
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values_missing(self, data_missing_for_sorting, ascending):
ser = pd.Series(data_missing_for_sorting)
result = ser.sort_values(ascending=ascending)
if ascending:
expected = ser.iloc[[2, 0, 1]]
else:
expected = ser.iloc[[0, 2, 1]]
self.assert_series_equal(result, expected)
@pytest.mark.parametrize('ascending', [True, False])
def test_sort_values_frame(self, data_for_sorting, ascending):
df = pd.DataFrame({"A": [1, 2, 1],
"B": data_for_sorting})
result = df.sort_values(['A', 'B'])
expected = pd.DataFrame({"A": [1, 1, 2],
'B': data_for_sorting.take([2, 0, 1])},
index=[2, 0, 1])
self.assert_frame_equal(result, expected)
@pytest.mark.parametrize('box', [pd.Series, lambda x: x])
@pytest.mark.parametrize('method', [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
duplicated = box(data._from_sequence([data[0], data[0]]))
result = method(duplicated)
assert len(result) == 1
assert isinstance(result, type(data))
assert result[0] == duplicated[0]
@pytest.mark.parametrize('na_sentinel', [-1, -2])
def test_factorize(self, data_for_grouping, na_sentinel):
labels, uniques = pd.factorize(data_for_grouping,
na_sentinel=na_sentinel)
expected_labels = np.array([0, 0, na_sentinel,
na_sentinel, 1, 1, 0, 2],
dtype=np.intp)
expected_uniques = data_for_grouping.take([0, 4, 7])
tm.assert_numpy_array_equal(labels, expected_labels)
self.assert_extension_array_equal(uniques, expected_uniques)
@pytest.mark.parametrize('na_sentinel', [-1, -2])
def test_factorize_equivalence(self, data_for_grouping, na_sentinel):
l1, u1 = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)
l2, u2 = data_for_grouping.factorize(na_sentinel=na_sentinel)
tm.assert_numpy_array_equal(l1, l2)
self.assert_extension_array_equal(u1, u2)
def test_factorize_empty(self, data):
labels, uniques = pd.factorize(data[:0])
expected_labels = np.array([], dtype=np.intp)
expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype)
tm.assert_numpy_array_equal(labels, expected_labels)
self.assert_extension_array_equal(uniques, expected_uniques)
def test_fillna_copy_frame(self, data_missing):
arr = data_missing.take([1, 1])
df = pd.DataFrame({"A": arr})
filled_val = df.iloc[0, 0]
result = df.fillna(filled_val)
assert df.A.values is not result.A.values
def test_fillna_copy_series(self, data_missing):
arr = data_missing.take([1, 1])
ser = pd.Series(arr)
filled_val = ser[0]
result = ser.fillna(filled_val)
assert ser._values is not result._values
assert ser._values is arr
def test_fillna_length_mismatch(self, data_missing):
msg = "Length of 'value' does not match."
with pytest.raises(ValueError, match=msg):
data_missing.fillna(data_missing.take([1]))
def test_combine_le(self, data_repeated):
# GH 20825
# Test that combine works when doing a <= (le) comparison
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 <= x2)
expected = pd.Series([a <= b for (a, b) in
zip(list(orig_data1), list(orig_data2))])
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 <= x2)
expected = pd.Series([a <= val for a in list(orig_data1)])
self.assert_series_equal(result, expected)
def test_combine_add(self, data_repeated):
# GH 20825
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 + x2)
with np.errstate(over='ignore'):
expected = pd.Series(
orig_data1._from_sequence([a + b for (a, b) in
zip(list(orig_data1),
list(orig_data2))]))
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 + x2)
expected = pd.Series(
orig_data1._from_sequence([a + val for a in list(orig_data1)]))
self.assert_series_equal(result, expected)
def test_combine_first(self, data):
# https://github.com/pandas-dev/pandas/issues/24147
a = pd.Series(data[:3])
b = pd.Series(data[2:5], index=[2, 3, 4])
result = a.combine_first(b)
expected = pd.Series(data[:5])
self.assert_series_equal(result, expected)
@pytest.mark.parametrize('frame', [True, False])
@pytest.mark.parametrize('periods, indices', [
(-2, [2, 3, 4, -1, -1]),
(0, [0, 1, 2, 3, 4]),
(2, [-1, -1, 0, 1, 2]),
])
def test_container_shift(self, data, frame, periods, indices):
# https://github.com/pandas-dev/pandas/issues/22386
subset = data[:5]
data = pd.Series(subset, name='A')
expected = pd.Series(subset.take(indices, allow_fill=True), name='A')
if frame:
result = data.to_frame(name='A').assign(B=1).shift(periods)
expected = pd.concat([
expected,
pd.Series([1] * 5, name='B').shift(periods)
], axis=1)
compare = self.assert_frame_equal
else:
result = data.shift(periods)
compare = self.assert_series_equal
compare(result, expected)
@pytest.mark.parametrize('periods, indices', [
[-4, [-1, -1]],
[-1, [1, -1]],
[0, [0, 1]],
[1, [-1, 0]],
[4, [-1, -1]]
])
def test_shift_non_empty_array(self, data, periods, indices):
# https://github.com/pandas-dev/pandas/issues/23911
subset = data[:2]
result = subset.shift(periods)
expected = subset.take(indices, allow_fill=True)
self.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize('periods', [
-4, -1, 0, 1, 4
])
def test_shift_empty_array(self, data, periods):
# https://github.com/pandas-dev/pandas/issues/23911
empty = data[:0]
result = empty.shift(periods)
expected = empty
self.assert_extension_array_equal(result, expected)
def test_shift_fill_value(self, data):
arr = data[:4]
fill_value = data[0]
result = arr.shift(1, fill_value=fill_value)
expected = data.take([0, 0, 1, 2])
self.assert_extension_array_equal(result, expected)
result = arr.shift(-2, fill_value=fill_value)
expected = data.take([2, 3, 0, 0])
self.assert_extension_array_equal(result, expected)
def test_hash_pandas_object_works(self, data, as_frame):
# https://github.com/pandas-dev/pandas/issues/23066
data = pd.Series(data)
if as_frame:
data = data.to_frame()
a = pd.util.hash_pandas_object(data)
b = pd.util.hash_pandas_object(data)
self.assert_equal(a, b)
def test_searchsorted(self, data_for_sorting, as_series):
b, c, a = data_for_sorting
arr = type(data_for_sorting)._from_sequence([a, b, c])
if as_series:
arr = pd.Series(arr)
assert arr.searchsorted(a) == 0
assert arr.searchsorted(a, side="right") == 1
assert arr.searchsorted(b) == 1
assert arr.searchsorted(b, side="right") == 2
assert arr.searchsorted(c) == 2
assert arr.searchsorted(c, side="right") == 3
result = arr.searchsorted(arr.take([0, 2]))
expected = np.array([0, 2], dtype=np.intp)
| tm.assert_numpy_array_equal(result, expected) | pandas.util.testing.assert_numpy_array_equal |
# import libs
import pandas as pd
import quandl
from datetime import timedelta
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from datetime import datetime
import warnings
warnings.filterwarnings("ignore")
# main class cryptocast
class cryptocast:
# method to init
def __init__(self):
# add coin info to self
self.coin = 'btc'
# add slot for data
self.data = None
# method to get the data
def get_data(self):
# read in api_key
with open('cryptocast/api_key.txt') as key_file:
api_key = key_file.read()
# set quandl config
quandl.ApiConfig.api_key = api_key
# get the dataset
raw_df = quandl.get('BITFINEX/BTCEUR')
# store data
self.data = raw_df
# method to prep data
def prep_data(self):
# load data
prep_df = self.data
# reset the index
prep_df.reset_index(inplace = True)
# select relevant vars
prep_df = prep_df[['Date', 'High', 'Volume', 'Ask']]
# declare date format
prep_df.Date = | pd.to_datetime(prep_df.Date) | pandas.to_datetime |
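# --- Hedged usage sketch (added; not part of the original source) ---
# Typical call order for the class above; assumes a valid Quandl key is stored
# in cryptocast/api_key.txt, as get_data() expects.
caster = cryptocast()
caster.get_data()
caster.prep_data()
print(caster.data.tail())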
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.window import ExponentialMovingWindow
def test_doc_string():
df = | DataFrame({"B": [0, 1, 2, np.nan, 4]}) | pandas.DataFrame |
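# --- Illustrative sketch (added; not part of the original test) ---
# A minimal ExponentialMovingWindow computation on the frame constructed above,
# mirroring the kind of doc-string example this test refers to:
df.ewm(com=0.5).mean()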
import torch
from quilt3 import Package
from collections import defaultdict
from torch.utils.data import Dataset
import pandas as pd
import os
import json
from featuredb import FeatureDatabase
from sklearn import preprocessing
import numpy as np
from sklearn.decomposition import PCA
class QuiltAicsFeatures(Dataset):
def __init__(
self,
num_batches,
BATCH_SIZE,
model_kwargs,
shuffle=True,
corr=False,
train=True,
mask=False
):
"""
Args:
num_batches: Number of batches of synthetic data
BATCH_SIZE: batchsize of synthetic data
model_kwargs: dictionary containing "x_dim"
which indicates input data size
shuffle: True sets condition vector in input data to 0
for all possible permutations
corr: True sets dependent input dimensions
via a correlation matrix
"""
self.num_batches = num_batches
self.BATCH_SIZE = BATCH_SIZE
self.corr = corr
self.shuffle = shuffle
self.model_kwargs = model_kwargs
self.train = train
Batches_C_train, Batches_C_test = torch.empty([0]), torch.empty([0])
Batches_X_train, Batches_X_test = torch.empty([0]), torch.empty([0])
Batches_conds_train, Batches_conds_test = torch.empty([0]), torch.empty([0])
ds = Package.browse(
"aics/pipeline_integrated_single_cell",
"s3://allencell"
)
# Specify path to pre downloaded quilt json files
try:
path_to_json = model_kwargs['json_quilt_path']
except:
path_to_json = "/home/ritvik.vasan/test/"
# json_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]
meta_to_file_name = []
for f in ds["cell_features"]:
meta_to_file_name.append(
{
"filename": f,
**ds["cell_features"][f].meta
}
)
metas = pd.DataFrame(meta_to_file_name)
# Specify path to config file for FeatureDatabase
try:
db = FeatureDatabase(model_kwargs['config_path'])
except:
db = FeatureDatabase("/home/ritvik.vasan/config.json")
t = db.get_pg_table(
"featuresets",
"aics-mitosis-classifier-four-stage_v1.0.0"
)
semi = metas.merge(
t,
left_on="CellId",
right_on="CellId",
suffixes=("_meta", "_mito")
)
# Only interphase or no interphase
semi['Interphase and Mitotic Stages [stage]'] = semi[
'Interphase and Mitotic Stages [stage]'
].apply(lambda x: 0 if x == 0.0 else 1)
dd = defaultdict(list)
for i in range(len(semi['filename'])):
this_file = semi['filename'][i]
a = json.loads(open(path_to_json + this_file).read())
a = dict(
[
(key, value) for key, value in a.items()
if key not in [
'imsize_orig',
'com',
'angle',
'flipdim',
'imsize_registered'
]
]
)
a.update({'CellId': semi['CellId'][i]})
for key, value in a.items():
dd[key].append(value)
features_plus_cellid = pd.DataFrame(dict(dd))
meta_plus_features = pd.merge(
semi,
features_plus_cellid,
on='CellId'
)
i_care_cols = [
c for c in meta_plus_features.columns
if c not in [
'CellId',
'CellIndex',
'FOVId',
'WellId',
'FeatureExplorerURL',
'CellLine',
'Workflow',
'associates',
'filename',
'NucMembSegmentationAlgorithm',
'NucMembSegmentationAlgorithmVersion',
'PlateId'
]
]
meta_plus_features = meta_plus_features[i_care_cols]
meta_plus_features.dropna(inplace=True)
categorical_features = [
'Gene',
'ProteinDisplayName',
'StructureDisplayName'
]
categorical_dataframe = meta_plus_features[categorical_features]
non_categorical_dataframe = meta_plus_features[
[
c for c in meta_plus_features.columns
if c not in categorical_features
]
]
one_hot_categorical_features = pd.get_dummies(
categorical_dataframe,
prefix=None,
drop_first=True
)
# num_of_cells = len(non_categorical_dataframe)
# This is mean, std normalization
non_categorical_dataframe = non_categorical_dataframe.iloc[:, :]
# print(non_categorical_dataframe.shape)
self._feature_names = [
i for i in non_categorical_dataframe.columns
] + [
i for i in one_hot_categorical_features.columns
]
num_training_samples = 33000
x = non_categorical_dataframe.values
std_scaler = preprocessing.StandardScaler()
# 0 is binary, dont scale that column
x_train_and_test_scaled = std_scaler.fit_transform(
x[:, 1:model_kwargs["x_dim"]+1]
)
x_train_scaled = std_scaler.fit_transform(
x[:num_training_samples, 1:model_kwargs["x_dim"]+1]
)
x_test_scaled = std_scaler.transform(
x[num_training_samples:, 1:model_kwargs["x_dim"]+1]
)
if model_kwargs["x_dim"] > 103:
non_categorical_train = pd.DataFrame(
np.concatenate((x[:num_training_samples, 0:1], x_train_scaled), axis=1)
)
non_categorical_test = pd.DataFrame(
np.concatenate((x[num_training_samples:, 0:1], x_test_scaled), axis=1)
)
non_categorical_train_and_test = pd.DataFrame(
np.concatenate((x[:, 0:1], x_train_and_test_scaled), axis=1)
)
# print(non_categorical_train.shape, non_categorical_test.shape)
# print(len(self._feature_names))
# print(non_categorical_train_and_test.shape)
non_categorical_train_and_test.columns = self._feature_names[:103]
else:
non_categorical_train = pd.DataFrame(x_train_scaled)
non_categorical_test = | pd.DataFrame(x_test_scaled) | pandas.DataFrame |
from taller1.models import Userid_Profile, Userid_ProfileCalculado
import pandas as pd
from collections import defaultdict
import psycopg2
import sqlalchemy
from sqlalchemy import create_engine
import numpy as np
import math
from scipy.stats import pearsonr
from django.db import connection
#
class CorrelacionPearson():
def listaUsuariosSimilares(self,usuario_activo,perfiles):
lista_similares=[]
df_perfiles = pd.DataFrame(list(perfiles.values()))
genderNumber = SimilitudCoseno.transforGender(self,df_perfiles)
countryNumber = SimilitudCoseno.transforCountry(self,df_perfiles)
df_perfiles = SimilitudCoseno.procesarDatos(self,df_perfiles,genderNumber,countryNumber)
df_usuario_activo= | pd.DataFrame(data=[[usuario_activo.userid, usuario_activo.gender ,usuario_activo.age,usuario_activo.country ,usuario_activo.registered ,genderNumber[usuario_activo.gender],countryNumber[usuario_activo.country] ]], columns=df_perfiles.columns) | pandas.DataFrame |
import logging
import numpy as np
import pandas as pd
from transformers import AutoTokenizer
from nlstruct.collections import Batcher
from nlstruct.dataloaders import load_quaero
from nlstruct.environment import cached, root
from nlstruct.text import apply_substitutions, huggingface_tokenize
from nlstruct.utils import normalize_vocabularies, df_to_csr, encode_ids
@cached
def preprocess_training_data(
bert_name,
umls_versions=["2014AB"],
add_quaero_splits=None,
n_repeat_quaero_corpus=1,
source_lat=["FRE"],
source_full_lexicon=False,
other_lat=["ENG"],
other_full_lexicon=False,
other_sabs=None,
other_additional_labels=None,
other_mirror_existing_labels=True,
sty_groups=['ANAT', 'CHEM', 'DEVI', 'DISO', 'GEOG', 'LIVB', 'OBJC', 'PHEN', 'PHYS', 'PROC'],
filter_before_preprocess=None,
subs=None,
max_length=100,
apply_unidecode=False,
prepend_labels=[],
mentions=None,
vocabularies=None,
return_raw_mentions=False,
):
"""
Build and preprocess the training data
Parameters
----------
bert_name: str
Name of the bert tokenizer
umls_versions: list of str
UMLS versions (ex ["2014AB"])
add_quaero_splits: list of str
Add the mentions from these quaero splits
n_repeat_quaero_corpus: int
Number of time the quaero corpus mentions should be repeated
source_lat: list of str
Source languages
source_full_lexicon: bool
Add all french umls synonyms
other_lat: list of str
Other languages
other_full_lexicon: bool
Add all english umls synonyms
other_sabs: list of str
Filter only by these sources when querying the english umls
other_additional_labels: list of str
Query those additional labels in the english umls
other_mirror_existing_labels: bool
Query previously added concepts (french, quaero, etc) in the english umls
sty_groups: list of str
If given, filter the lexicons to only keep concepts that are in those groups
filter_before_preprocess: str
Apply a final filtering before deduplicating the mentions and preprocessing them
subs: list of (str, str)
Substitutions to perform on mentions
apply_unidecode: bool
Apply unidecode module on mentions
max_length: int
Cut mentions that are longer than this number of tokens (not wordpieces)
prepend_labels: list of str
Add these virtual (?) labels at the beginning of the vocabulary
mentions: pd.DataFrame
Clean/tokenize these mentions instead of the ones built using this function parameters
vocabularies: dict
Base vocabularies if any
return_raw_mentions: bool
Return the untokenized, raw selected mentions only
Returns
-------
Batcher, Dict[str, np.ndarray], transformers.Tokenizer, pd.DataFrame, pd.DataFrame
Training batcher,
Vocabularies
Huggingface tokenizer
Raw mentions
Mention ids to unique coded id mapping
"""
if mentions is None:
assert not (other_full_lexicon and other_sabs is None and (other_mirror_existing_labels or other_additional_labels is not None))
mrconso = None
if any([source_full_lexicon, other_full_lexicon, other_additional_labels, other_mirror_existing_labels]):
sty_groups_mapping = pd.read_csv(root.resource(f"umls/sty_groups.tsv"), sep='|', header=None, index_col=False, names=["group", "sty"])
if sty_groups is not None:
sty_groups_mapping = sty_groups_mapping[sty_groups_mapping.group.isin(sty_groups)]
### Load the UMLS extract
# MRCONSO
logging.info("Loading MRCONSO...")
mrconso = []
for version in umls_versions:
mrconso_version = pd.read_csv(
root.resource(f"umls/{version}/MRCONSO.RRF"),
sep="|",
header=None,
index_col=False,
names=["CUI", "LAT", "TS", "LUI", "STT", "SUI", "ISPREF", "AUI", "SAUI", "SCUI", "SDUI", "SAB", "TTY", "CODE", "STR", "SRL", "SUPPRESS", "CVF"],
usecols=["CUI", "STR", "LAT", "SAB"],
).rename({"CUI": "label", "STR": "text"}, axis=1)
mrsty = pd.read_csv(
root.resource(f"umls/{version}/MRSTY.RRF"),
sep="|",
header=None,
index_col=False,
names=["CUI", "TUI", "STN", "STY", "ATUI", "CVF"],
usecols=["CUI", "STY"],
).rename({"CUI": "label", "STY": "sty"}, axis=1)
mrsty = mrsty.merge(sty_groups_mapping, on="sty")
mrsty = mrsty.drop_duplicates(['label', 'group'])
mrconso_version = mrconso_version.merge(mrsty)
mrconso.append(mrconso_version)
del mrconso_version, mrsty
mrconso = | pd.concat(mrconso) | pandas.concat |
"""Build a dataframe in pandas."""
import os
import pandas as pd
def symbol_to_path(symbol, base_dir="../data/"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = | pd.DataFrame(index=dates) | pandas.DataFrame |
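# --- Hedged sketch of the usual completion (added; not from the original) ---
# get_data() typically joins each symbol's adjusted-close column onto the empty
# date-indexed frame. The CSV column names below ("Date", "Adj Close") follow
# the layout implied by symbol_to_path() and are assumptions.
def get_data_sketch(symbols, dates):
    df = pd.DataFrame(index=dates)
    for symbol in symbols:
        df_temp = pd.read_csv(
            symbol_to_path(symbol),
            index_col="Date",
            parse_dates=True,
            usecols=["Date", "Adj Close"],
            na_values=["nan"],
        )
        df_temp = df_temp.rename(columns={"Adj Close": symbol})
        df = df.join(df_temp)
    return df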
#!/usr/bin/env python
# coding=utf-8
"""
@version: 0.1
@author: li
@file: factor_solvency.py
@time: 2019-01-28 11:33
"""
import gc, six
import json
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
from utilities.calc_tools import CalcTools
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
@six.add_metaclass(Singleton)
class FactorSolvency(object):
"""
Solvency (debt-paying ability) factors.
"""
def __init__(self):
__str__ = 'factor_solvency'
self.name = '财务指标'
self.factor_type1 = '财务指标'
self.factor_type2 = '偿债能力'
self.description = '财务指标的二级指标-偿债能力'
@staticmethod
def BondsToAsset(tp_solvency, factor_solvency, dependencies=['bonds_payable', 'total_assets']):
"""
:name: Bonds payable to total assets ratio
:desc: Bonds payable (MRQ) / total assets (MRQ) * 100%
"""
management = tp_solvency.loc[:, dependencies]
management['BondsToAsset'] = np.where(
CalcTools.is_zero(management.total_assets.values), 0,
management.bonds_payable.values / management.total_assets.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, on="security_code")
return factor_solvency
@staticmethod
def BookLev(tp_solvency, factor_solvency, dependencies=['total_non_current_liability', 'total_assets']):
"""
:name: Book leverage
:desc: Total non-current liabilities / total shareholders' equity (including minority interests) (MRQ)
"""
management = tp_solvency.loc[:, dependencies]
management['BookLev'] = np.where(
CalcTools.is_zero(management.total_assets.values), 0,
management.total_non_current_liability.values / management.total_assets.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, on="security_code")
return factor_solvency
@staticmethod
def CurrentRatio(tp_solvency, factor_solvency, dependencies=['total_current_assets', 'total_current_liability']):
"""
:name: Current ratio
:desc: Total current assets / total current liabilities (MRQ)
"""
management = tp_solvency.loc[:, dependencies]
management['CurrentRatio'] = np.where(
CalcTools.is_zero(management.total_current_liability.values), 0,
management.total_current_assets.values / management.total_current_liability.values)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, on="security_code")
return factor_solvency
@staticmethod
def DA(tp_solvency, factor_solvency, dependencies=['total_liability', 'total_assets']):
"""
:name: Debt to total assets ratio
:desc: Total liabilities (MRQ) / total assets (MRQ)
"""
contrarian = tp_solvency.loc[:, dependencies]
contrarian['DA'] = np.where(
CalcTools.is_zero(contrarian['total_assets']), 0,
contrarian['total_liability'] / contrarian['total_assets'])
contrarian = contrarian.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, contrarian, on="security_code")
return factor_solvency
@staticmethod
def DTE(tp_solvency, factor_solvency,
dependencies=['total_liability', 'total_current_liability', 'fixed_assets']):
"""
:name: Debt to tangible net worth ratio
:desc: Total liabilities / tangible net worth (MRQ)
"""
contrarian = tp_solvency.loc[:, dependencies]
contrarian['DTE'] = np.where(
CalcTools.is_zero(contrarian['total_current_liability'] + contrarian['fixed_assets']), 0,
contrarian['total_current_liability'] / (contrarian['total_current_liability'] + contrarian['fixed_assets'])
)
contrarian = contrarian.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, contrarian, on="security_code")
return factor_solvency
@staticmethod
def EquityRatio(tp_solvency, factor_solvency,
dependencies=['total_liability', 'equities_parent_company_owners']):
"""
:name: Equity ratio
:desc: Total liabilities / equity attributable to shareholders of the parent company (MRQ)
"""
management = tp_solvency.loc[:, dependencies]
func = lambda x: x[0] / x[1] if x[1] is not None and x[1] != 0 else None
management['EquityRatio'] = management.apply(func, axis=1)
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(management, factor_solvency, how='outer', on='security_code')
return factor_solvency
@staticmethod
def EquityPCToIBDebt(tp_solvency, factor_solvency, dependencies=['equities_parent_company_owners',
'shortterm_loan',
'non_current_liability_in_one_year',
'longterm_loan',
'bonds_payable',
'interest_payable']):
"""
:name: Equity attributable to parent company shareholders / interest-bearing debt
:desc: Equity attributable to parent company shareholders / interest-bearing debt (note: interest-bearing debt = short-term loans + non-current liabilities due within one year + long-term loans + bonds payable + interest payable)
"""
management = tp_solvency.loc[:, dependencies]
management["debt"] = (management.shortterm_loan +
management.non_current_liability_in_one_year +
management.longterm_loan +
management.bonds_payable +
management.interest_payable)
management['EquityPCToIBDebt'] = np.where(
CalcTools.is_zero(management.debt.values), 0,
management.equities_parent_company_owners.values / management.debt.values)
dependencies = dependencies + ['debt']
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
@staticmethod
def EquityPCToTCap(tp_solvency, factor_solvency, dependencies=['equities_parent_company_owners',
'total_owner_equities',
'shortterm_loan',
'non_current_liability_in_one_year',
'longterm_loan', 'bonds_payable',
'interest_payable']):
"""
:name: Equity attributable to parent company shareholders / total invested capital
:desc: Equity attributable to parent company shareholders / total invested capital (note: total invested capital = total owners' equity + interest-bearing debt)
"""
management = tp_solvency.loc[:, dependencies]
management["tc"] = (management.total_owner_equities
+ management.shortterm_loan
+ management.non_current_liability_in_one_year
+ management.longterm_loan
+ management.bonds_payable
+ management.interest_payable)
management['EquityPCToTCap'] = np.where(
CalcTools.is_zero(management.tc.values), 0,
management.equities_parent_company_owners.values / management.tc.values)
dependencies = dependencies + ['tc']
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
# InteBearDebtToTotalCapital = interest-bearing debt / total capital, where total capital = fixed assets + net working capital and net working capital = current assets - current liabilities
# i.e. InteBearDebtToTotalCapital = interest-bearing debt / (fixed assets + current assets - current liabilities)
@staticmethod
def IntBDToCap(tp_solvency, factor_solvency, dependencies=['shortterm_loan',
'non_current_liability_in_one_year',
'longterm_loan',
'bonds_payable',
'interest_payable',
'fixed_assets',
'total_current_assets',
'total_current_liability']):
"""
:name: Interest-bearing debt / total invested capital
:desc: Interest-bearing debt / total invested capital * 100% (MRQ)
"""
contrarian = tp_solvency.loc[:, dependencies]
contrarian['interest_bearing_liability'] = contrarian['shortterm_loan'] + \
contrarian['non_current_liability_in_one_year'] + \
contrarian['longterm_loan'] + \
contrarian['bonds_payable'] + contrarian['interest_payable']
contrarian['IntBDToCap'] = np.where(
CalcTools.is_zero(contrarian['fixed_assets'] + contrarian['total_current_assets'] + \
contrarian['total_current_liability']), 0,
contrarian['interest_bearing_liability'] / (contrarian['fixed_assets'] + contrarian['total_current_assets']
+ contrarian['total_current_liability'])
)
dependencies = dependencies + ['interest_bearing_liability']
contrarian = contrarian.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, contrarian, how='outer', on="security_code")
return factor_solvency
@staticmethod
def LDebtToWCap(tp_solvency, factor_solvency, dependencies=['total_current_assets',
'total_current_liability',
'total_non_current_assets']):
"""
:name: Long-term debt to working capital ratio
:desc: Total non-current liabilities / (total current assets - total current liabilities)
"""
management = tp_solvency.loc[:, dependencies]
management['LDebtToWCap'] = np.where(
CalcTools.is_zero(management.total_current_assets.values - management.total_current_liability.values), 0,
management.total_non_current_assets.values
/ (management.total_current_assets.values - management.total_current_liability.values))
management = management.drop(dependencies, axis=1)
factor_solvency = pd.merge(factor_solvency, management, how='outer', on="security_code")
return factor_solvency
@staticmethod
def MktLev(tp_solvency, factor_solvency, dependencies=['total_non_current_liability', 'market_cap']):
"""
:name: Market leverage
:desc: Total non-current liabilities (MRQ) / (total non-current liabilities (MRQ) + total market capitalization)
"""
management = tp_solvency.loc[:, dependencies]
management['MktLev'] = np.where(
CalcTools.is_zero(management.market_cap.values), 0,
management.total_non_current_liability.values /
(management.total_non_current_liability.values + management.market_cap.values))
management = management.drop(dependencies, axis=1)
factor_solvency = | pd.merge(factor_solvency, management, how='outer', on="security_code") | pandas.merge |
import arcgis
import pandas as pd
import os
import arcpy
"""--------------------------------------------------------------------------------
Script Name: Clean Street Names
Description: This script fixes and cleans a layer with a "FullName" street
field. A "FullName" street field includes street name and street prefix. A "FLAG"
field is created in the output layer that shows fields with one element in its
string or fewer, or 5 elements in the string or more. This field can be used as
an inspection field for data integrity.
Examples:
INPUT OUTPUT
---------------------------------------------
walnut blv. ---> WALNUT BLVD
MaIn Street. ---> MAIN ST
Silver road east ---> E SILVER RD
89 Highway (Eastbound) ---> EB 89 HWY
knoll creek ---> KNOLL CR
SOUTH / richmond av ---> S RICHMOND AVE
An Excel sheet is needed in order to run the script. The Excel sheet is called "NameABBRVs"
and needs to be saved within the same directory as the script. It
contains two lists: one with street prefixes and one with street abbreviations.
Created By: <NAME>.
Date: 3/25/2019.
------------------------------------------------------------------------------------"""
arcpy.env.overwriteOutput = True
#inFC, AS PARAMETER, fcName REPRESENTS PATH TO THE FC, GET INFIELD FROM USER, GET OUTFC FROM USER
inFC = arcpy.GetParameterAsText(0)
fcName = os.path.basename(inFC)
inField = arcpy.GetParameterAsText(1)
outFC = arcpy.GetParameterAsText(2)
fc_df = | pd.DataFrame.spatial.from_featureclass(fcName) | pandas.DataFrame.spatial.from_featureclass |
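# --- Hedged sketch (added; not part of the original script) ---
# One way to implement the cleaning described in the header: uppercase the
# name, move directional words to the front, and map street-type words to their
# abbreviations. The two dictionaries stand in for the "NameABBRVs" workbook
# lists and are assumptions; the real script reads them from the Excel file.
PREFIXES = {"EAST": "E", "WEST": "W", "NORTH": "N", "SOUTH": "S", "EASTBOUND": "EB"}
ABBRVS = {"STREET": "ST", "ROAD": "RD", "AVENUE": "AVE", "AV": "AVE",
          "BOULEVARD": "BLVD", "BLV": "BLVD", "HIGHWAY": "HWY", "CREEK": "CR"}

def clean_full_name(raw):
    # tokenize, strip punctuation, split directional words from the rest
    tokens = [t.strip(".()/") for t in raw.upper().split() if t.strip(".()/")]
    prefixes = [PREFIXES[t] for t in tokens if t in PREFIXES]
    rest = [ABBRVS.get(t, t) for t in tokens if t not in PREFIXES]
    return " ".join(prefixes + rest)

# clean_full_name("SOUTH / richmond av") -> "S RICHMOND AVE"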
import pandas as pd
import numpy as np
from .cleanning import delFromVardict
# # removed from version 0.0.8, replaced by calculating woe directly inside bitable
# def calcWOE(allGoodCnt, allBadCnt, eachGoodCnt, eachBadCnt):
#
# woe = np.log((eachGoodCnt / eachBadCnt) / (allGoodCnt / allBadCnt))
#
# return woe
# # removed from version 0.0.8, replaced by calculating iv directly inside bitable
# def calcIV(allGoodCnt, allBadCnt, eachGoodCnt, eachBadCnt):
# # calcIV(allGoodCnt, allBadCnt, eachGoodCnt, eachBadCnt, label='DEFAULT')
# woe = calcWOE(allGoodCnt, allBadCnt, eachGoodCnt, eachBadCnt)
# ivcolumn = (eachGoodCnt / allGoodCnt - eachBadCnt / allBadCnt) * woe
# iv = sum(ivcolumn)
#
# return ivcolumn, iv
def bivariate(df, col, label, withIV=True, missingvalue='missing', dealMissing=True):
df = df.replace(np.nan, missingvalue)
gb = df.groupby(col, as_index=False)
total = df.shape[0]
all = gb.count()
bad = gb.sum()[label]
good = (all[label] - bad)
bitable = pd.DataFrame({col: all[col], 'total': good + bad, 'good': good, 'bad': bad}). \
replace(0, 0.001). \
assign(totalDist=lambda x: x.total / sum(x.total),
goodDist=lambda x: x.good / sum(x.good),
badDist=lambda x: x.bad / sum(x.bad),
goodRate=lambda x: x.good / (x.total),
badRate=lambda x: x.bad / (x.total)
). \
assign(woe=lambda x: np.log(x.badDist / x.goodDist))
if withIV:
bitable['iv'] = (bitable['badDist'] - bitable['goodDist']) * bitable['woe']
totalIV = sum(bitable['iv'])
bitable['totalIV']=totalIV
return bitable, totalIV
# return a dictionary of results of bivariate analysis
def getBiDict(df, label, getiv=False):
bidict = {}
ivdict = {}
for i in df.drop([label], axis=1).columns:
tmp = bivariate(df, i, label)
bidict[i] = tmp[0]
ivdict[i] = tmp[1]
ivtable=pd.DataFrame(list(ivdict.items()),columns=['feature','iv']).sort_values('iv',ascending=False)
if getiv:
return bidict,ivtable
else:
return bidict
### Transformation ###
######################
# Convert each WOE table into a dict and build a new df1 containing all WOE values plus the label
def mapWOE(df, bidict, missingvalue='missing'):
df1 = df.copy()
df1 = df1.replace(np.nan, missingvalue)
for i in df1.columns:
if i in bidict.keys():
tmp = bidict[i]
tmpdict = pd.Series(tmp.woe.values, index=tmp[i]).to_dict()
tmplist = []
for j in df1[i]:
tmplist.append(tmpdict[j])
df1[i] = | pd.Series(tmplist) | pandas.Series |
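# --- Hedged usage sketch (added; not part of the original source) ---
# Typical flow with the helpers above: build the per-feature WOE/IV tables,
# then map the raw frame onto WOE values for modelling. "df" is assumed to be
# a DataFrame whose binary target column is named "DEFAULT".
bidict, ivtable = getBiDict(df, label="DEFAULT", getiv=True)
woe_df = mapWOE(df, bidict)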
import json
import os
import pandas as pd
from gender_guesser.detector import Detector
from tqdm import tqdm
from biorxiv_02_download_articles import ARTICLES_DIRECTORY, BIORXIV_DIRECTORY
def main():
detector = Detector(case_sensitive=False)
rows = []
for name in tqdm(os.listdir(ARTICLES_DIRECTORY)):
if not name.endswith('.json'):
continue
with open(os.path.join(ARTICLES_DIRECTORY, name)) as file:
j = json.load(file)
collection = j['collection']
if not collection:
tqdm.write(f'Empty collection for {name}')
continue
i = collection[0]
authors = i['authors'].split(';')
rows.append(dict(
id=i['doi'],
title=i['title'],
first_author_name=authors[0],
first_author_inferred_gender=fix_name(authors[0], detector),
license=i['license'],
category=i['category'].strip(),
posted=i['date'],
peer_reviewed=i['published'],
))
df = | pd.DataFrame(rows) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.feature_selection import VarianceThreshold
from fancyimpute import KNN
# , index_col='challengeID')
bg = pd.read_csv('../../ff_data/background.csv', low_memory=False)
bg.cf4fint = ((pd.to_datetime(bg.cf4fint) -
pd.to_datetime('1960-01-01')) / np.timedelta64(1, 'D')).astype(int)
# Loading bg such that some bugs in labels are fixed, see https://github.com/fragilefamilieschallenge/open-source-submissions/blob/master/rfjz%20-%2011%20submission/FF%20Pre-Imputation.ipynb
# , index_col='challengeID')
train = | pd.read_csv('../../ff_data/train.csv', low_memory=False) | pandas.read_csv |
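# --- Hedged sketch (added; not part of the original source) ---
# The imports above suggest the next steps: join the background features to the
# training labels on challengeID, then drop zero-variance columns before KNN
# imputation. Column names and thresholds here are assumptions.
merged = train.merge(bg, on="challengeID", how="left")
numeric = merged.select_dtypes(include=[np.number])
selector = VarianceThreshold(threshold=0.0).fit(numeric.fillna(0))
numeric = numeric.loc[:, selector.get_support()]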
#!/usr/bin/env python
# coding: utf-8
# **Author: <NAME>, Data Scientist at [Quantillion](http://quantillion.io/)**
# # 0. Fire up
# In[ ]:
import pandas as pd
import numpy as np
df = | pd.read_csv('../input/kc_house_data.csv') | pandas.read_csv |
import pandas as pd
import itertools
import pandas_datareader.data as web
from pandas.tseries.offsets import BDay
from datetime import datetime
import random
import math
import collections
# Stocks data
tickers = ['FDX', 'GOOGL', 'XOM', 'KO', 'NOK', 'MS', 'IBM']
stocks_df = web.DataReader(tickers, 'yahoo', start=datetime(2010, 1, 1)).High
# Descriptors guarantee the type and behavior of variables
class Date:
"""General descriptor for date"""
def __init__(self, storage_name):
self.storage_name = storage_name
def __set__(self, instance, value):
if isinstance(value, datetime):
instance.__dict__[self.storage_name] = value
else:
raise ValueError('value must be a datetime')
class OneOfStock:
"""General descriptor for stocks"""
def __init__(self, storage_name):
self.storage_name = storage_name
def __set__(self, instance, value):
if value in set(tickers):
instance.__dict__[self.storage_name] = value
else:
raise ValueError("value must be on of 'FDX', 'GOOGL', 'XOM', 'KO', 'NOK', 'MS', 'IBM'")
class OneOfMode:
"""General descriptor for investor mode"""
def __init__(self, storage_name):
self.storage_name = storage_name
def __set__(self, instance, value):
if value in {defensive, aggressive, mixed}:
instance.__dict__[self.storage_name] = value
else:
raise ValueError("value must be on of defensive, aggressive, mixed")
class Monetary:
"""General descriptor for monetary entries"""
def __init__(self, storage_name):
self.storage_name = storage_name
def __set__(self, instance, value):
if (value is None) or (value >= 0):
instance.__dict__[self.storage_name] = value
else:
raise ValueError('value must be >= 0')
class Investment:
"""Investment is an abstract object. Bonds and Stocks inherit attributes and methods"""
pv = Monetary('pv')
start_date = Date('start_date')
end_date = Date('end_date')
def __init__(self, pv, start_date, end_date):
self.pv = pv
self.start_date = start_date
self.end_date = end_date
self.term = self.end_date - self.start_date
self.cash_flow = None
def return_on_investment(self):
return round((self.cash_flow.iloc[-1, 0] / self.pv) - 1, 4)
def total_return(self):
return round(self.cash_flow.iloc[-1, 0] - self.pv, 2)
def risk_on_investment(self):
return round(self.cash_flow.pct_change().std(), 4)
class Bonds(Investment):
"""All Bonds"""
def __init__(self, pv, rate: float, start_date, end_date):
super(Bonds, self).__init__(pv, start_date, end_date)
self.rate = rate
self.rate_flow = pd.Series(itertools.repeat((1 + self.rate) ** (1 / 365) - 1, self.term.days))
self.rate_flow.iloc[0] = 0
cash_flow = pd.DataFrame({'Date': pd.date_range(self.start_date, end=self.end_date, freq="D", closed='left'),
'Value': (1 + self.rate_flow).cumprod() * self.pv})
self.cash_flow = cash_flow.set_index('Date')
@classmethod # Call a bond as a short one. Ensure rate, min price and min period
def short(cls, start_date, pv=250):
if pv < 250:
raise ValueError('pv must be >= 250')
rate, end_date = 0.015, start_date + pd.DateOffset(years=2)
bond = cls(pv, rate, start_date, end_date)
return bond
@classmethod # Call a bond as a long one. Ensure rate, min price and min period
def long(cls, start_date, pv=1000):
if pv < 1000:
raise ValueError('pv must be >= 1000')
rate, end_date = 0.03, start_date + | pd.DateOffset(years=5) | pandas.DateOffset |
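# --- Hedged usage sketch (added; not part of the original source) ---
# Creating a short bond and reading off its return; the classmethods above fix
# the rate and term, so only a start date (and optionally pv) is needed.
bond = Bonds.short(start_date=datetime(2015, 1, 1), pv=500)
print(bond.return_on_investment(), bond.total_return())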
#!/usr/bin/env python3
import os
import sys
import pandas as pd
import numpy as np
import tensorflow as tf
from Bio import SeqIO
from numpy import array
from numpy import argmax
from warnings import simplefilter
from contextlib import redirect_stderr
from keras.preprocessing.text import Tokenizer
# Hide warning messages
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
simplefilter(action='ignore', category=FutureWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
with redirect_stderr(open(os.devnull, "w")):
from tensorflow.keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
# Show full array
pd.set_option('display.max_rows', None)
np.set_printoptions(threshold=sys.maxsize)
# IO files
INFASTA = sys.argv[1]
RESCSV = sys.argv[2] if len(sys.argv) >=3 else None
# Get model
MODELO = 'model_embedded_order_wb.hdf5'
PADVALUE = 38797
def fasta_frame(fasta_file):
fids = []
fseq = []
with open(fasta_file) as fasta:
for record in SeqIO.parse(fasta, 'fasta'):
fids.append(record.id)
fseq.append(str(record.seq).lower())
s1 = pd.Series(fids, name = 'id')
s2 = pd.Series(fseq, name = 'sequence')
data = {'id':s1, 'sequence':s2}
df = pd.concat(data, axis=1)
return df
# Read fasta as dataframe
fas_df = fasta_frame(INFASTA)
identifiers = fas_df['id']
sequences = fas_df['sequence']
# Labels
te_labels = {'te': 1, 'nt': 2}
# Tokenize sequences
tkz_seq = Tokenizer(num_words = None, split = ' ', char_level = True, lower = True)
tkz_seq.fit_on_texts(sequences)
x_seq_arrays = tkz_seq.texts_to_sequences(sequences)
vocab_size_seq = len(tkz_seq.word_index) + 1
# Pad sequences
padded_seqs = pad_sequences(x_seq_arrays, padding='post', maxlen = PADVALUE)
# Load model
modelo = load_model(MODELO)
# Predict labels
pred_labels = modelo.predict_classes(padded_seqs, batch_size = 2)
mapped_labels = [k for label in pred_labels for k, v in te_labels.items() if v == label]
# Results
mapped_series = pd.Series(mapped_labels)
results_dict = {"id": identifiers, "classification": mapped_series}
results_df = | pd.DataFrame(results_dict) | pandas.DataFrame |
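# --- Hedged sketch (added; not from the original source) ---
# RESCSV is parsed above but unused in this excerpt; a natural final step is to
# print the table and persist it when an output path was given.
print(results_df)
if RESCSV:
    results_df.to_csv(RESCSV, index=False)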
import os
from abc import ABC
import json
import numpy as np
import pandas as pd
from odin.classes import DatasetInterface, TaskType
from odin.utils import *
from odin.utils.utils import encode_segmentation, compute_aspect_ratio_of_segmentation
from pycocotools import mask
from pycocotools import coco
logger = get_root_logger()
class DatasetLocalization(DatasetInterface, ABC):
annotations = None
images = None
possible_analysis = set()
area_size_computed = False
aspect_ratio = False
common_properties = {'area', 'bbox', 'category_id', 'id', 'image_id', 'iscrowd', 'segmentation'}
supported_types = [TaskType.OBJECT_DETECTION, TaskType.INSTANCE_SEGMENTATION]
def __init__(self, dataset_gt_param, task_type, proposal_path=None, images_set_name='test',
images_abs_path=None, similar_classes=None, property_names=None, terminal_env=False,
properties_file=None, for_analysis=True, match_on_filename=False):
if task_type not in self.supported_types:
logger.error(f"Task not supported: {task_type}")
super().__init__(dataset_gt_param, proposal_path, images_set_name, images_abs_path, similar_classes,
property_names, terminal_env, properties_file, match_on_filename)
self.objnames_TP_graphs = []  # reset; object names for which TRUE POSITIVE graphs can be drawn
self.possible_analysis = set()  # reset; updated according to the dataset provided
self.is_segmentation = task_type == TaskType.INSTANCE_SEGMENTATION
self.similar_classes = similar_classes
self.for_analysis = for_analysis
self.load()
def dataset_type_name(self):
return self.images_set_name
def get_annotations_from_class_list(self, classes_to_classify):
classes_id_filter = self.get_categories_id_from_names(classes_to_classify)
anns_filtered = self.annotations[self.annotations["category_id"].isin(classes_id_filter)]
return anns_filtered.to_dict("records")
def is_segmentation_ds(self):
return self.is_segmentation
def __load_proposals(self):
counter = 0
issues = 0
proposals = []
for i, cat in self.categories.iterrows():
c = cat["name"]
c_id = cat["id"]
proposal_path = os.path.join(self.proposal_path, c + ".txt")
with open(proposal_path, "r") as file:
for line in file:
if self.is_segmentation:
try:
arr = line.split(" ")
match_param, confidence = arr[0], float(arr[1])
if not self.match_on_filename:
match_param = int(match_param)
try:
segmentation = [float(v) for v in arr[2:]]
except:
segmentation = []
counter += 1
proposals.append(
{"confidence": confidence, "segmentation": segmentation, self.match_param_props: match_param,
"category_id": c_id, "id": counter})
except:
issues += 1
else:
try:
match_param, confidence, x1, y1, x2, y2 = line.split(" ")
if not self.match_on_filename:
match_param = int(match_param)
confidence = float(confidence)
x1, y1, x2, y2 = float(x1), float(y1), float(x2), float(y2)
counter += 1
proposals.append(
{"confidence": confidence, "bbox": [x1, y1, x2, y2], self.match_param_props: match_param,
"category_id": c_id, "id": counter})
except:
issues += 1
self.__proposals_length = counter
logger.info("Loaded {} proposals and failed with {}".format(counter, issues))
return pd.DataFrame(proposals)  # api: pandas.DataFrame
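# Editor's note (illustrative, not original code): __load_proposals above expects one
# "<class name>.txt" file per category inside proposal_path. For object detection each
# line is "<match_param> <confidence> <x1> <y1> <x2> <y2>"; for instance segmentation it
# is "<match_param> <confidence> <polygon coords...>". A minimal parse of the detection form:
sample_line = "42 0.87 10.0 20.0 110.0 220.0"   # hypothetical values
match_param, confidence, x1, y1, x2, y2 = sample_line.split(" ")
print(int(match_param), float(confidence), [float(v) for v in (x1, y1, x2, y2)])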
"""
Functions for training classifiers on TCGA data.
Some of these functions are adapted from:
https://github.com/greenelab/BioBombe/blob/master/9.tcga-classify/scripts/tcga_util.py
"""
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import (
roc_auc_score,
roc_curve,
precision_recall_curve,
average_precision_score
)
from sklearn.model_selection import (
cross_val_predict,
GridSearchCV,
)
import mpmp.config as cfg
def train_classifier(X_train,
X_test,
y_train,
alphas,
l1_ratios,
seed,
n_folds=4,
max_iter=1000):
"""
Build the logic and sklearn pipelines to predict binary y from dataset x
Arguments
---------
X_train: pandas DataFrame of feature matrix for training data
X_test: pandas DataFrame of feature matrix for testing data
y_train: pandas DataFrame of processed y matrix (output from align_matrices())
alphas: list of alphas to perform cross validation over
l1_ratios: list of l1 mixing parameters to perform cross validation over
n_folds: int of how many folds of cross validation to perform
max_iter: the maximum number of iterations to test until convergence
Returns
------
The full pipeline sklearn object and y matrix predictions for training, testing,
and cross validation
"""
# Setup the classifier parameters
clf_parameters = {
'classify__alpha': alphas,
'classify__l1_ratio': l1_ratios,
}
estimator = Pipeline(
steps=[
(
'classify',
SGDClassifier(
random_state=seed,
class_weight='balanced',
loss='log',
penalty='elasticnet',
max_iter=max_iter,
tol=1e-3,
),
)
]
)
cv_pipeline = GridSearchCV(
estimator=estimator,
param_grid=clf_parameters,
n_jobs=-1,
cv=n_folds,
scoring='average_precision',
return_train_score=True,
)
# Fit the model
cv_pipeline.fit(X=X_train, y=y_train.status)
# Obtain cross validation results
y_cv = cross_val_predict(
cv_pipeline.best_estimator_,
X=X_train,
y=y_train.status,
cv=n_folds,
method='decision_function',
)
# Get all performance results
y_predict_train = cv_pipeline.decision_function(X_train)
y_predict_test = cv_pipeline.decision_function(X_test)
return cv_pipeline, y_predict_train, y_predict_test, y_cv
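# Editor's illustrative sketch (not part of the original module): calling train_classifier
# on a tiny synthetic dataset. The `status` column name follows the usage above; the data
# and hyperparameter values are made up.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_tr = pd.DataFrame(rng.normal(size=(80, 5)), columns=[f"f{i}" for i in range(5)])
    X_te = pd.DataFrame(rng.normal(size=(20, 5)), columns=[f"f{i}" for i in range(5)])
    y_tr = pd.DataFrame({"status": rng.integers(0, 2, size=80)})
    pipeline, pred_train, pred_test, pred_cv = train_classifier(
        X_tr, X_te, y_tr, alphas=[1e-4, 1e-3], l1_ratios=[0.15], seed=0
    )
    print(pipeline.best_params_)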
def train_gb_classifier(X_train,
X_test,
y_train,
learning_rates,
alphas,
lambdas,
seed,
n_folds=4,
max_iter=1000):
"""
Fit gradient-boosted tree classifier to training data, and generate predictions
for test data.
Arguments
---------
X_train: pandas DataFrame of feature matrix for training data
X_test: pandas DataFrame of feature matrix for testing data
y_train: pandas DataFrame of processed y matrix (output from align_matrices())
n_folds: int of how many folds of cross validation to perform
max_iter: the maximum number of iterations to test until convergence
Returns
------
The full pipeline sklearn object and y matrix predictions for training, testing,
and cross validation
"""
from lightgbm import LGBMClassifier
clf_parameters = {
'classify__learning_rate': learning_rates,
'classify__reg_alpha': alphas,
'classify__reg_lambda': lambdas,
}
estimator = Pipeline(
steps=[
(
'classify',
LGBMClassifier(
random_state=seed,
class_weight='balanced',
max_depth=5,
n_estimators=100,
colsample_bytree=0.35
),
)
]
)
cv_pipeline = GridSearchCV(
estimator=estimator,
param_grid=clf_parameters,
n_jobs=-1,
cv=n_folds,
scoring='average_precision',
return_train_score=True,
)
# Fit the model
cv_pipeline.fit(X=X_train, y=y_train.status)
# Obtain cross validation results
y_cv = cross_val_predict(
cv_pipeline.best_estimator_,
X=X_train,
y=y_train.status,
cv=n_folds,
method='predict_proba',
)[:, 1]
# Get all performance results
y_predict_train = cv_pipeline.predict_proba(X_train)[:, 1]
y_predict_test = cv_pipeline.predict_proba(X_test)[:, 1]
return cv_pipeline, y_predict_train, y_predict_test, y_cv
def get_preds(X_test_df, y_test_df, cv_pipeline, fold_no):
"""Get model-predicted probability of positive class for test data.
Also returns true class, to enable quantitative comparisons in analyses.
"""
# get probability of belonging to positive class
y_scores_test = cv_pipeline.decision_function(X_test_df)
y_probs_test = cv_pipeline.predict_proba(X_test_df)
# make sure we're actually looking at positive class prob
assert np.array_equal(cv_pipeline.best_estimator_.classes_,
np.array([0, 1]))
return pd.DataFrame({
'fold_no': fold_no,
'true_class': y_test_df.status,
'score': y_scores_test,
'positive_prob': y_probs_test[:, 1]
}, index=y_test_df.index)
def get_threshold_metrics(y_true, y_pred, drop=False):
"""
Retrieve true/false positive rates and auroc/aupr for class predictions
Arguments
---------
y_true: an array of gold standard mutation status
y_pred: an array of predicted mutation status
drop: boolean if intermediate thresholds are dropped
Returns
-------
dict of AUROC, AUPR, pandas dataframes of ROC and PR data, and cancer-type
"""
roc_columns = ["fpr", "tpr", "threshold"]
pr_columns = ["precision", "recall", "threshold"]
roc_results = roc_curve(y_true, y_pred, drop_intermediate=drop)
roc_items = zip(roc_columns, roc_results)
roc_df = pd.DataFrame.from_dict(dict(roc_items))
prec, rec, thresh = precision_recall_curve(y_true, y_pred)
pr_df = pd.DataFrame.from_records([prec, rec]).T
pr_df = pd.concat([pr_df, pd.Series(thresh)  # api: pandas.Series (rest of the call truncated in the source)
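# Editor's sketch (illustrative only, not part of either file around it): the curve data
# that get_threshold_metrics above assembles, computed directly with scikit-learn on toy
# predictions.
import numpy as np
import pandas as pd
from sklearn.metrics import roc_curve, precision_recall_curve, roc_auc_score, average_precision_score

y_true = np.array([0, 0, 1, 1, 0, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.9])
fpr, tpr, roc_thr = roc_curve(y_true, y_score)
prec, rec, pr_thr = precision_recall_curve(y_true, y_score)
print(pd.DataFrame({"fpr": fpr, "tpr": tpr, "threshold": roc_thr}))
print("AUROC:", roc_auc_score(y_true, y_score), "AUPR:", average_precision_score(y_true, y_score))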
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 10:15:25 2021
@author: lenakilian
"""
import pandas as pd
import copy as cp
import geopandas as gpd
import seaborn as sns
import matplotlib.pyplot as plt
wd = r'/Users/lenakilian/Documents/Ausbildung/UoLeeds/PhD/Analysis/'
years = list(range(2007, 2018, 2))
geog = 'MSOA'
yr = 2015
dict_cat = 'category_8'
cat_dict = pd.read_excel(wd + '/data/processed/LCFS/Meta/lcfs_desc_anne&john.xlsx')  # api: pandas.read_excel
import time
from utils import utils
import argparse
import numpy as np
import pandas as pd
from sklearn import metrics
from keras.models import load_model
from config.echelon_meta import EchelonMeta
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='ECHELON TIER 1 Neural Network')
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--verbose', type=int, default=0)
parser.add_argument('--limit', type=float, default=0.)
parser.add_argument('--model_path', type=str, default=None)
parser.add_argument('--model_names', type=str, default=['echelon_section']) #'echelon.text', 'echelon.rdata', 'echelon.rsrc', 'echelon.data', 'echelon.pdata', 'echelon.header'])
parser.add_argument('--model_ext', type=str, default='.h5')
parser.add_argument('--result_path', type=str, default=None)
parser.add_argument('--confidence_levels', type=int, default=[99]) #[40, 50, 60, 70]) # Percentage
parser.add_argument('--target_confidence', type=int, default=70)
parser.add_argument('--csv', type=str, default=None)
'''
def predict_by_section(wpartition, spartition, sections, model, fn_list, label, batch_size, verbose):
max_len = 0
if len(sections) == 1:
max_len = model.input.shape[1]
else:
max_len = model.input[0].shape[1]
pred = model.predict_generator(
utils.data_generator_by_section(wpartition, spartition, sections, fn_list, label, max_len, batch_size, shuffle=False),
steps=len(fn_list) // batch_size + 1,
verbose=verbose
)
return pred'''
def trigger_predict_by_section():
metaObj = EchelonMeta()
metaObj.project_details()
args = parser.parse_args()
st = time.time()
all_sections_pred = []
# read data
df = pd.read_csv(args.csv, header=None)
fn_list = df[0].values
Y = df[1]
label = np.zeros((fn_list.shape))
for model_name in args.model_names:
sections = ['.header', '.rsrc', '.data']#['.rsrc', '.text', '.rdata'] #model_name.split('.')[1]
#sections = ['.header', '.debug', '.idata']
print('\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ [PROCESSING SECTION -> ' + str(sections)
+ '] [MODEL NAME -> ' + model_name + args.model_ext + ']')
# load model
model = load_model(args.model_path + model_name + args.model_ext)
#model.summary()
pred = predict_by_section(sections, model, fn_list, label, args.batch_size, args.verbose)
for confidence in args.confidence_levels:
df['predicted score - rounded ' + str(confidence) + '% confidence'] = pred // (confidence/100)
acc = metrics.accuracy_score(Y, pred > (confidence/100))
bacc = metrics.balanced_accuracy_score(Y, pred > (confidence/100))
cm = metrics.confusion_matrix(Y, pred > (confidence/100))
#print('Confusion Matrix:\t\t[Confidence: ' + str(confidence) + '%] [Acc: ' + str(acc) + "] [Balanced Acc: " + str(bacc) + ']\n tn fp fn tp')
#print("%5s%5s%5s%5s" % (str(cm[0][0]), str(cm[0][1]), str(cm[1][0]), str(cm[1][1])))
#print("Checking results",acc,bacc,cm)
print("%3s" % str(confidence), " & ", str(acc)[:6], " & %5s & %5s & %5s & %5s \\\\\\hline" % (str(cm[0][0]), str(cm[0][1]), str(cm[1][0]), str(cm[1][1])) )#, " \\\\\n \\hline")
#roc = metrics.roc_curve(Y, pred > (confidence/100))
# print("ROC Curve : ", roc)
#print("\n")
#auc = metrics.roc_auc_score(Y, pred)
#print("Overall ROC AUC Score : ", auc)
fpr, tpr, thds = metrics.roc_curve(Y, pred)
#auc = metrics.roc_auc_score(Y, pred)
#print("Overall ROC AUC Score : ", auc) # , fpr, tpr)
#Malconv roc auc
#maldf = pd.read_csv('aucmalconv.csv', header=None)
#malfpr = maldf[0]
#maltpr = maldf[1]
#plt.plot(malfpr, maltpr, label="auc=0.9983861824925196")
'''plt.plot(fpr, tpr, label="auc=" + str(auc))
plt.plot([0, 1], [0, 1], linestyle='--')
plt.legend(loc=4)
plt.show()
aucdf = pd.DataFrame()
aucdf['fpr'] = fpr
aucdf['tpr'] = tpr
aucdf['thds'] = thds
aucdf.to_csv('aucechelon.csv', header=None, index=False)'''
df['predict score'] = pred
df[0] = [i.split('/')[-1] for i in fn_list] # os.path.basename
df.to_csv(args.result_path + model_name + ".result.csv", header=None, index=False)
print('Results writen in', args.result_path + model_name + ".result.csv")
malware_list = []
combined_pred = []
all_sections_pred.append(pred // (args.target_confidence/100))
for i in range(0, len(fn_list)):
if (all_sections_pred[0][i]) >= 1:# + all_sections_pred[1][i] + all_sections_pred[2][i]) >= 3:
combined_pred.append(1)
malware_list.append(fn_list[i])
else:
combined_pred.append(0)
print("\n\nECHELON TIER-2 RESULTS:\n-----------------------\nNumber of files processed: ", len(fn_list))
'''
acc = metrics.accuracy_score(Y, combined_pred)
bacc = metrics.balanced_accuracy_score(Y, combined_pred)
cm = metrics.confusion_matrix(Y, combined_pred)
print('Confusion Matrix:\t\t[Confidence: ' + str(args.target_confidence) + '%] [Acc: ' + str(acc) + "] [Balanced Acc: "
+ str(bacc) + ']\n tn fp fn tp')
tn = cm[0][0]
fp = cm[0][1]
fn = cm[1][0]
tp = cm[1][1]
# roc = metrics.roc_curve(Y, pred > (confidence / 100))
print("%5s%5s%5s%5s" % (str(tn), str(fp), str(fn), str(tp)), "FPR: ", fp / (fp + tn), "FNR:", fn / (fn + tp))'''
print("\n TOTAL TIME ELAPSED FOR SECTION-WISE PREDICTION - ", str(int(time.time() - st) / 60), " minutes\n")
reconcile(True)
def reconcile(flag):
print("\n\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ RECONCILING DATA $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n\n")
t1 = pd.read_csv('echelon.result.csv', header=None)
y1 = t1[1]
p1 = t1[2]
pv1 = t1[3]
t2 = pd.read_csv('echelon_section.result.csv', header=None)
t2.to_csv("echelon_reconciled.csv", header=None, index=False, mode='w')
mfp = pd.read_csv('malware_fp.csv', header=None)  # api: pandas.read_csv
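# Editor's sketch (toy data, not original code): the thresholding pattern used above,
# turning raw scores into binary calls at a confidence level and summarising them.
import numpy as np
from sklearn import metrics

scores = np.array([0.05, 0.40, 0.72, 0.91, 0.99])
labels = np.array([0, 0, 1, 1, 1])
confidence = 70
calls = scores > (confidence / 100)
tn, fp, fn, tp = metrics.confusion_matrix(labels, calls).ravel()
print("acc:", metrics.accuracy_score(labels, calls), "tn fp fn tp:", tn, fp, fn, tp)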
from Connections.predictor import *
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import accuracy_score
from InterpretationTechniques.PlotAndShow import *
def featureAnnulation(data, pr, annulationValue = 0):
'''
:param data: pandas dataframe with datasets where each row represents a dataset
:param pr: Predictor of the ML system; its resultColumn attribute names the column in data that contains the actual results
:return:
'''
resultColumnName = pr.resultColumn
accuracies = pd.Series()  # api: pandas.Series
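# Editor's sketch of the feature-annulation idea the (truncated) function above describes:
# overwrite one feature at a time with a constant value and measure how accuracy changes.
# This is not the original implementation; `predict_fn` and the parameter names are the
# editor's assumptions.
import pandas as pd
from sklearn.metrics import accuracy_score

def annul_feature_scores(data, result_column, predict_fn, annulation_value=0):
    features = data.drop(columns=[result_column])
    scores = {}
    for col in features.columns:
        perturbed = features.copy()
        perturbed[col] = annulation_value          # annul this feature
        preds = predict_fn(perturbed)
        scores[col] = accuracy_score(data[result_column], preds)
    return pd.Series(scores)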
from datetime import timedelta
import operator
from typing import Any, Callable, List, Optional, Sequence, Union
import numpy as np
from pandas._libs.tslibs import (
NaT,
NaTType,
frequencies as libfrequencies,
iNaT,
period as libperiod,
)
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ,
IncompatibleFrequency,
Period,
get_period_field_arr,
period_asfreq_arr,
)
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
_TD_DTYPE,
ensure_object,
is_datetime64_dtype,
is_float_dtype,
is_period_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import (
ABCIndexClass,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick, _delta_to_tick
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = libfrequencies.get_freq_code(self.freq)
result = get_period_field_arr(alias, self.asi8, base)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
class PeriodArray(dtl.DatetimeLikeArrayMixin, dtl.DatelikeOps):
"""
Pandas ExtensionArray for storing Period data.
Users should use :func:`period_array` to create new instances.
Parameters
----------
values : Union[PeriodArray, Series[period], ndarray[int], PeriodIndex]
The data to store. These should be arrays that can be directly
converted to ordinals without inference or copy (PeriodArray,
ndarray[int64]), or a box around such an array (Series[period],
PeriodIndex).
freq : str or DateOffset
The `freq` to use for the array. Mostly applicable when `values`
is an ndarray of integers, when `freq` is required. When `values`
is a PeriodArray (or box around), it's checked that ``values.freq``
matches `freq`.
dtype : PeriodDtype, optional
A PeriodDtype instance from which to extract a `freq`. If both
`freq` and `dtype` are specified, then the frequencies must match.
copy : bool, default False
Whether to copy the ordinals before storing.
Attributes
----------
None
Methods
-------
None
See Also
--------
period_array : Create a new PeriodArray.
PeriodIndex : Immutable Index for period data.
Notes
-----
There are two components to a PeriodArray
- ordinals : integer ndarray
- freq : pd.tseries.offsets.Offset
The values are physically stored as a 1-D ndarray of integers. These are
called "ordinals" and represent some kind of offset from a base.
The `freq` indicates the span covered by each element of the array.
All elements in the PeriodArray have the same `freq`.
"""
# array priority higher than numpy scalars
__array_priority__ = 1000
_typ = "periodarray" # ABCPeriodArray
_scalar_type = Period
_recognized_scalars = (Period,)
_is_recognized_dtype = is_period_dtype
# Names others delegate to us
_other_ops: List[str] = []
_bool_ops = ["is_leap_year"]
_object_ops = ["start_time", "end_time", "freq"]
_field_ops = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"weekday",
"week",
"dayofweek",
"dayofyear",
"quarter",
"qyear",
"days_in_month",
"daysinmonth",
]
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ["strftime", "to_timestamp", "asfreq"]
# --------------------------------------------------------------------
# Constructors
def __init__(self, values, freq=None, dtype=None, copy=False):
freq = validate_dtype_freq(dtype, freq)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
if isinstance(values, ABCSeries):
values = values._values
if not isinstance(values, type(self)):
raise TypeError("Incorrect dtype")
elif isinstance(values, ABCPeriodIndex):
values = values._values
if isinstance(values, type(self)):
if freq is not None and freq != values.freq:
raise raise_on_incompatible(values, freq)
values, freq = values._data, values.freq
values = np.array(values, dtype="int64", copy=copy)
self._data = values
if freq is None:
raise ValueError("freq is not specified and cannot be inferred")
self._dtype = PeriodDtype(freq)
@classmethod
def _simple_new(cls, values: np.ndarray, freq=None, **kwargs):
# alias for PeriodArray.__init__
assert isinstance(values, np.ndarray) and values.dtype == "i8"
return cls(values, freq=freq, **kwargs)
@classmethod
def _from_sequence(
cls,
scalars: Sequence[Optional[Period]],
dtype: Optional[PeriodDtype] = None,
copy: bool = False,
) -> ABCPeriodArray:
if dtype:
freq = dtype.freq
else:
freq = None
if isinstance(scalars, cls):
validate_dtype_freq(scalars.dtype, freq)
if copy:
scalars = scalars.copy()
return scalars
periods = np.asarray(scalars, dtype=object)
if copy:
periods = periods.copy()
freq = freq or libperiod.extract_freq(periods)
ordinals = libperiod.extract_ordinals(periods, freq)
return cls(ordinals, freq=freq)
@classmethod
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
return cls._from_sequence(strings, dtype, copy)
@classmethod
def _from_datetime64(cls, data, freq, tz=None):
"""
Construct a PeriodArray from a datetime64 array
Parameters
----------
data : ndarray[datetime64[ns], datetime64[ns, tz]]
freq : str or Tick
tz : tzinfo, optional
Returns
-------
PeriodArray[freq]
"""
data, freq = dt64arr_to_periodarr(data, freq, tz)
return cls(data, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
periods = dtl.validate_periods(periods)
if freq is not None:
freq = Period._maybe_convert_freq(freq)
field_count = len(fields)
if start is not None or end is not None:
if field_count > 0:
raise ValueError(
"Can either instantiate from fields or endpoints, but not both"
)
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError("Not enough parameters to construct Period range")
return subarr, freq
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value: Union[Period, NaTType]) -> int:
if value is NaT:
return value.value
elif isinstance(value, self._scalar_type):
if not isna(value):
self._check_compatible_with(value)
return value.ordinal
else:
raise ValueError(f"'value' should be a Period. Got '{value}' instead.")
def _scalar_from_string(self, value: str) -> Period:
return Period(value, freq=self.freq)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
if self.freqstr != other.freqstr:
raise raise_on_incompatible(self, other)
# --------------------------------------------------------------------
# Data / Attributes
@cache_readonly
def dtype(self):
return self._dtype
# error: Read-only property cannot override read-write property [misc]
@property # type: ignore
def freq(self):
"""
Return the frequency object for this PeriodArray.
"""
return self.dtype.freq
def __array__(self, dtype=None) -> np.ndarray:
# overriding DatetimelikeArray
return np.array(list(self), dtype=object)
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow
from pandas.core.arrays._arrow_utils import ArrowPeriodType
if type is not None:
if pyarrow.types.is_integer(type):
return pyarrow.array(self._data, mask=self.isna(), type=type)
elif isinstance(type, ArrowPeriodType):
# ensure we have the same freq
if self.freqstr != type.freq:
raise TypeError(
"Not supported to convert PeriodArray to array with different "
f"'freq' ({self.freqstr} vs {type.freq})"
)
else:
raise TypeError(
f"Not supported to convert PeriodArray to '{type}' type"
)
period_type = ArrowPeriodType(self.freqstr)
storage_array = pyarrow.array(self._data, mask=self.isna(), type="int64")
return pyarrow.ExtensionArray.from_storage(period_type, storage_array)
# --------------------------------------------------------------------
# Vectorized analogues of Period properties
year = _field_accessor(
"year",
0,
"""
The year of the period.
""",
)
month = _field_accessor(
"month",
3,
"""
The month as January=1, December=12.
""",
)
day = _field_accessor(
"day",
4,
"""
The days of the period.
""",
)
hour = _field_accessor(
"hour",
5,
"""
The hour of the period.
""",
)
minute = _field_accessor(
"minute",
6,
"""
The minute of the period.
""",
)
second = _field_accessor(
"second",
7,
"""
The second of the period.
""",
)
weekofyear = _field_accessor(
"week",
8,
"""
The week ordinal of the year.
""",
)
week = weekofyear
dayofweek = _field_accessor(
"dayofweek",
10,
"""
The day of the week with Monday=0, Sunday=6.
""",
)
weekday = dayofweek
dayofyear = day_of_year = _field_accessor(
"dayofyear",
9,
"""
The ordinal day of the year.
""",
)
quarter = _field_accessor(
"quarter",
2,
"""
The quarter of the date.
""",
)
qyear = _field_accessor("qyear", 1)
days_in_month = _field_accessor(
"days_in_month",
11,
"""
The number of days in the month.
""",
)
daysinmonth = days_in_month
@property
def is_leap_year(self):
"""
Logical indicating if the date belongs to a leap year.
"""
return isleapyear_arr(np.asarray(self.year))
@property
def start_time(self):
return self.to_timestamp(how="start")
@property
def end_time(self):
return self.to_timestamp(how="end")
def to_timestamp(self, freq=None, how="start"):
"""
Cast to DatetimeArray/Index.
Parameters
----------
freq : str or DateOffset, optional
Target frequency. The default is 'D' for week or longer,
'S' otherwise.
how : {'s', 'e', 'start', 'end'}
Whether to use the start or end of the time period being converted.
Returns
-------
DatetimeArray/Index
"""
from pandas.core.arrays import DatetimeArray
how = libperiod._validate_end_alias(how)
end = how == "E"
if end:
if freq == "B":
# roll forward to ensure we land on B date
adjust = Timedelta(1, "D") - Timedelta(1, "ns")
return self.to_timestamp(how="start") + adjust
else:
adjust = Timedelta(1, "ns")
return (self + self.freq).to_timestamp(how="start") - adjust
if freq is None:
base, mult = libfrequencies.get_freq_code(self.freq)
freq = libfrequencies.get_to_timestamp_base(base)
else:
freq = Period._maybe_convert_freq(freq)
base, mult = libfrequencies.get_freq_code(freq)
new_data = self.asfreq(freq, how=how)
new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base)
return DatetimeArray._from_sequence(new_data, freq="infer")
# --------------------------------------------------------------------
# Array-like / EA-Interface Methods
def _values_for_argsort(self):
return self._data
# --------------------------------------------------------------------
def _time_shift(self, periods, freq=None):
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or str
Frequency increment to shift by.
"""
if freq is not None:
raise TypeError(
"`freq` argument is not supported for "
f"{type(self).__name__}._time_shift"
)
values = self.asi8 + periods * self.freq.n
if self._hasnans:
values[self._isnan] = iNaT
return type(self)(values, freq=self.freq)
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def asfreq(self, freq=None, how="E"):
"""
Convert the Period Array/Index to the specified frequency `freq`.
Parameters
----------
freq : str
A frequency.
how : str {'E', 'S'}
Whether the elements should be aligned to the end
or start within a period.
* 'E', 'END', or 'FINISH' for end,
* 'S', 'START', or 'BEGIN' for start.
January 31st ('END') vs. January 1st ('START') for example.
Returns
-------
Period Array/Index
Constructed with the new frequency.
Examples
--------
>>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
>>> pidx
PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
dtype='period[A-DEC]', freq='A-DEC')
>>> pidx.asfreq('M')
PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
'2015-12'], dtype='period[M]', freq='M')
>>> pidx.asfreq('M', how='S')
PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
'2015-01'], dtype='period[M]', freq='M')
"""
how = libperiod._validate_end_alias(how)
freq = Period._maybe_convert_freq(freq)
base1, mult1 = libfrequencies.get_freq_code(self.freq)
base2, mult2 = libfrequencies.get_freq_code(freq)
asi8 = self.asi8
# mult1 can't be negative or 0
end = how == "E"
if end:
ordinal = asi8 + mult1 - 1
else:
ordinal = asi8
new_data = period_asfreq_arr(ordinal, base1, base2, end)
if self._hasnans:
new_data[self._isnan] = iNaT
return type(self)(new_data, freq=freq)
# ------------------------------------------------------------------
# Rendering Methods
def _formatter(self, boxed=False):
if boxed:
return str
return "'{}'".format
def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
"""
actually format my specific types
"""
values = self.astype(object)
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: str(dt)
if self._hasnans:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
# ------------------------------------------------------------------
def astype(self, dtype, copy=True):
# We handle Period[T] -> Period[U]
# Our parent handles everything else.
dtype = pandas_dtype(dtype)
if is_period_dtype(dtype):
return self.asfreq(dtype.freq)
return super().astype(dtype, copy=copy)
# ------------------------------------------------------------------
# Arithmetic Methods
def _sub_datelike(self, other):
assert other is not NaT
return NotImplemented
def _sub_period(self, other):
# If the operation is well-defined, we return an object-Index
# of DateOffsets. Null entries are filled with pd.NaT
self._check_compatible_with(other)
asi8 = self.asi8
new_data = asi8 - other.ordinal
new_data = np.array([self.freq * x for x in new_data])
if self._hasnans:
new_data[self._isnan] = NaT
return new_data
def _addsub_int_array(
self, other: np.ndarray, op: Callable[[Any, Any], Any],
) -> "PeriodArray":
"""
Add or subtract array of integers; equivalent to applying
`_time_shift` pointwise.
Parameters
----------
other : np.ndarray[integer-dtype]
op : {operator.add, operator.sub}
Returns
-------
result : PeriodArray
"""
assert op in [operator.add, operator.sub]
if op is operator.sub:
other = -other
res_values = algos.checked_add_with_arr(self.asi8, other, arr_mask=self._isnan)
res_values = res_values.view("i8")
res_values[self._isnan] = iNaT
return type(self)(res_values, freq=self.freq)
def _add_offset(self, other):
assert not isinstance(other, Tick)
base = libfrequencies.get_base_alias(other.rule_code)
if base != self.freq.rule_code:
raise raise_on_incompatible(self, other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
result = super()._add_timedeltalike_scalar(other.n)
return type(self)(result, freq=self.freq)
def _add_timedeltalike_scalar(self, other):
"""
Parameters
----------
other : timedelta, Tick, np.timedelta64
Returns
-------
result : ndarray[int64]
"""
assert isinstance(self.freq, Tick) # checked by calling function
assert isinstance(other, (timedelta, np.timedelta64, Tick))
if notna(other):
# special handling for np.timedelta64("NaT"), avoid calling
# _check_timedeltalike_freq_compat as that would raise TypeError
other = self._check_timedeltalike_freq_compat(other)
# Note: when calling parent class's _add_timedeltalike_scalar,
# it will call delta_to_nanoseconds(delta). Because delta here
# is an integer, delta_to_nanoseconds will return it unchanged.
ordinals = super()._add_timedeltalike_scalar(other)
return ordinals
def _add_delta_tdi(self, other):
"""
Parameters
----------
other : TimedeltaArray or ndarray[timedelta64]
Returns
-------
result : ndarray[int64]
"""
assert isinstance(self.freq, Tick) # checked by calling function
if not np.all(isna(other)):
delta = self._check_timedeltalike_freq_compat(other)
else:
# all-NaT TimedeltaIndex is equivalent to a single scalar td64 NaT
return self + np.timedelta64("NaT")
return self._addsub_int_array(delta, operator.add).asi8
def _add_delta(self, other):
"""
Add a timedelta-like, Tick, or TimedeltaIndex-like object
to self, yielding a new PeriodArray
Parameters
----------
other : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : PeriodArray
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
raise raise_on_incompatible(self, other)
new_ordinals = super()._add_delta(other)
return type(self)(new_ordinals, freq=self.freq)
def _check_timedeltalike_freq_compat(self, other):
"""
Arithmetic operations with timedelta-like scalars or array `other`
are only valid if `other` is an integer multiple of `self.freq`.
If the operation is valid, find that integer multiple. Otherwise,
raise because the operation is invalid.
Parameters
----------
other : timedelta, np.timedelta64, Tick,
ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
Returns
-------
multiple : int or ndarray[int64]
Raises
------
IncompatibleFrequency
"""
assert isinstance(self.freq, Tick) # checked by calling function
own_offset = frequencies.to_offset(self.freq.rule_code)
base_nanos = delta_to_nanoseconds(own_offset)
if isinstance(other, (timedelta, np.timedelta64, Tick)):
nanos = delta_to_nanoseconds(other)
elif isinstance(other, np.ndarray):
# numpy timedelta64 array; all entries must be compatible
assert other.dtype.kind == "m"
if other.dtype != _TD_DTYPE:
# i.e. non-nano unit
# TODO: disallow unit-less timedelta64
other = other.astype(_TD_DTYPE)
nanos = other.view("i8")
else:
# TimedeltaArray/Index
nanos = other.asi8
if np.all(nanos % base_nanos == 0):
# nanos being added is an integer multiple of the
# base-frequency to self.freq
delta = nanos // base_nanos
# delta is the integer (or integer-array) number of periods
# by which will be added to self.
return delta
raise raise_on_incompatible(self, other)
def raise_on_incompatible(left, right):
"""
Helper function to render a consistent error message when raising
IncompatibleFrequency.
Parameters
----------
left : PeriodArray
right : None, DateOffset, Period, ndarray, or timedelta-like
Returns
-------
IncompatibleFrequency
Exception to be raised by the caller.
"""
# GH#24283 error message format depends on whether right is scalar
if isinstance(right, np.ndarray) or right is None:
other_freq = None
elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)):
other_freq = right.freqstr
else:
other_freq = _delta_to_tick(Timedelta(right)).freqstr
msg = DIFFERENT_FREQ.format(
cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq
)
return IncompatibleFrequency(msg)
# -------------------------------------------------------------------
# Constructor Helpers
def period_array(
data: Sequence[Optional[Period]],
freq: Optional[Union[str, Tick]] = None,
copy: bool = False,
) -> PeriodArray:
"""
Construct a new PeriodArray from a sequence of Period scalars.
Parameters
----------
data : Sequence of Period objects
A sequence of Period objects. These are required to all have
the same ``freq.`` Missing values can be indicated by ``None``
or ``pandas.NaT``.
freq : str, Tick, or Offset
The frequency of every element of the array. This can be specified
to avoid inferring the `freq` from `data`.
copy : bool, default False
Whether to ensure a copy of the data is made.
Returns
-------
PeriodArray
See Also
--------
PeriodArray
pandas.PeriodIndex
Examples
--------
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A')])
<PeriodArray>
['2017', '2018']
Length: 2, dtype: period[A-DEC]
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A'),
... pd.NaT])
<PeriodArray>
['2017', '2018', 'NaT']
Length: 3, dtype: period[A-DEC]
Integers that look like years are handled
>>> period_array([2000, 2001, 2002], freq='D')
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
Datetime-like strings may also be passed
>>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
<PeriodArray>
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
Length: 4, dtype: period[Q-DEC]
"""
if is_datetime64_dtype(data):
return PeriodArray._from_datetime64(data, freq)
if isinstance(data, (ABCPeriodIndex, ABCSeries, PeriodArray)):
return PeriodArray(data, freq)
# other iterable of some kind
if not isinstance(data, (np.ndarray, list, tuple)):
data = list(data)
data = np.asarray(data)
dtype: Optional[PeriodDtype]
if freq:
dtype = PeriodDtype(freq)
else:
dtype = None
if is_float_dtype(data) and len(data) > 0:
raise TypeError("PeriodIndex does not allow floating point in construction")
data = ensure_object(data)
return PeriodArray._from_sequence(data, dtype=dtype)
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = frequencies.to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)  # api: pandas.core.dtypes.common.pandas_dtype
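# Editor's sketch (illustrative, not part of the vendored pandas module above): the
# ordinals + freq decomposition described in the PeriodArray notes, via the public API.
import pandas as pd

arr = pd.array(["2020-01", "2020-02", None], dtype="period[M]")   # a PeriodArray
print(type(arr).__name__)   # PeriodArray
print(arr.asi8)             # int64 ordinals; the missing value is stored as iNaT
print(arr.freq)             # <MonthEnd>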
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import pandas as pd
import re
import util
import os
import entrez as ez
import stats
import parallel
import xgmml
import db
import random
from six.moves import range
import setting
class Cache(object):
DATA_DIR=setting.go['DATA_DIR']
# share by all tax_id
CATEGORY={'LOCAL':None, 'GPDB':None, 'L1k':None}
GO_DESCRIPTION={'LOCAL':None, 'GPDB':None, 'L1k':None}
GO_CATEGORY={'LOCAL':None, 'GPDB':None, 'L1k':None}
# per tax_id
TOTAL_GENE_COUNT={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
ALL_GENE={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
CATEGORY_COUNT={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
GO_GENE_ENRICH={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
GO_GENE={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
GENE_GO={'LOCAL':{}, 'GPDB':{}, 'L1k':{}}
N_TRIVIAL=800
@staticmethod
def get(l_use_GPDB=True, tax_id=9606, l_L1k=False):
if tax_id==-9606: tax_id=9606
s_key=Cache.key(l_use_GPDB, l_L1k)
if l_L1k and tax_id!=9606:
util.error_msg('L1k is only for tax_id 9606!')
if tax_id not in Cache.TOTAL_GENE_COUNT[s_key]:
Cache.load(tax_id=tax_id, l_use_GPDB=l_use_GPDB, l_L1k=l_L1k)
#if l_L1k:
# Cache.loadL1k()
#else:
# Cache.load(tax_id=tax_id, l_use_GPDB=l_use_GPDB)
return (Cache.CATEGORY[s_key],
Cache.GO_DESCRIPTION[s_key],
Cache.GO_CATEGORY[s_key],
# per tax_id, above are shared across tax_id
Cache.TOTAL_GENE_COUNT[s_key][tax_id],
Cache.ALL_GENE[s_key][tax_id],
Cache.CATEGORY_COUNT[s_key][tax_id],
Cache.GO_GENE_ENRICH[s_key][tax_id],
Cache.GO_GENE[s_key][tax_id],
Cache.GENE_GO[s_key][tax_id]
)
@staticmethod
def info():
for s_key in ('LOCAL','GPDB', 'L1k'):
print(">Databases: %s" % s_key)
print("CATEGORY=%d" % (0 if Cache.CATEGORY[s_key] is None else len(Cache.CATEGORY[s_key])))
print("GO_DESCRIPTION=%d" % (0 if Cache.GO_DESCRIPTION[s_key] is None else len(Cache.GO_DESCRIPTION[s_key])))
print("GO_CATEGORY=%d" % (0 if Cache.GO_CATEGORY[s_key] is None else len(Cache.GO_CATEGORY[s_key])))
for tax_id in Cache.TOTAL_GENE_COUNT[s_key].keys():
print("TAX_ID=%d (%s)" % (tax_id, ez.Cache.C_TAX_NAME.get(tax_id, "UNKNOWN")))
print("TOTAL_GENE_COUNT=%d" % Cache.TOTAL_GENE_COUNT[s_key][tax_id])
print("ALL_GENE=%d" % len(Cache.ALL_GENE[s_key][tax_id]))
print("CATEGORY_COUNT=%d" % len(Cache.CATEGORY_COUNT[s_key][tax_id]))
print("GO_GENE_ENRICH=%d" % len(Cache.GO_GENE_ENRICH[s_key][tax_id]))
print("GO_GENE=%d" % len(Cache.GO_GENE[s_key][tax_id]))
print("GENE_GO=%d" % len(Cache.GENE_GO[s_key][tax_id]))
print("")
@staticmethod
def unload(tax_id, l_use_GPDB, l_L1k):
if tax_id==-9606: tax_id=9606
s_key=Cache.key(l_use_GPDB, l_L1k)
if tax_id in Cache.TOTAL_GENE_COUNT[s_key]:
del Cache.CATEGORY_COUNT[s_key][tax_id]
del Cache.TOTAL_GENE_COUNT[s_key][tax_id]
del Cache.ALL_GENE[s_key][tax_id]
del Cache.GO_GENE_ENRICH[s_key][tax_id]
del Cache.GO_GENE[s_key][tax_id]
del Cache.GENE_GO[s_key][tax_id]
@staticmethod
def key(l_use_GPDB, l_L1k):
if l_L1k: return "L1k"
return 'GPDB' if l_use_GPDB else 'LOCAL'
@staticmethod
def load(tax_id=9606, l_use_GPDB=True, user_go=None, l_L1k=False):
"""tax_id is None, defaults to 9606, if 0, means load all supported species,
entrez_gene is only used in local mode to accelerate Symbol retrieval"""
if tax_id is None:
util.error_msg('tax_id must be an int, or 0 meaning all supported species')
tax_id=abs(tax_id)
s_key=Cache.key(l_use_GPDB, l_L1k=l_L1k)
if tax_id!=0 and tax_id in Cache.TOTAL_GENE_COUNT[s_key]: return
S_tax_id=[]
# performance optimization
if l_L1k: return Cache.loadL1k()
if not l_use_GPDB:
if tax_id not in (0,9606):
util.error_msg('Local database only supports human!')
tax_id=9606
if tax_id in Cache.TOTAL_GENE_COUNT[s_key]: return
S_tax_id=[tax_id]
else:
mydb=db.DB('METASCAPE')
if tax_id>0:
S_tax_id=[tax_id]
else:
t=mydb.from_sql('SELECT DISTINCT tax_id FROM gid2source_id')
S_tax_id=[x for x in t.tax_id.astype(int).tolist() if x not in Cache.TOTAL_GENE_COUNT[s_key]]
if len(S_tax_id)==0: return
s_tax_id=",".join(util.iarray2sarray(S_tax_id))
print("Load %s GO database for tax_id: %s ..." % (s_key, s_tax_id))
if l_use_GPDB:
s_where_L1k="term_category_id>=91" if l_L1k else "term_category_id<91"
if Cache.CATEGORY[s_key] is None:
t=mydb.from_sql("SELECT term_category_id,category_name FROM term_category where "+s_where_L1k)
Cache.CATEGORY[s_key] = {t.ix[i,'term_category_id']:t.ix[i,'category_name'] for i in t.index}
t=mydb.from_sql("SELECT t.term_id GO,term_name AS DESCRIPTION,term_category_id CATEGORY_ID FROM term t where "+s_where_L1k)
X=t.DESCRIPTION.isnull()
if sum(X):
t.ix[X, 'DESCRIPTION']=t.ix[X, 'GO']
#if not util.is_python3():
# t['DESCRIPTION']=t['DESCRIPTION'].apply(lambda x: unicode(x, encoding="ISO-8859-1", errors='ignore')) # L1000 has micro Mol
Cache.GO_DESCRIPTION[s_key]=dict(zip(t.GO, t.DESCRIPTION))
t['CATEGORY_ID']=t['CATEGORY_ID'].astype(int)
Cache.GO_CATEGORY[s_key]={re.sub(r'^\d+_', '', row.GO):int(row.CATEGORY_ID) for row in t.itertuples() }
if tax_id==0:
t=mydb.from_sql("SELECT COUNT(*) as N,tax_id FROM annotation a where a.annotation_type_id=3 AND content='protein-coding' group by tax_id")
else:
t=mydb.sql_in("SELECT COUNT(*) as N,tax_id FROM annotation a where a.annotation_type_id=3 AND content='protein-coding' and tax_id in (", ") group by tax_id", S_tax_id)
Cache.TOTAL_GENE_COUNT[s_key]=dict(zip(t.tax_id, t.N))
if tax_id==0:
t=mydb.from_sql("SELECT term_id GO,gids GENES,tax_id FROM term2gids where "+s_where_L1k)
else:
t=mydb.sql_in("SELECT term_id GO,gids GENES,tax_id FROM term2gids WHERE "+s_where_L1k+" and tax_id in (", ")", S_tax_id)
#tmp=t[t.GO.apply(lambda x: x.startswith('6'))]
#print tmp[:4]
else:
DATA_FILE=setting.go['DATA_FILE']
#TAX_ID,GeneID
t_gene=pd.read_csv(DATA_FILE)
t_gene=t_gene[t_gene.TAX_ID==tax_id]
C_GENE=set(t_gene['GeneID'].astype(str).tolist())
Cache.TOTAL_GENE_COUNT[s_key][tax_id]=len(C_GENE)
if user_go is not None:
if os.path.isfile(user_go):
if user_go.upper().endswith(".CSV"):
t=pd.read_csv(user_go)
else:
t=pd.read_table(user_go)
elif os.path.isfile(Cache.DATA_DIR+"AllAnnotations.tsv"):
t=pd.read_csv(Cache.DATA_DIR+"AllAnnotations.tsv", sep='\t')
if t is None:
util.error_msg('No GO Annotations available.')
#GO TYPE GENES DESCRIPTION
S=util.unique(t.TYPE)
Cache.CATEGORY[s_key] = dict(zip(S, S))
Cache.GO_CATEGORY[s_key]=dict(zip(t.GO, t.TYPE))
Cache.GO_DESCRIPTION[s_key]=dict(zip(t.GO, t.DESCRIPTION))
t['tax_id']=tax_id
for x in S_tax_id:
Cache.ALL_GENE[s_key][x]=set()
Cache.GENE_GO[s_key][x]={}
Cache.GO_GENE[s_key][x]={}
Cache.CATEGORY_COUNT[s_key][x]={}
Cache.GO_GENE_ENRICH[s_key][x]=set()
#sw=util.StopWatch("AAAAAAA")
for tax_id2,t_v in t.groupby('tax_id'):
#t_v=t_v.copy()
GENE_GO={}
GO_GENE={}
GO_GENE_ENRICH=set()
ALL_GENE=set()
CATEGORY_COUNT={}
s_cat=0
S_genes=[ (row.GO, row.GENES.split(",")) for row in t_v.itertuples() ]
if not l_use_GPDB:
S_genes=[ (x, [y for y in Y if (y in C_GENE)]) for x,Y in S_genes ]
GO_GENE={x: set(Y) for x,Y in S_genes if (len(Y)>0 and len(Y)<=Cache.N_TRIVIAL) }
GO_GENE_ENRICH=set(GO_GENE.keys())
if l_use_GPDB:
for x in GO_GENE_ENRICH:
if re.sub(r'^\d+_', '', x) not in Cache.GO_CATEGORY[s_key]:
print(">>>>>>>>>>>>>>>>>>>", x, s_key, re.sub(r'^\d+_', '', x))
exit()
S_cat=[ Cache.GO_CATEGORY[s_key][re.sub(r'^\d+_','', x)] for x in GO_GENE_ENRICH ]
else:
S_cat=[ Cache.GO_CATEGORY[s_key][x] for x in GO_GENE_ENRICH ]
CATEGORY_COUNT=util.unique_count(S_cat)
# reduce is slower
#ALL_GENE=reduce(lambda a,b : a|b, GO_GENE.values())
ALL_GENE=set([x for Y in GO_GENE.values() for x in Y])
#for row in t_v.itertuples():
##for i in t_v.index:
# s_go=row.GO #t_v.ix[i, 'GO']
# S_genes=row.GENES.split(",") #t_v.ix[i, 'GENES'].split(",")
# if not l_use_GPDB:
# ### warning, gene ids not recognized are treated as tax ID 0!!!
# S_genes=[s for s in S_genes if s in C_GENE]
# if len(S_genes)==0: continue
# if len(S_genes)<=Cache.N_TRIVIAL:
# GO_GENE_ENRICH.add(s_go)
# if l_use_GPDB:
# s_cat=Cache.GO_CATEGORY[s_key].get(re.sub(r'^\d+_','',s_go), 0)
# CATEGORY_COUNT[s_cat]=CATEGORY_COUNT.get(s_cat, 0)+1
# GO_GENE[s_go]=set(S_genes)
# ALL_GENE.update(GO_GENE[s_go])
#sw.check("TTTTTTTTT "+str(tax_id))
for k,v in GO_GENE.items():
for s_gene in v:
if s_gene not in GENE_GO:
GENE_GO[s_gene]={k}
else:
GENE_GO[s_gene].add(k)
Cache.ALL_GENE[s_key][tax_id2]=ALL_GENE
Cache.GENE_GO[s_key][tax_id2]=GENE_GO
Cache.TOTAL_GENE_COUNT[s_key][tax_id2]=max(Cache.TOTAL_GENE_COUNT[s_key][tax_id2], len(GENE_GO))
Cache.CATEGORY_COUNT[s_key][tax_id2]=CATEGORY_COUNT
Cache.GO_GENE[s_key][tax_id2]=GO_GENE
Cache.GO_GENE_ENRICH[s_key][tax_id2]=GO_GENE_ENRICH
if l_L1k:
s_path=setting.go['L1000_PATH']
S_gene=util.read_list(s_path+'/L1kAllGenes.txt')
Cache.ALL_GENE[s_key][tax_id]=set(S_gene)
Cache.TOTAL_GENE_COUNT[s_key][tax_id]=len(S_gene)
@staticmethod
def loadL1k():
"""Load L1000 terms"""
sw=util.StopWatch()
print("Loading L1k terms ...")
tax_id=9606
s_key="L1k"
s_path=setting.go['L1000_PATH']
S_gene=util.read_list(s_path+"/L1kAllGenes.txt")
Cache.TOTAL_GENE_COUNT[s_key][tax_id]=len(S_gene)
t1=pd.read_csv(s_path+"/Term2Gid_L1000_PhaseI.csv")
t2=pd.read_csv(s_path+"/Term2Gid_L1000_PhaseII.csv")
t=pd.concat([t1, t2], ignore_index=True)
t['category_id']=t['category_id'].astype(int)
#t=t1[:600000]
Cache.ALL_GENE[s_key][tax_id]=set(S_gene)
Cache.GENE_GO[s_key][tax_id]={}
Cache.GO_GENE[s_key][tax_id]={}
Cache.CATEGORY_COUNT[s_key][tax_id]={}
Cache.GO_GENE_ENRICH[s_key][tax_id]=set()
sw.check('Loaded CSV')
s_type='BP'
Cache.CATEGORY[s_key]={91:'shRNA',92:'Cpd',93:'cDNA',94:'Lgnd'}
Cache.CATEGORY_COUNT[s_key][tax_id]={}
Cache.GO_CATEGORY[s_key]={}
Cache.GO_DESCRIPTION[s_key]={}
CATEGORY_COUNT={}
t['GO_ID']=[ str(row.category_id)+"_"+row.term_id for row in t.itertuples() ]
Cache.GO_CATEGORY[s_key]=dict(zip(t.GO_ID, t.category_id))
Cache.GO_DESCRIPTION[s_key]=dict(zip(t.GO_ID, t.term_name))
CATEGORY_COUNT=util.unique_count(t.category_id)
Cache.GO_GENE[s_key][tax_id]={ row.GO_ID:set(row.gids.split(",")) for row in t.itertuples() }
for s_go_id,S_gene in Cache.GO_GENE[s_key].items():
for s_gene in S_gene:
if s_gene not in Cache.GENE_GO[s_key][tax_id]:
Cache.GENE_GO[s_key][tax_id][s_gene]={s_go_id}
else:
Cache.GENE_GO[s_key][tax_id][s_gene].add(s_go_id)
##for i in t.index:
#for row in t.itertuples():
# i_cat=row.category_id #t.ix[i,'category_id']
# s_go=row.term_id #t.ix[i, 'term_id']
# s_des=row.term_name #t.ix[i,'term_name']
# S_gene=row.gids.split(",") #t.ix[i,'gids'].split(',')
# s_go_id=str(i_cat)+"_"+s_go
# Cache.GO_CATEGORY[s_key][s_go_id]=i_cat
# Cache.GO_DESCRIPTION[s_key][s_go_id]=s_des
# CATEGORY_COUNT[i_cat]=CATEGORY_COUNT.get(i_cat, 0)+1
# #if len(S_genes)<=Cache.N_TRIVIAL:
# Cache.GO_GENE[s_key][tax_id][s_go_id]=set(S_gene)
# for s_gene in S_gene:
# if s_gene not in Cache.GENE_GO[s_key][tax_id]:
# Cache.GENE_GO[s_key][tax_id][s_gene]={s_go_id}
# else:
# Cache.GENE_GO[s_key][tax_id][s_gene].add(s_go_id)
Cache.CATEGORY_COUNT[s_key][tax_id]=CATEGORY_COUNT
Cache.GO_GENE_ENRICH[s_key][tax_id]=set(Cache.GO_GENE[s_key][tax_id].keys())
sw.check("Done L1k")
print('Done L1k loading')
class GO(object):
GO_ROOT={"BP":"GO:0008150","MF":"GO:0003674","CC":"GO:0005575"}
#GO_DESCRIPTION={"BP":"Biological Process","MF":"Molecular Function","CC":"Cellular Component"}
# used for l_use_GPDB GO Categories
# GO: BP:19, MF:21, CC:20
# GeneGo: Pathway Map:27, Go Processes:31, Drug Target:29, Disease:28
# Custom: custom gene sets
# KEGG: pathway:24, MF: 26, CC:25
# MSigDB: Pathway:11, BioCarta:15, Hallmark:23, Reactome:6, Onc Set:4, Immu Set:3, Chem/Genetics 13
def get_source_id(self,term_id):
return re.sub(r'^\d*_','',term_id)
def get_category_id(self,term_id):
if re.search(r'^\d+_', term_id):
t = term_id.split('_')
return int(t[0])
else:
return self.GO_CATEGORY.get(term_id, None)
#the line below breaks on hsa_M00055
#return int(t[0]) if len(t)>1 else None
def get_categories(self):
if self.GPDB:
mydb=db.DB('METASCAPE')
t=mydb.from_sql("SELECT term_category_id,category_name,ds data_source FROM term_category")
return t
return None
@staticmethod
def get_go_categories():
mydb=db.DB('METASCAPE')
t=mydb.from_sql("select c.term_category_id,c.category_name,c.category_group,c.category_group_name_membership from term_category c where c.used_in_enrichment='Y' order by category_group,term_category_id")
return t
def __init__(self, tax_id=None, l_use_GPDB=False, entrez=None, l_L1k=False, r_random=0):
self.eg=entrez
self.GPDB=l_use_GPDB
self.tax_id=tax_id
self.L1k=l_L1k
(self.CATEGORY, self.GO_DESCRIPTION, self.GO_CATEGORY, self.TOTAL_GENE_COUNT, self.ALL_GENE, self.CATEGORY_COUNT, self.GO_GENE_ENRICH, self.GO_GENE, self.GENE_GO)=Cache.get(tax_id=tax_id, l_use_GPDB=l_use_GPDB, l_L1k=l_L1k)
if r_random>0:
self.GO_GENE_ENRICH=random.sample(self.GO_GENE_ENRICH, int(len(self.GO_GENE_ENRICH)*r_random))
def is_L1000(self):
return self.L1k
def go_description(self, s_go):
if s_go in self.GO_DESCRIPTION:
return self.GO_DESCRIPTION[s_go]
return s_go
def filter_genes_by_go(self, s_go, S_genes):
return [ x for x in S_genes if s_go in self.GENE_GO.get(x, []) ]
#if s_go in self.GO_GENE:
# return list(set(S_genes).intersection(self.GO_GENE[s_go]))
#else:
# return []
def go_size(self, s_go):
#return len([True for v in self.GENE_GO.values() if s_go in v ])
return len(self.GO_GENE.get(s_go, []))
def analysis_go(self, s_go, S_hit, N_total=0, SRC_GENE=None, min_overlap=3, p_cutoff=0.01):
c={'GO':s_go, '#TotalGeneInLibrary':N_total, '#GeneInGO':0, '#GeneInHitList':0, '#GeneInGOAndHitList':0, 'LogP':0.0, 'Enrichment':0}
#if SRC_GENE is not None:
# print "SRC_GENE: "+str(len(SRC_GENE))
S_gene=self.GO_GENE[s_go]
if len(S_gene)>=Cache.N_TRIVIAL:
return None
if not N_total:
N_total=len(self.GENE_GO.keys()) #len(self.ALL_GENE), only count genes that has GO annotation
if SRC_GENE is not None:
S_gene=S_gene.intersection(SRC_GENE)
S_hit=set(S_hit).intersection(SRC_GENE)
else:
S_hit=set(S_hit)
S_hit=self.ALL_GENE.intersection(S_hit)
c['#GeneInGO']=len(S_gene)
c['#GeneInHitList']=len(S_hit)
if c['#GeneInGO']<min_overlap or c['#GeneInHitList']<min_overlap:
return None
S_both=S_gene.intersection(S_hit)
c['#GeneInGOAndHitList']=len(S_both)
if c['#GeneInGOAndHitList']<min_overlap:
return None
c['%InGO']=c['#GeneInGOAndHitList']*100.0/c['#GeneInHitList']
q=min(max(c['%InGO']/100, 1.0/c['#GeneInHitList']), 1-1.0/c['#GeneInHitList'])
c['STDV %InGO']=np.sqrt(q*(1-q)/c['#GeneInHitList'])*100
c['Enrichment']=c['%InGO']/100.0*N_total/c['#GeneInGO']
S=[int(x) for x in S_both]
S.sort()
c['GeneID']='|'.join([str(x) for x in S])
c['LogP']=np.log10(max(
stats.hyper(c['#GeneInGOAndHitList'], N_total, c['#GeneInGO'], c['#GeneInHitList']), 1e-100))
# GeneGo Z-score definition
c['Z-score']=stats.ZScore_GeneGo(c['#GeneInGOAndHitList'], N_total, c['#GeneInGO'], c['#GeneInHitList'])
if c['LogP']>np.log10(p_cutoff): return None
return c
def analysis_go_RSA(self, s_go, S_hit, S_score, N_total=0, SRC_GENE=None, min_overlap=3, p_cutoff=0.01, l_keep_most=True):
"""Input is a list of hits with scores, the smaller the score, the better!
We then iteratively try different score cutoffs and use the cutoff that produces the best P-value (Bonferroni corrected)"""
c={'GO':s_go, '#TotalGeneInLibrary':N_total, '#GeneInGO':0, '#GeneInHitList':0, '#GeneInGOAndHitList':0, 'Cutoff':None, '#HitRemain':0, '#HitInGORemain':0, 'LogP':0.0, 'Enrichment':0}
#if SRC_GENE is not None:
# print "SRC_GENE: "+str(len(SRC_GENE))
S_gene=self.GO_GENE[s_go]
if len(S_gene)>=Cache.N_TRIVIAL:
return None
if not N_total:
N_total=len(self.GENE_GO) #len(self.ALL_GENE), only count genes that has GO annotation
t_hit=pd.DataFrame(data={'Hit':S_hit, 'Score':S_score})
if SRC_GENE is not None:
S_gene=S_gene.intersection(SRC_GENE)
t_hit=t_hit[ t_hit.Hit.apply(lambda x: x in SRC_GENE) ]
S_hit=set(t_hit.Hit)
else:
S_hit=set(S_hit)
t_hit.sort_values('Score', inplace=True)
c['#GeneInGO']=len(S_gene)
c['#GeneInHitList']=len(S_hit)
if c['#GeneInGO']<min_overlap or c['#GeneInHitList']<min_overlap:
return None
S_both=S_gene.intersection(S_hit)
c['#GeneInGOAndHitList']=len(S_both)
if c['#GeneInGOAndHitList']<min_overlap:
return None
I_index=np.arange(len(t_hit))[t_hit.Hit.apply(lambda x: x in S_gene).values]
I_rank=stats.RSA_rank(t_hit.Score.values, I_index)
rslt=stats.RSA_score(I_rank, N_total, l_BonferroniCorrection=True, l_keep_most=l_keep_most, p_cutoff=p_cutoff)
c['#HitInGORemain']=rslt["cutoff"]+1
if c['#HitInGORemain']<min_overlap: return None
c['#HitRemain']=I_rank[rslt["cutoff"]]
c['Cutoff']=t_hit.Score.values[rslt["cutoff"]]
c['%InGO']=c['#HitInGORemain']*100.0/c['#HitRemain']
q=min(max(c['%InGO']/100, 1.0/c['#HitRemain']), 1-1.0/c['#HitRemain'])
c['STDV %InGO']=np.sqrt(q*(1-q)/c['#HitRemain'])*100
c['Enrichment']=c['%InGO']/100.0*N_total/c['#GeneInGO']
S=[int(x) for x in S_both]
S.sort()
c['GeneID_All']='|'.join([str(x) for x in S])
S=[int(x) for x in list(t_hit.Hit[: rslt["cutoff"]+1])]
S.sort()
c['GeneID']='|'.join([str(x) for x in S])
c['LogP']=rslt['logP']
return c
def go_count(self, S_hit, S_go=None):
"""return a dict of GO and the number of genes appear in each GO
if S_go is provided, only counts for those go terms"""
c={}
if S_go is not None: S_go=set(S_go)
for x in S_hit:
Y=self.GENE_GO.get(x, [])
if S_go is not None: Y = set(Y).intersection(S_go)
for y in Y:
c[y]=c.get(y,0)+1
return c
def gene_count(self, S_go, S_gene=None):
"""return a dict of Gene and the number of GOs appear for each gene
if S_gene is provided, only counts for those genes
"""
c={}
if S_gene is not None: S_gene=set(S_gene)
for x in S_go:
Y=self.GO_GENE.get(x, [])
if S_gene is not None: Y = set(Y).intersection(S_gene)
for y in self.GO_GENE.get(x, []):
c[y]=c.get(y,0)+1
return c
def membership_count(self, S_go, S_gene):
"""return a dict of GO and the set of genes fall into those GO"""
return self.go_count(S_gene, S_go)
#c=self.go_count(S_gene)
#if type(S_go)!=set:
# S_go=set(S_go)
#c={ k:v for k,v in c.items() if k in S_go }
#return c
def membership_go_genes(self, S_go, S_gene):
c={}
S_gene=set(S_gene)
for x in S_go:
S=set(self.GO_GENE.get(x, [])).intersection(S_gene)
if len(S): c[x]=list(S)
return c
#gene_go = { x:self.GENE_GO.get(x, []) for x in S_gene}
#c={}
#for k,v in gene_go.items():
# for g in v:
# if g not in S_go:
# continue
# if g not in c:
# c[g] = []
# c[g].append(k)
#return c
def analysis(self, S_hit, S_score=None, S_go=None, SRC_GENE=None, min_overlap=3, min_enrichment=0, p_cutoff=0.01, n_CPU=0, l_rsa_keep_most=True, S_go_category=None, l_background_by_ontology=False):
"""If Score is None, just run GO enrichment test, if Score is provided, the smaller score represents hits are more reliable.
An iterative enrichment test, i.e., RSA routine is applied, see analysis_go_RSA is applied.
S_go_category: a set of categories to use, useful for gene prioritization project.
By default, we recommend [31,19,11,15,27,24]
Special usage: both S_hit and S_go are dict, {'a':S_hit1, 'b':S_hit2}, {'a':S_go1, 'b':S_go2}
Which is a short cut to run analysis(S_hit1, S_go1) and analysis(S_hit2, S_go2)
i.e., we analysis multiple hit lists, each has its own S_go list.
We pool them together, so that we can use CPUs more effectively, e.g, len(S_go1)==2 and len(S_go2)==10, we can run them in 12 CPUs once, instead of in two batches.
"""
def go_filtered(S_go, S_go_category):
return [x for x in S_go if self.get_category_id(x) in S_go_category]
S_all_go_filtered=[]
def all_go_filtered(S_go_category):
if len(S_all_go_filtered)==0:
S_go=self.GO_GENE_ENRICH
S_all_go_filtered.append(go_filtered(S_go, S_go_category))
return S_all_go_filtered[0]
N_go=0
if S_go_category is not None and len(S_go_category)>0:
# hard code for now, to be fixed later
if type(S_go_category) in (int, str):
S_go_category=[S_go_category]
S_go_category={int(x) for x in S_go_category if self.CATEGORY_COUNT.get(x,0)>0 }
for x in S_go_category:
N_go+=self.CATEGORY_COUNT[x]
else:
N_go=sum(self.CATEGORY_COUNT.values())
l_multi_list=type(S_hit) is dict
if S_go is None:
if l_multi_list:
S_go={}
for k in S_hit.keys():
S_go[k]=all_go_filtered(S_go_category)
else:
S_go=all_go_filtered(S_go_category)
else:
if l_multi_list:
for k in S_hit.keys():
if S_go.get(k, None) is None:
S_go[k]=all_go_filtered(S_go_category)
else:
S_go[k]=go_filtered(S_go[k], S_go_category)
else:
S_go=go_filtered(S_go, S_go_category)
if SRC_GENE is not None:
if type(SRC_GENE) is list:
SRC_GENE=set(SRC_GENE)
SRC_GENE=self.ALL_GENE.intersection(SRC_GENE) # remove genes from background, if it is not in self.ALL_GENE
N_total=len(SRC_GENE) #self.ALL_GENE.intersection(SRC_GENE))
elif l_background_by_ontology:
# GeneGo uses this
if l_multi_list:
X=set()
for x in S_go.values():
X.add(set(x))
src_genes=self.gene_count(list(X))
else:
src_genes=self.gene_count(S_go)
N_total=len(src_genes)
SRC_GENE=set(src_genes.keys())
else:
if self.is_L1000():
N_total=len(self.ALL_GENE)
else:
N_total=len(self.GENE_GO) #len(self.ALL_GENE), only count genes that has GO annotation
#N_total=len(self.ALL_GENE)
# prefiltering uninteresting GO terms
# already converted to multiple hit list situation
sw=util.StopWatch()
L=[] # list of (S_hit, s_go)
def spread_input(S_hit, S_go, key):
#S_hit, S_go, key=(X[0], X[1], X[2])
# may not worth it
#c_cnt=self.go_count(S_hit, S_go)
#S_go=[s_go for s_go in S_go if c_cnt.get(s_go,0)>=min_overlap ]
# minimum size
MIN_BATCH=2000
S_go2=util.split(S_go, chunk_size=MIN_BATCH)
return [(key, S_hit, x) for x in S_go2]
#sw.check('To spreadout')
if l_multi_list:
#mp=parallel.MP()
#m=1 if len(S_hit)<=3 else n_CPU
#mp.start(f=spread_input, n_CPU=m)
#L=[(X, S_go[k], k) for k,X in S_hit.items() if len(X)>=min_overlap]
#out=mp.map(L)
#L=[y for X in out for y in X]
L=[]
for k,X in S_hit.items():
if len(X)<min_overlap: continue
L.extend(spread_input(X, S_go[k], k))
random.shuffle(L)
else:
if len(S_hit)>=min_overlap:
L=spread_input(S_hit, S_go, 'Default')
if self.eg is None:
self.eg=ez.EntrezGene(tax_id=self.tax_id, l_use_GPDB=self.GPDB)
if n_CPU==0: n_CPU=1
#print ">>>>>>>>>>>>>>", len(L)
S_chunk=util.split(L, n_chunk=n_CPU)
#sw.check('Spreadout tasks: %d' % len(L))
def analyze(L):
"""L is a list of [[s_name, S_hit, s_go]], s_go can also be a list"""
rslt=[]
#p=util.Progress(len(L))
i=0
import multiprocessing
s_pid=str(multiprocessing.current_process().pid)
for s_name, S_hit, S_go in L:
i+=1
#if (i % 50000): p.check(i, s_pid)
if type(S_go) is str: S_go=[S_go]
for s_go in S_go:
if s_go not in self.GO_GENE: continue
if S_score is None:
c=self.analysis_go(s_go, S_hit, N_total, SRC_GENE=SRC_GENE, min_overlap=min_overlap, p_cutoff=p_cutoff)
else:
c=self.analysis_go_RSA(s_go, S_hit, S_score, N_total, SRC_GENE=SRC_GENE, min_overlap=min_overlap, p_cutoff=p_cutoff, l_keep_most=l_rsa_keep_most)
if c is None:
continue
c['Name']=s_name
if min_enrichment>0 and c['Enrichment']<min_enrichment: continue
if p_cutoff<1 and 10**c['LogP']>p_cutoff: continue
c['Description']= self.go_description(s_go)
S_gene=c['GeneID'].split('|')
S_symbol=[self.eg.C_GENENEW[x] if x in self.eg.C_GENENEW else x for x in S_gene]
c['Hits']='|'.join(S_symbol)
if 'GeneID_All' in c:
S_gene=c['GeneID_All'].split('|')
S_symbol=[self.eg.C_GENENEW[x] if x in self.eg.C_GENENEW else x for x in S_gene]
c['Hits_All']='|'.join(S_symbol)
if self.GPDB:
c['CategoryID'] = self.get_category_id(c['GO'])
c['Category'] = self.CATEGORY.get(self.get_category_id(c['GO']))
c['GO'] = self.get_source_id(c['GO'])
rslt.append(c)
return rslt
out=parallel.parmap(analyze, S_chunk, n_CPU=n_CPU)
#if n_CPU>1:
# mp=parallel.MP()
# mp.start(f=analyze, n_CPU=n_CPU)
# out=mp.map(S_chunk)
#else:
# out=[analyze(x) for x in S_chunk]
#mp.start(n_CPU=n_CPU)
#sw.check('P-value Calculation')
#sw.check('P-value Calculation Done')
rslt=[]
for x in out:
if len(x): rslt.extend(x)
if len(rslt):
#sw.check('Length: %d' % len(rslt))
t=pd.DataFrame(rslt)
#sw.check('Table DONE')
if S_score is None:
t=t.sort_values(['LogP','Enrichment','#GeneInGOAndHitList'], ascending=[True,False,False])
cols = ['Name','GO','Description','LogP','Enrichment','Z-score','#TotalGeneInLibrary',
'#GeneInGO','#GeneInHitList','#GeneInGOAndHitList','%InGO','STDV %InGO','GeneID','Hits']
else:
t=t.sort_values(['LogP','Enrichment','#HitInGORemain','#GeneInGOAndHitList'], ascending=[True,False,False,False])
cols = ['Name','GO','Description','LogP','Enrichment','Z-score','#TotalGeneInLibrary',
'#GeneInGO','#HitRemain','#HitInGORemain','Cutoff','#GeneInHitList','#GeneInGOAndHitList','%InGO','STDV %InGO','GeneID','Hits','GeneID_All','Hits_All']
if self.GPDB:
#cols.insert(1,'field1')
cols.insert(1,'CategoryID')
cols.insert(1,'Category')
#sw.check('sorted DONE')
t=t.reindex(columns=cols)
# FDR
#print ">>> N_go: ", N_go
#sw.check('reindex DONE')
t['Log(q-value)']=np.log10(np.clip(stats.adjust_p(np.power(10, t.LogP.values), N=N_go), 1e-100, 1.0))
#sw.check('q-value DONE')
if not l_multi_list:
t.drop('Name', axis=1, inplace=True)
return t
else:
return None
def key_terms(self, t_old, t_new, t_union, S_old=None, t_over=None):
"""Look for terms presented in both list and the p-valeu is even better in the combined list
S_old is the set of genes in Old set, if None, set to all genes in t_old table
This method is to be used by analyze_key_terms."""
if t_old is None or t_new is None or t_union is None:
return None
print("Old: %d, New: %d, Union: %d" % (len(t_old), len(t_new), len(t_union)))
if S_old is None:
            S_old=set([y for x in [ t_old.loc[i, 'GeneID'].split("|") for i in t_old.index ] for y in x])
elif type(S_old) is list:
S_old=set(S_old)
t_old=t_old[["GO","LogP"]]
t_old.rename2({"LogP":"LogP_Hit"})
t_new=t_new[["GO","LogP"]]
t_new.rename2({"LogP":"LogP_OverConnect"})
#t_old.rename2({"LogP":"LogP_Union"})
t=pd.merge(t_union, t_old, on="GO")
if len(t)==0: return None
t= | pd.merge(t, t_new, on="GO") | pandas.merge |
import json
import os
import geocoder
import pandas as pd
import plotly.express as px
from plotly.colors import label_rgb as rgb
import streamlit as st
st.title('Oldways Data Analyzer')
mode_selection = st.sidebar.selectbox('App Mode:', ('Filtered Analysis', 'Automatic Analysis'))
'### Inputs'
excel_file = st.file_uploader(label='Excel File to Analyze', type=['xlsx'])
sheet_name = 'Student Lifestyle Surveys'
header_row = 23
weight_sheet_name = 'Total Weight Loss' # Spreadsheet name for weight data
weight_header_row = 8 # Header row for weight data
bp_sheet_name = "Blood Pressure" # Spreadsheet name for bp data
bp_header_row = 5 # Header row for bp data
waist_sheet_name = "Waist Circumference" # Spreadsheet for waist data
waist_header_row = 7 # Header row for waist data
def mean(data):
'''
Compute the average value of the given data
'''
return sum(data) / len(data)
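# e.g. mean([2, 4, 6]) -> 4.0; this simple arithmetic mean is used for all the summary numbers below.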
topics = ['Cooking Frequency', 'Herbs and Spices', 'Greens', 'Whole Grains', 'Beans', 'Tubers',
'Vegetables',
'Fruits',
'Vegetarian-Based Meals', 'Exercise']
def get_df_percentages(df):
percentage_increase = []
percentage_same = []
num_students = []
for i in range(len(topics)):
# Create header names
pre_string = "Pre"
pre_name = "Pre - Num"
post_name = "Post Num"
if i != 0: # artifact of how spreadsheet is formatted
pre_name += ("." + str(i))
post_name += ("." + str(i))
pre_string += ("." + str(i))
pre_post = df[[pre_name, post_name, pre_string]]
pre_post["Difference"] = pre_post[post_name] - pre_post[pre_name]
pre_post.dropna(inplace=True) # drops the blank lines (they didn't answer)
total_num = len(pre_post)
increase_num = len(pre_post[pre_post['Difference'] > 0])
same_num = len(pre_post[pre_post['Difference'] == 0])
percentage_increase.append(100 * increase_num / total_num)
percentage_same.append(100 * same_num / total_num)
num_students.append(total_num)
return mean(percentage_increase), mean(percentage_same), mean(num_students)
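# Sketch of the return value (illustrative; `some_teacher` is a placeholder, not defined here):
#   increase, same, n = get_df_percentages(df[df['Teacher Name'] == some_teacher])
# gives the mean % of students who improved, the mean % unchanged, and the mean number of
# respondents, averaged over the survey topics listed above.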
def compute_percentage(data, target):
'''
    Compute the percentage of data points for which the given target
    predicate (test function) returns True
'''
count = 0
for point in data:
if target(point):
count += 1
return 100 * count / len(data)
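# e.g. compute_percentage([1, -2, 3], lambda x: x > 0) -> 66.66..., i.e. two of the three points
# satisfy the predicate.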
@st.cache
def load_sheet(excel_file, sheet_name, header_row):
return pd.read_excel(excel_file, sheet_name, header=header_row)
if excel_file is not None:
df = load_sheet(excel_file, sheet_name, header_row) # Load all dataframes
if mode_selection == 'Automatic Analysis':
with st.beta_expander('Health Statistics'): # Expandable info about health
df_health = load_sheet(excel_file, weight_sheet_name, weight_header_row)
df_bp = load_sheet(excel_file, bp_sheet_name, bp_header_row)
df_waist = load_sheet(excel_file, waist_sheet_name, waist_header_row)
weight_loss_his = mean(
df_health["Weight Change lbs."]) # Changes in Weight (Overall, Male, Female)
average_weight_loss = f"{weight_loss_his:.2f}"
weight_loss_fig = px.histogram(df_health["Weight Change lbs."], title='Changes in Weight (Overall)',
labels={'value': 'Weight Lost', 'count': 'Count'})
st.plotly_chart(weight_loss_fig)
males = df_health.loc[df_health['Sex'] == 'M']
weight_loss_his_m = px.histogram(males['Weight Change lbs.'], title='Changes in Weight (Male)',
labels={'value': 'Weight Lost', 'count': 'Count'})
average_male_loss = mean(males["Weight Change lbs."])
average_male_loss = f"{average_male_loss:.2f}"
females = df_health.loc[df_health['Sex'] == 'F']
weight_loss_his_f = px.histogram(females['Weight Change lbs.'], title='Changes in Weight (Female)',
labels={'value': 'Weight Lost', 'count': 'Count'})
st.plotly_chart(weight_loss_his_f)
st.plotly_chart(weight_loss_his_m)
average_female_loss = mean(females["Weight Change lbs."])
average_female_loss = f"{average_female_loss:.2f}"
st.success(
(f'On average, the **{len(df_health["Weight Change lbs."])}** students lost **{average_weight_loss}** '
f'pounds. Of these, the **{len(females)}** females lost an average of **{average_female_loss}** pounds'
f' while the **{len(males)}** males lost an average of **{average_male_loss}** pounds. '))
percent_bp = [] # Changes in Blood Pressure
percent_bp_improve = compute_percentage(df_bp["Change in New HPB Rating"], lambda x: x == 'Decrease')
percent_bp.append(percent_bp_improve)
percent_bp_improve = f"{percent_bp_improve:.2f}"
percent_bp_same = compute_percentage(df_bp["Change in New HPB Rating"], lambda x: x == 'No Change')
percent_bp.append(percent_bp_same)
percent_bp.append(100 - percent_bp[0] - percent_bp[1])
percent_bp_labels = ['Decrease', "No Change", 'Increase']
percent_bp_same = f"{percent_bp_same:.2f}"
average_sys_bp = mean(df_bp["Change in Sys BP"])
average_dia_bp = mean(df_bp["Change in Dia BP"])
average_sys_bp = f"{average_sys_bp:.2f}"
average_dia_bp = f"{average_dia_bp:.2f}"
bp_fig = px.pie(title='Changes in Blood Pressure Stages', values=percent_bp, names=percent_bp_labels)
bp_fig.update_traces(textposition='inside', textinfo='label+percent')
st.plotly_chart(bp_fig)
st.success((f'**{percent_bp_improve}%** of students improved their blood pressure by at least one stage'
f' while **{percent_bp_same}%** of students saw no change in blood pressure. On average, students'
f' saw an average improvement of **{average_sys_bp}** in systolic blood pressure and **{average_dia_bp}**'
f' in diastolic blood pressure'))
percent_waist = [] # Changes in Waist
waist_lost = compute_percentage(df_waist["Inches Lossed"], lambda x: x > 0)
percent_waist.append(waist_lost)
waist_lost = f"{waist_lost:.2f}"
waist_same = compute_percentage(df_waist["Inches Lossed"], lambda x: x == 0)
percent_waist.append(waist_same)
percent_waist.append(100 - percent_waist[0] - percent_waist[1])
waist_label = ['Lost', 'No Change', 'Gained']
waist_fig = px.pie(title='Changes in Waist Inches', values=percent_waist, names=waist_label)
waist_fig.update_traces(textposition='inside', textinfo='label+percent')
st.plotly_chart(waist_fig)
waist_same = f"{waist_same:.2f}"
average_waist = mean(df_waist["Inches Lossed"])
average_waist = f"{average_waist:.2f}"
st.success((f'On average, the **{len(df_waist["Inches Lossed"])}** students lost '
f'**{average_waist}** inches on their waist, with **{waist_lost}%** of students '
f'seeing improved results and **{waist_same}%** of students seeing no changes. '))
with st.beta_expander('Teacher Analysis'):
@st.cache(suppress_st_warning=True)
def analyze_teachers():
teachers = list(df['Teacher Name'].unique())
teacher_increases = []
teacher_num_students = []
teacher_progress = st.progress(0.)
for i, teacher in enumerate(teachers):
teacher_df = df[df['Teacher Name'] == teacher]
increase, same, num_students = get_df_percentages(teacher_df)
teacher_increases.append(increase)
teacher_num_students.append(num_students)
teacher_progress.progress((i + 1.) / len(teachers))
sort_teachers = [teachers for _, teachers in sorted(zip(teacher_increases, teachers), reverse=True)]
sort_num_students = [teachers for _, teachers in
sorted(zip(teacher_increases, teacher_num_students), reverse=True)]
sort_increases = sorted(teacher_increases, reverse=True)
return sort_teachers, sort_num_students, sort_increases
sort_teachers, sort_num_students, sort_increases = analyze_teachers()
display_df = pd.DataFrame()
display_df['Teacher'] = sort_teachers
display_df['Average % Increase'] = sort_increases
display_df['Average # of Students'] = sort_num_students
display_df
            '*Note:* Average % increase reflects the average improvement rate of students in the categories of ' + ', '.join(topics)
if mode_selection == 'Filtered Analysis':
'### Filters'
# Filter Years
min_year, max_year = int(df["Class End Date (year)"].min()), int(df["Class End Date (year)"].max())
start_year, end_year = st.slider(label='Class Years', value=(min_year, max_year), min_value=min_year,
max_value=max_year)
df = df.loc[(start_year <= df['Class End Date (year)']) & (df['Class End Date (year)'] <= end_year)]
# Filter Teachers
teachers = st.multiselect(default=['All'], label='Teachers',
options=['All'] + list(df['Teacher Name'].unique()))
if 'All' not in teachers:
df = df[df['Teacher Name'].isin(teachers)]
locations = df[["Class Type", "Teacher Name", "Class End Date (year)", "Class Location Type", "City", "State"]]
locations.drop_duplicates(inplace=True)
seen = {} if not os.path.exists('loaction_dump.json') else json.load(open('loaction_dump.json', 'r'))
lat = []
lng = []
location_names = []
for city, state in zip(locations['City'], locations['State']):
loc_str = f'{city}, {state}'
if loc_str not in seen:
geocode = geocoder.google(loc_str)
seen[loc_str] = {'lat': geocode.latlng[0], 'lng': geocode.latlng[1]}
lat.append(seen[loc_str]['lat'])
lng.append(seen[loc_str]['lng'])
location_names.append(loc_str)
try:
json.dump(seen, open('loaction_dump.json', 'w'))
except PermissionError:
pass
locations['lat'] = lat
locations['lng'] = lng
locations['Location'] = location_names
locations['# of Classes'] = locations.groupby(['lat', 'lng'])['lat'].transform('count')
fig = px.scatter_geo(locations, lat="lat", lon='lng', size='# of Classes',
projection='albers usa',
hover_data={'lat': False, 'lng': False, '# of Classes': True, 'Location': True},
title='Class Locations', size_max=25)
# fig.show()
'## Analysis'
with st.beta_expander('General Statistics'):
st.success(f'There have been **{len(locations)}** classes taught with these filter options.')
st.success(f'The classes were taught in **{len(set(location_names))}** different cities.')
st.plotly_chart(fig, use_container_width=True)
st.success(f'These classes reached **{len(df)}** students.')
heritage_counts = df["History & Heritage Positive Motivators?"].str.lower().value_counts()
yes = heritage_counts.get('yes', 1)
no = heritage_counts.get('no', 0)
st.success(f'**{100 * yes / (yes + no):.2f}%** of the '
f'{(yes + no)} students surveyed, said heritage/history '
f'are positive motivators for health.')
with st.beta_expander('Improvements'):
data_view = st.radio('How would you like to view the data?', ('% of People', '# of People'))
topics = ['Cooking Frequency', 'Herbs and Spices', 'Greens', 'Whole Grains', 'Beans', 'Tubers',
'Vegetables',
'Fruits',
'Vegetarian-Based Meals', 'Exercise']
percentages = []
for i in range(len(topics)):
# Create header names
pre_string = "Pre"
pre_name = "Pre - Num"
post_name = "Post Num"
if i != 0: # artifact of how spreadsheet is formatted
pre_name += ("." + str(i))
post_name += ("." + str(i))
pre_string += ("." + str(i))
pre_post = df[[pre_name, post_name, pre_string]]
pre_post["Difference"] = pre_post[post_name] - pre_post[pre_name]
pre_post.dropna(inplace=True) # drops the blank lines (they didn't answer)
total_num = len(pre_post)
increase_num = len(pre_post[pre_post['Difference'] > 0])
same_num = len(pre_post[pre_post['Difference'] == 0])
if '#' == data_view[0]:
percent_increase = increase_num
percent_same = same_num
percentages.append([percent_increase, 'Increased', topics[i]])
percentages.append([percent_same, 'No Change', topics[i]])
percentages.append([total_num - percent_increase - percent_same, 'Decreased', topics[i]])
else:
percent_increase = round(100 * increase_num / total_num, 2)
percent_same = round(100 * same_num / total_num, 2)
percentages.append([percent_increase, 'Increased', topics[i]])
percentages.append([percent_same, 'No Change', topics[i]])
percentages.append([100 - percent_increase - percent_same, 'Decreased', topics[i]])
percentage_df = | pd.DataFrame(percentages, columns=[f'{data_view[0]} of People', 'Change', 'Category']) | pandas.DataFrame |
"""Objects related to pandas dataframes."""
from typing import Tuple, Union, List, Any, Dict, Optional, cast, Callable
import numpy as np
import pandas as pd
from pandas.api.types import union_categoricals
from owid.datautils.common import ExceptionFromDocstring, warn_on_list_of_entities
class DataFramesHaveDifferentLengths(ExceptionFromDocstring):
"""Dataframes cannot be compared because they have different number of rows."""
class ObjectsAreNotDataframes(ExceptionFromDocstring):
"""Given objects are not dataframes."""
def compare(
df1: pd.DataFrame,
df2: pd.DataFrame,
columns: Optional[List[str]] = None,
absolute_tolerance: float = 1e-8,
relative_tolerance: float = 1e-8,
) -> pd.DataFrame:
"""Compare two dataframes element by element to see if they are equal.
It assumes that nans are all identical, and allows for certain absolute and relative tolerances for the comparison
of floats.
NOTE: Dataframes must have the same number of rows to be able to compare them.
Parameters
----------
df1 : pd.DataFrame
First dataframe.
df2 : pd.DataFrame
Second dataframe.
columns : list or None
List of columns to compare (they both must exist in both dataframes). If None, common columns will be compared.
absolute_tolerance : float
Absolute tolerance to assume in the comparison of each cell in the dataframes. A value a of an element in df1 is
considered equal to the corresponding element b at the same position in df2, if:
abs(a - b) <= absolute_tolerance
relative_tolerance : float
Relative tolerance to assume in the comparison of each cell in the dataframes. A value a of an element in df1 is
considered equal to the corresponding element b at the same position in df2, if:
abs(a - b) / abs(b) <= relative_tolerance
Returns
-------
compared : pd.DataFrame
Dataframe of booleans, with as many rows as df1 and df2, and as many columns as specified by `columns` argument
        (or as many common columns between df1 and df2, if `columns` is None). The (i, j) element is True if df1 and df2
have the same value (for the given tolerances) at that same position.
"""
# Ensure dataframes can be compared.
if (type(df1) != pd.DataFrame) or (type(df2) != pd.DataFrame):
raise ObjectsAreNotDataframes
if len(df1) != len(df2):
raise DataFramesHaveDifferentLengths
# If columns are not specified, assume common columns.
if columns is None:
columns = sorted(set(df1.columns) & set(df2.columns))
# Compare, column by column, the elements of the two dataframes.
compared = pd.DataFrame()
for col in columns:
if (df1[col].dtype == object) or (df2[col].dtype == object):
# Apply a direct comparison for strings.
compared_row = df1[col].values == df2[col].values
else:
# For numeric data, consider them equal within certain absolute and relative tolerances.
compared_row = np.isclose(
df1[col].values,
df2[col].values,
atol=absolute_tolerance,
rtol=relative_tolerance,
)
# Treat nans as equal.
compared_row[pd.isnull(df1[col].values) & pd.isnull(df2[col].values)] = True # type: ignore
compared[col] = compared_row
return compared
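# Minimal usage sketch for `compare` (illustrative only; this helper is not called anywhere and
# the toy frames below are assumptions, not part of the public API).
def _compare_example() -> pd.DataFrame:
    df_a = pd.DataFrame({"x": [1.0, 2.0], "s": ["a", "b"]})
    df_b = pd.DataFrame({"x": [1.0, 2.0 + 1e-10], "s": ["a", "b"]})
    # Both "x" cells fall within the default absolute/relative tolerances and the strings are
    # compared directly, so every cell of the returned boolean frame is True.
    return compare(df_a, df_b)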
def are_equal(
df1: pd.DataFrame,
df2: pd.DataFrame,
absolute_tolerance: float = 1e-8,
relative_tolerance: float = 1e-8,
verbose: bool = True,
) -> Tuple[bool, pd.DataFrame]:
"""Check whether two dataframes are equal.
It assumes that all nans are identical, and compares floats by means of certain absolute and relative tolerances.
Parameters
----------
df1 : pd.DataFrame
First dataframe.
df2 : pd.DataFrame
Second dataframe.
absolute_tolerance : float
Absolute tolerance to assume in the comparison of each cell in the dataframes. A value a of an element in df1 is
considered equal to the corresponding element b at the same position in df2, if:
abs(a - b) <= absolute_tolerance
relative_tolerance : float
Relative tolerance to assume in the comparison of each cell in the dataframes. A value a of an element in df1 is
considered equal to the corresponding element b at the same position in df2, if:
abs(a - b) / abs(b) <= relative_tolerance
verbose : bool
True to print a summary of the comparison of the two dataframes.
Returns
-------
are_equal : bool
True if the two dataframes are equal (given the conditions explained above).
compared : pd.DataFrame
Dataframe with the same shape as df1 and df2 (if they have the same shape) that is True on each element where
both dataframes have equal values. If dataframes have different shapes, compared will be empty.
"""
# Initialise flag that is True only if both dataframes are equal.
equal = True
# Initialise flag that is True if dataframes can be compared cell by cell.
can_be_compared = True
# Initialise string of messages, which will optionally be printed.
summary = ""
# Check if all columns in df2 are in df1.
missing_in_df1 = sorted(set(df2.columns) - set(df1.columns))
if len(missing_in_df1):
summary += f"\n* {len(missing_in_df1)} columns in df2 missing in df1.\n"
summary += "\n".join([f" * {col}" for col in missing_in_df1])
equal = False
# Check if all columns in df1 are in df2.
missing_in_df2 = sorted(set(df1.columns) - set(df2.columns))
if len(missing_in_df2):
summary += f"\n* {len(missing_in_df2)} columns in df1 missing in df2.\n"
summary += "\n".join([f" * {col}" for col in missing_in_df2])
equal = False
# Check if dataframes have the same number of rows.
if len(df1) != len(df2):
summary += f"\n* {len(df1)} rows in df1 and {len(df2)} rows in df2."
equal = False
can_be_compared = False
# Check for differences in column names or types.
common_columns = sorted(set(df1.columns) & set(df2.columns))
all_columns = sorted(set(df1.columns) | set(df2.columns))
if common_columns == all_columns:
if df1.columns.tolist() != df2.columns.tolist():
summary += "\n* Columns are sorted differently.\n"
equal = False
for col in common_columns:
if df1[col].dtype != df2[col].dtype:
summary += (
f" * Column {col} is of type {df1[col].dtype} for df1, but type"
f" {df2[col].dtype} for df2."
)
equal = False
else:
summary += (
f"\n* Only {len(common_columns)} common columns out of"
f" {len(all_columns)} distinct columns."
)
equal = False
if not can_be_compared:
# Dataframes cannot be compared.
compared = pd.DataFrame()
equal = False
else:
# Check if indexes are equal.
if (df1.index != df2.index).any():
summary += (
"\n* Dataframes have different indexes (consider resetting indexes of"
" input dataframes)."
)
equal = False
# Dataframes can be compared cell by cell (two nans on the same cell are considered equal).
compared = compare(
df1,
df2,
columns=common_columns,
absolute_tolerance=absolute_tolerance,
relative_tolerance=relative_tolerance,
)
all_values_equal = compared.all().all()
if not all_values_equal:
summary += (
"\n* Values differ by more than the given absolute and relative"
" tolerances."
)
# Dataframes are equal only if all previous checks have passed.
equal = equal & all_values_equal
if equal:
summary += (
"Dataframes are identical (within absolute tolerance of"
f" {absolute_tolerance} and relative tolerance of {relative_tolerance})."
)
if verbose:
# Optionally print the summary of the comparison.
print(summary)
return equal, compared
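# Usage sketch for `are_equal` (uncalled helper; toy frames only).
def _are_equal_example() -> bool:
    df_a = pd.DataFrame({"x": [1, 2]})
    df_b = pd.DataFrame({"x": [1, 2]})
    # For identical frames the flag is True and the compared frame is all True.
    equal, _compared = are_equal(df_a, df_b, verbose=False)
    return equal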
def groupby_agg(
df: pd.DataFrame,
groupby_columns: Union[List[str], str],
aggregations: Union[Dict[str, Any], None] = None,
num_allowed_nans: Union[int, None] = 0,
frac_allowed_nans: Union[float, None] = None,
) -> pd.DataFrame:
"""Group dataframe by certain columns, and aggregate using a certain method, and decide how to handle nans.
This function is similar to the usual
> df.groupby(groupby_columns).agg(aggregations)
However, pandas by default ignores nans in aggregations. This implies, for example, that
> df.groupby(groupby_columns).sum()
will treat nans as zeros, which can be misleading.
When both num_allowed_nans and frac_allowed_nans are None, this function behaves like the default pandas behaviour
(and nans will be treated as zeros).
On the other hand, if num_allowed_nans is not None, then a group will be nan if the number of nans in that group is
larger than num_allowed_nans, otherwise nans will be treated as zeros.
Similarly, if frac_allowed_nans is not None, then a group will be nan if the fraction of nans in that group is
larger than frac_allowed_nans, otherwise nans will be treated as zeros.
If both num_allowed_nans and frac_allowed_nans are not None, both conditions are applied. This means that, each
group must have a number of nans <= num_allowed_nans, and a fraction of nans <= frac_allowed_nans, otherwise that
group will be nan.
Note: This function won't work when using multiple aggregations for the same column (e.g. {'a': ('sum', 'mean')}).
Parameters
----------
df : pd.DataFrame
Original dataframe.
groupby_columns : list or str
List of columns to group by. It can be given as a string, if it is only one column.
aggregations : dict or None
Aggregations to apply to each column in df. If None, 'sum' will be applied to all columns.
num_allowed_nans : int or None
Maximum number of nans that are allowed in a group.
frac_allowed_nans : float or None
Maximum fraction of nans that are allowed in a group.
Returns
-------
grouped : pd.DataFrame
Grouped dataframe after applying aggregations.
"""
if type(groupby_columns) == str:
groupby_columns = [groupby_columns]
if aggregations is None:
columns_to_aggregate = [
column for column in df.columns if column not in groupby_columns
]
aggregations = {column: "sum" for column in columns_to_aggregate}
# Group by and aggregate.
grouped = df.groupby(groupby_columns, dropna=False).agg(aggregations)
if num_allowed_nans is not None:
# Count the number of missing values in each group.
num_nans_detected = df.groupby(groupby_columns, dropna=False).agg(
lambda x: pd.isnull(x).sum()
)
# Make nan any aggregation where there were too many missing values.
grouped = grouped[num_nans_detected <= num_allowed_nans]
if frac_allowed_nans is not None:
# Count the number of missing values in each group.
num_nans_detected = df.groupby(groupby_columns, dropna=False).agg(
lambda x: pd.isnull(x).sum()
)
# Count number of elements in each group (avoid using 'count' method, which ignores nans).
num_elements = df.groupby(groupby_columns, dropna=False).size()
# Make nan any aggregation where there were too many missing values.
grouped = grouped[
num_nans_detected.divide(num_elements, axis="index") <= frac_allowed_nans
]
return grouped
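# Illustrative sketch of the nan handling described in the docstring above (uncalled helper; toy
# data only).
def _groupby_agg_example() -> pd.DataFrame:
    df = pd.DataFrame({"g": ["a", "a", "b", "b"], "v": [1.0, np.nan, 2.0, 3.0]})
    # With num_allowed_nans=0, group "a" becomes nan because it contains one nan; with
    # num_allowed_nans=None the nan would silently be treated as zero (default pandas behaviour).
    return groupby_agg(df, "g", aggregations={"v": "sum"}, num_allowed_nans=0)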
def multi_merge(
dfs: List[pd.DataFrame], on: Union[List[str], str], how: str = "inner"
) -> pd.DataFrame:
"""Merge multiple dataframes.
This is a helper function when merging more than two dataframes on common columns.
Parameters
----------
dfs : list
Dataframes to be merged.
on : list or str
Column or list of columns on which to merge. These columns must have the same name on all dataframes.
how : str
Method to use for merging (with the same options available in pd.merge).
Returns
-------
merged : pd.DataFrame
Input dataframes merged.
"""
merged = dfs[0].copy()
for df in dfs[1:]:
merged = pd.merge(merged, df, how=how, on=on)
return merged
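# Usage sketch for `multi_merge` (uncalled helper with toy frames).
def _multi_merge_example() -> pd.DataFrame:
    dfs = [
        pd.DataFrame({"k": [1, 2], "a": [10, 20]}),
        pd.DataFrame({"k": [1, 2], "b": [30, 40]}),
        pd.DataFrame({"k": [1, 2], "c": [50, 60]}),
    ]
    # Equivalent to merging the three frames pairwise on "k"; the result has columns k, a, b, c.
    return multi_merge(dfs, on="k", how="inner")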
def map_series(
series: pd.Series,
mapping: Dict[Any, Any],
make_unmapped_values_nan: bool = False,
warn_on_missing_mappings: bool = False,
warn_on_unused_mappings: bool = False,
show_full_warning: bool = False,
) -> pd.Series:
"""Map values of a series given a certain mapping.
This function does almost the same as
> series.map(mapping)
    However, map() translates values into nan if those values are not in the mapping, whereas this function can
    optionally keep the original values.
This function should do the same as
> series.replace(mapping)
However .replace() becomes very slow on big dataframes.
Parameters
----------
series : pd.Series
Original series to be mapped.
mapping : dict
Mapping.
make_unmapped_values_nan : bool
If true, values in the series that are not in the mapping will be translated into nan; otherwise, they will keep
their original values.
warn_on_missing_mappings : bool
True to warn if elements in series are missing in mapping.
warn_on_unused_mappings : bool
True to warn if the mapping contains values that are not present in the series. False to ignore.
show_full_warning : bool
True to print the entire list of unused mappings (only relevant if warn_on_unused_mappings is True).
Returns
-------
series_mapped : pd.Series
Mapped series.
"""
# Translate values in series following the mapping.
series_mapped = series.map(mapping)
if not make_unmapped_values_nan:
# Rows that had values that were not in the mapping are now nan.
missing = series_mapped.isnull()
if missing.any():
# Replace those nans with their original values.
series_mapped.loc[missing] = series[missing]
if warn_on_missing_mappings:
unmapped = set(series) - set(mapping)
if len(unmapped) > 0:
warn_on_list_of_entities(
unmapped,
f"{len(unmapped)} missing values in mapping.",
show_list=show_full_warning,
)
if warn_on_unused_mappings:
unused = set(mapping) - set(series)
if len(unused) > 0:
warn_on_list_of_entities(
unused,
f"{len(unused)} unused values in mapping.",
show_list=show_full_warning,
)
return series_mapped
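# Sketch of the difference from plain Series.map described in the docstring above (uncalled
# helper; toy data only).
def _map_series_example() -> pd.Series:
    s = pd.Series(["cat", "dog", "bird"])
    # "bird" has no mapping; with the default make_unmapped_values_nan=False it keeps its original
    # value instead of becoming nan as Series.map would make it.
    return map_series(s, {"cat": "feline", "dog": "canine"})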
def concatenate(dfs: List[pd.DataFrame], **kwargs: Any) -> pd.DataFrame:
"""Concatenate while preserving categorical columns.
Original source code from https://stackoverflow.com/a/57809778/1275818.
"""
# Iterate on categorical columns common to all dfs
for col in set.intersection(
*[set(df.select_dtypes(include="category").columns) for df in dfs]
):
# Generate the union category across dfs for this column
uc = | union_categoricals([df[col] for df in dfs]) | pandas.api.types.union_categoricals |
#!/usr/bin/env python3
# =============================================================================
# Date: November, 2019
# Author: <NAME>.
# Purpose: Creates a csv file with yearly land cover information for every
# fire pixel identified during each year.
# =============================================================================
import os
import numpy as np
import pandas as pd
from code.functions import create_data_array, get_nodata_value
from code.variables import landcovers
if __name__ == '__main__':
# change directory
os.chdir('../../data/tif/MODIS/MCD12Q1/prepared')
# create landcover DataArray
years = pd.date_range('2002', '2016', freq='AS').year.astype('str')
data = create_data_array('.', years)
nd = get_nodata_value('.')
# create empty DataFrame
cols = ['year', 'code', 'pixels', 'proportion']
df = pd.DataFrame(columns=cols)
for i, year in enumerate(years):
# filter DataArray by year and get pixel count by landcover
arr = data.loc[year].values
mask = (arr != 0) & (arr != nd)
values, counts = np.unique(arr[mask], return_counts=True)
# create year's DataFrame
year_df = | pd.DataFrame(columns=cols) | pandas.DataFrame |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import inspect
import numpy as np
import pandas as pd
import pyspark
import databricks.koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.indexes import MissingPandasLikeIndex, MissingPandasLikeMultiIndex
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class IndexesTest(ReusedSQLTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0],},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def kdf(self):
return ks.from_pandas(self.pdf)
def test_index(self):
for pdf in [
pd.DataFrame(np.random.randn(10, 5), index=list("abcdefghij")),
pd.DataFrame(
np.random.randn(10, 5), index=pd.date_range("2011-01-01", freq="D", periods=10)
),
pd.DataFrame(np.random.randn(10, 5), columns=list("abcde")).set_index(["a", "b"]),
]:
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index, pdf.index)
def test_index_getattr(self):
kidx = self.kdf.index
item = "databricks"
expected_error_message = "'Index' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_multi_index_getattr(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
kidx = kdf.index
item = "databricks"
expected_error_message = "'MultiIndex' object has no attribute '{}'".format(item)
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_to_series(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
# With name
pidx.name = "Koalas"
kidx.name = "Koalas"
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name=("x", "a"))), repr(pidx.to_series(name=("x", "a"))))
# With tupled name
pidx.name = ("x", "a")
kidx.name = ("x", "a")
self.assert_eq(repr(kidx.to_series()), repr(pidx.to_series()))
self.assert_eq(repr(kidx.to_series(name="a")), repr(pidx.to_series(name="a")))
self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series())
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name="a"), pidx.to_series(name="a"))
def test_to_frame(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
pidx.name = "a"
kidx.name = "a"
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(repr(kidx.to_frame(name="x")), repr(pidx.to_frame(name="x")))
self.assert_eq(
repr(kidx.to_frame(index=False, name="x")),
repr(pidx.to_frame(index=False, name="x")),
)
pidx = self.pdf.set_index("b", append=True).index
kidx = self.kdf.set_index("b", append=True).index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
# The `name` argument is added in pandas 0.24.
self.assert_eq(
repr(kidx.to_frame(name=["x", "y"])), repr(pidx.to_frame(name=["x", "y"]))
)
self.assert_eq(
repr(kidx.to_frame(index=False, name=["x", "y"])),
repr(pidx.to_frame(index=False, name=["x", "y"])),
)
def test_index_names(self):
kdf = self.kdf
self.assertIsNone(kdf.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
pdf = pd.DataFrame(np.random.randn(10, 5), index=idx, columns=list("abcde"))
kdf = ks.from_pandas(pdf)
pser = pdf.a
kser = kdf.a
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.name = "renamed"
kidx.name = "renamed"
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
pidx.name = None
kidx.name = None
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
self.assertEqual(kser.index.names, pser.index.names)
with self.assertRaisesRegex(ValueError, "Names must be a list-like"):
kidx.names = "hi"
expected_error_message = "Length of new names must be {}, got {}".format(
len(kdf._internal.index_map), len(["0", "1"])
)
with self.assertRaisesRegex(ValueError, expected_error_message):
kidx.names = ["0", "1"]
def test_multi_index_names(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.names = ["renamed_number", "renamed_color"]
kidx.names = ["renamed_number", "renamed_color"]
self.assertEqual(kidx.names, pidx.names)
pidx.names = ["renamed_number", None]
kidx.names = ["renamed_number", None]
self.assertEqual(kidx.names, pidx.names)
if LooseVersion(pyspark.__version__) < LooseVersion("2.4"):
# PySpark < 2.4 does not support struct type with arrow enabled.
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
self.assert_eq(kidx, pidx)
else:
self.assert_eq(kidx, pidx)
with self.assertRaises(PandasNotImplementedError):
kidx.name
with self.assertRaises(PandasNotImplementedError):
kidx.name = "renamed"
def test_index_rename(self):
pdf = pd.DataFrame(
np.random.randn(10, 5), index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
)
kdf = ks.from_pandas(pdf)
pidx = pdf.index
kidx = kdf.index
self.assert_eq(kidx.rename("y"), pidx.rename("y"))
self.assert_eq(kdf.index.names, pdf.index.names)
kidx.rename("z", inplace=True)
pidx.rename("z", inplace=True)
self.assert_eq(kidx, pidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kidx.rename(None), pidx.rename(None))
self.assert_eq(kdf.index.names, pdf.index.names)
def test_multi_index_rename(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
pmidx = pdf.index
kmidx = kdf.index
self.assert_eq(kmidx.rename(["n", "c"]), pmidx.rename(["n", "c"]))
self.assert_eq(kdf.index.names, pdf.index.names)
kmidx.rename(["num", "col"], inplace=True)
pmidx.rename(["num", "col"], inplace=True)
self.assert_eq(kmidx, pmidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kmidx.rename([None, None]), pmidx.rename([None, None]))
self.assert_eq(kdf.index.names, pdf.index.names)
self.assertRaises(TypeError, lambda: kmidx.rename("number"))
self.assertRaises(ValueError, lambda: kmidx.rename(["number"]))
def test_multi_index_levshape(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2)])
kidx = ks.from_pandas(pidx)
self.assertEqual(pidx.levshape, kidx.levshape)
def test_index_unique(self):
kidx = self.kdf.index
# here the output is different than pandas in terms of order
expected = [0, 1, 3, 5, 6, 8, 9]
self.assert_eq(expected, sorted(kidx.unique().to_pandas()))
self.assert_eq(expected, sorted(kidx.unique(level=0).to_pandas()))
expected = [1, 2, 4, 6, 7, 9, 10]
self.assert_eq(expected, sorted((kidx + 1).unique().to_pandas()))
with self.assertRaisesRegex(IndexError, "Too many levels*"):
kidx.unique(level=1)
with self.assertRaisesRegex(KeyError, "Requested level (hi)*"):
kidx.unique(level="hi")
def test_multi_index_copy(self):
arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
idx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index.copy(), pdf.index.copy())
def test_drop_duplicates(self):
pidx = pd.Index([4, 2, 4, 1, 4, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.drop_duplicates().sort_values(), pidx.drop_duplicates().sort_values())
self.assert_eq(
(kidx + 1).drop_duplicates().sort_values(), (pidx + 1).drop_duplicates().sort_values()
)
def test_dropna(self):
pidx = pd.Index([np.nan, 2, 4, 1, np.nan, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.dropna(), pidx.dropna())
self.assert_eq((kidx + 1).dropna(), (pidx + 1).dropna())
def test_index_symmetric_difference(self):
pidx1 = pd.Index([1, 2, 3, 4])
pidx2 = pd.Index([2, 3, 4, 5])
kidx1 = ks.from_pandas(pidx1)
kidx2 = ks.from_pandas(pidx2)
self.assert_eq(
kidx1.symmetric_difference(kidx2).sort_values(),
pidx1.symmetric_difference(pidx2).sort_values(),
)
self.assert_eq(
(kidx1 + 1).symmetric_difference(kidx2).sort_values(),
(pidx1 + 1).symmetric_difference(pidx2).sort_values(),
)
pmidx1 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
pmidx2 = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]],
)
kmidx1 = ks.from_pandas(pmidx1)
kmidx2 = ks.from_pandas(pmidx2)
self.assert_eq(
kmidx1.symmetric_difference(kmidx2).sort_values(),
pmidx1.symmetric_difference(pmidx2).sort_values(),
)
idx = ks.Index(["a", "b", "c"])
midx = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
with self.assertRaisesRegex(NotImplementedError, "Doesn't support*"):
idx.symmetric_difference(midx)
def test_multi_index_symmetric_difference(self):
idx = ks.Index(["a", "b", "c"])
midx = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
midx_ = ks.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")])
self.assert_eq(
midx.symmetric_difference(midx_),
midx.to_pandas().symmetric_difference(midx_.to_pandas()),
)
with self.assertRaisesRegex(NotImplementedError, "Doesn't support*"):
midx.symmetric_difference(idx)
def test_missing(self):
kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
# Index functions
missing_functions = inspect.getmembers(MissingPandasLikeIndex, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index("a").index, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index("a").index, name)()
# MultiIndex functions
missing_functions = inspect.getmembers(MissingPandasLikeMultiIndex, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index(["a", "b"]).index, name)()
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index(["a", "b"]).index, name)()
# Index properties
missing_properties = inspect.getmembers(
MissingPandasLikeIndex, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index("a").index, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index("a").index, name)
# MultiIndex properties
missing_properties = inspect.getmembers(
MissingPandasLikeMultiIndex, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(kdf.set_index(["a", "b"]).index, name)
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Index.*{}.*is deprecated".format(name)
):
getattr(kdf.set_index(["a", "b"]).index, name)
def test_index_has_duplicates(self):
indexes = [("a", "b", "c"), ("a", "a", "c"), (1, 3, 3), (1, 2, 3)]
names = [None, "ks", "ks", None]
has_dup = [False, True, True, False]
for idx, name, expected in zip(indexes, names, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(idx, name=name))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multiindex_has_duplicates(self):
indexes = [
[list("abc"), list("edf")],
[list("aac"), list("edf")],
[list("aac"), list("eef")],
[[1, 4, 4], [4, 6, 6]],
]
has_dup = [False, False, True, True]
for idx, expected in zip(indexes, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multi_index_not_supported(self):
kdf = ks.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
with self.assertRaisesRegex(TypeError, "cannot perform any with this index type"):
kdf.set_index(["a", "b"]).index.any()
with self.assertRaisesRegex(TypeError, "cannot perform all with this index type"):
kdf.set_index(["a", "b"]).index.all()
def test_index_nlevels(self):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(["a", "b", "c"]))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.nlevels, 1)
def test_multiindex_nlevel(self):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=[list("abc"), list("def")])
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.nlevels, 2)
def test_multiindex_from_arrays(self):
arrays = [["a", "a", "b", "b"], ["red", "blue", "red", "blue"]]
pidx = pd.MultiIndex.from_arrays(arrays)
kidx = ks.MultiIndex.from_arrays(arrays)
self.assert_eq(pidx, kidx)
def test_multiindex_swaplevel(self):
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]], names=["word", "number"])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
pidx = pd.MultiIndex.from_arrays([["a", "b"], [1, 2]], names=["word", None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.swaplevel(-2, -1), kidx.swaplevel(-2, -1))
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
self.assert_eq(pidx.swaplevel("word", 1), kidx.swaplevel("word", 1))
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(-3, "word")
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(0, 2)
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(0, -3)
with self.assertRaisesRegex(KeyError, "Level work not found"):
kidx.swaplevel(0, "work")
def test_multiindex_droplevel(self):
pidx = pd.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2)], names=["level1", "level2", "level3"]
)
kidx = ks.from_pandas(pidx)
with self.assertRaisesRegex(IndexError, "Too many levels: Index has only 3 levels, not 5"):
kidx.droplevel(4)
with self.assertRaisesRegex(KeyError, "Level level4 not found"):
kidx.droplevel("level4")
with self.assertRaisesRegex(KeyError, "Level.*level3.*level4.*not found"):
kidx.droplevel([("level3", "level4")])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 4 levels from an index with 3 levels: at least one "
"level must be left.",
):
kidx.droplevel([0, 0, 1, 2])
with self.assertRaisesRegex(
ValueError,
"Cannot remove 3 levels from an index with 3 levels: at least one "
"level must be left.",
):
kidx.droplevel([0, 1, 2])
self.assert_eq(pidx.droplevel(0), kidx.droplevel(0))
self.assert_eq(pidx.droplevel([0, 1]), kidx.droplevel([0, 1]))
self.assert_eq(pidx.droplevel([0, "level2"]), kidx.droplevel([0, "level2"]))
def test_index_fillna(self):
pidx = pd.Index([1, 2, None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.fillna(0), kidx.fillna(0))
self.assert_eq(pidx.rename("name").fillna(0), kidx.rename("name").fillna(0))
with self.assertRaisesRegex(TypeError, "Unsupported type <class 'list'>"):
kidx.fillna([1, 2])
def test_index_drop(self):
pidx = pd.Index([1, 2, 3])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop(1), kidx.drop(1))
self.assert_eq(pidx.drop([1, 2]), kidx.drop([1, 2]))
def test_multiindex_drop(self):
pidx = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z")], names=["level1", "level2"]
)
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop("a"), kidx.drop("a"))
self.assert_eq(pidx.drop(["a", "b"]), kidx.drop(["a", "b"]))
self.assert_eq(pidx.drop(["x", "y"], level=1), kidx.drop(["x", "y"], level=1))
self.assert_eq(pidx.drop(["x", "y"], level="level2"), kidx.drop(["x", "y"], level="level2"))
pidx.names = ["lv1", "lv2"]
kidx.names = ["lv1", "lv2"]
self.assert_eq(pidx.drop(["x", "y"], level="lv2"), kidx.drop(["x", "y"], level="lv2"))
self.assertRaises(IndexError, lambda: kidx.drop(["a", "b"], level=2))
self.assertRaises(KeyError, lambda: kidx.drop(["a", "b"], level="level"))
kidx.names = ["lv", "lv"]
self.assertRaises(ValueError, lambda: kidx.drop(["x", "y"], level="lv"))
def test_sort_values(self):
pidx = pd.Index([-10, -100, 200, 100])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
pidx.name = "koalas"
kidx.name = "koalas"
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.from_pandas(pidx)
pidx.names = ["hello", "koalas", "goodbye"]
kidx.names = ["hello", "koalas", "goodbye"]
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
def test_index_drop_duplicates(self):
pidx = pd.Index([1, 1, 2])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values())
pidx = pd.MultiIndex.from_tuples([(1, 1), (1, 1), (2, 2)], names=["level1", "level2"])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values())
def test_index_sort(self):
idx = ks.Index([1, 2, 3, 4, 5])
midx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2)])
with self.assertRaisesRegex(
TypeError, "cannot sort an Index object in-place, use sort_values instead"
):
idx.sort()
with self.assertRaisesRegex(
TypeError, "cannot sort an Index object in-place, use sort_values instead"
):
midx.sort()
def test_multiindex_isna(self):
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(NotImplementedError, "isna is not defined for MultiIndex"):
kidx.isna()
with self.assertRaisesRegex(NotImplementedError, "isna is not defined for MultiIndex"):
kidx.isnull()
with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"):
kidx.notna()
with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"):
kidx.notnull()
def test_index_nunique(self):
pidx = pd.Index([1, 1, 2, None])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.nunique(), kidx.nunique())
self.assert_eq(pidx.nunique(dropna=True), kidx.nunique(dropna=True))
def test_multiindex_nunique(self):
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(NotImplementedError, "notna is not defined for MultiIndex"):
kidx.notnull()
def test_multiindex_rename(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.from_pandas(pidx)
pidx = pidx.rename(list("ABC"))
kidx = kidx.rename(list("ABC"))
self.assert_eq(pidx, kidx)
pidx = pidx.rename(["my", "name", "is"])
kidx = kidx.rename(["my", "name", "is"])
self.assert_eq(pidx, kidx)
def test_multiindex_set_names(self):
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.from_pandas(pidx)
pidx = pidx.set_names(["set", "new", "names"])
kidx = kidx.set_names(["set", "new", "names"])
self.assert_eq(pidx, kidx)
pidx.set_names(["set", "new", "names"], inplace=True)
kidx.set_names(["set", "new", "names"], inplace=True)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names("first", level=0)
kidx = kidx.set_names("first", level=0)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names("second", level=1)
kidx = kidx.set_names("second", level=1)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names("third", level=2)
kidx = kidx.set_names("third", level=2)
self.assert_eq(pidx, kidx)
pidx.set_names("first", level=0, inplace=True)
kidx.set_names("first", level=0, inplace=True)
self.assert_eq(pidx, kidx)
pidx.set_names("second", level=1, inplace=True)
kidx.set_names("second", level=1, inplace=True)
self.assert_eq(pidx, kidx)
pidx.set_names("third", level=2, inplace=True)
kidx.set_names("third", level=2, inplace=True)
self.assert_eq(pidx, kidx)
def test_multiindex_from_tuples(self):
tuples = [(1, "red"), (1, "blue"), (2, "red"), (2, "blue")]
pidx = pd.MultiIndex.from_tuples(tuples)
kidx = ks.MultiIndex.from_tuples(tuples)
self.assert_eq(pidx, kidx)
def test_multiindex_from_product(self):
iterables = [[0, 1, 2], ["green", "purple"]]
pidx = pd.MultiIndex.from_product(iterables)
kidx = ks.MultiIndex.from_product(iterables)
self.assert_eq(pidx, kidx)
def test_multiindex_tuple_column_name(self):
column_labels = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=column_labels)
pdf.set_index(("a", "x"), append=True, inplace=True)
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf, kdf)
def test_len(self):
pidx = pd.Index(range(10000))
kidx = ks.from_pandas(pidx)
self.assert_eq(len(pidx), len(kidx))
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
self.assert_eq(len(pidx), len(kidx))
def test_delete(self):
pidx = pd.Index([10, 9, 8, 7, 6, 7, 8, 9, 10])
kidx = ks.Index([10, 9, 8, 7, 6, 7, 8, 9, 10])
self.assert_eq(pidx.delete(5).sort_values(), kidx.delete(5).sort_values())
self.assert_eq(pidx.delete(-5).sort_values(), kidx.delete(-5).sort_values())
if LooseVersion(np.__version__) < LooseVersion("1.19"):
self.assert_eq(
pidx.delete([0, 10000]).sort_values(), kidx.delete([0, 10000]).sort_values()
)
self.assert_eq(
pidx.delete([10000, 20000]).sort_values(), kidx.delete([10000, 20000]).sort_values()
)
else:
self.assert_eq(pidx.delete([0]).sort_values(), kidx.delete([0, 10000]).sort_values())
self.assert_eq(pidx.delete([]).sort_values(), kidx.delete([10000, 20000]).sort_values())
with self.assertRaisesRegex(IndexError, "index 10 is out of bounds for axis 0 with size 9"):
kidx.delete(10)
pidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
self.assert_eq(pidx.delete(1).sort_values(), kidx.delete(1).sort_values())
self.assert_eq(pidx.delete(-1).sort_values(), kidx.delete(-1).sort_values())
if LooseVersion(np.__version__) < LooseVersion("1.19"):
self.assert_eq(
pidx.delete([0, 10000]).sort_values(), kidx.delete([0, 10000]).sort_values()
)
self.assert_eq(
pidx.delete([10000, 20000]).sort_values(), kidx.delete([10000, 20000]).sort_values()
)
else:
self.assert_eq(pidx.delete([0]).sort_values(), kidx.delete([0, 10000]).sort_values())
self.assert_eq(pidx.delete([]).sort_values(), kidx.delete([10000, 20000]).sort_values())
def test_append(self):
# Index
pidx = pd.Index(range(10000))
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.append(pidx), kidx.append(kidx))
# Index with name
pidx1 = pd.Index(range(10000), name="a")
pidx2 = pd.Index(range(10000), name="b")
kidx1 = ks.from_pandas(pidx1)
kidx2 = ks.from_pandas(pidx2)
self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))
self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))
# Index from DataFrame
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["a", "b", "c"])
pdf2 = pd.DataFrame({"a": [7, 8, 9], "d": [10, 11, 12]}, index=["x", "y", "z"])
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
pidx1 = pdf1.set_index("a").index
pidx2 = pdf2.set_index("d").index
kidx1 = kdf1.set_index("a").index
kidx2 = kdf2.set_index("d").index
self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))
self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))
# Index from DataFrame with MultiIndex columns
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
pdf2 = pd.DataFrame({"a": [7, 8, 9], "d": [10, 11, 12]})
pdf1.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
pdf2.columns = pd.MultiIndex.from_tuples([("a", "x"), ("d", "y")])
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
pidx1 = pdf1.set_index(("a", "x")).index
pidx2 = pdf2.set_index(("d", "y")).index
kidx1 = kdf1.set_index(("a", "x")).index
kidx2 = kdf2.set_index(("d", "y")).index
self.assert_eq(pidx1.append(pidx2), kidx1.append(kidx2))
self.assert_eq(pidx2.append(pidx1), kidx2.append(kidx1))
# MultiIndex
pmidx = pd.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
kmidx = ks.from_pandas(pmidx)
self.assert_eq(pmidx.append(pmidx), kmidx.append(kmidx))
# MultiIndex with names
pmidx1 = pd.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["x", "y", "z"]
)
pmidx2 = pd.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["p", "q", "r"]
)
kmidx1 = ks.from_pandas(pmidx1)
kmidx2 = ks.from_pandas(pmidx2)
self.assert_eq(pmidx1.append(pmidx2), kmidx1.append(kmidx2))
self.assert_eq(pmidx2.append(pmidx1), kmidx2.append(kmidx1))
self.assert_eq(pmidx1.append(pmidx2).names, kmidx1.append(kmidx2).names)
self.assert_eq(pmidx1.append(pmidx2).names, kmidx1.append(kmidx2).names)
# Index & MultiIndex currently is not supported
expected_error_message = r"append\(\) between Index & MultiIndex currently is not supported"
with self.assertRaisesRegex(NotImplementedError, expected_error_message):
kidx.append(kmidx)
with self.assertRaisesRegex(NotImplementedError, expected_error_message):
kmidx.append(kidx)
def test_argmin(self):
pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.argmin(), kidx.argmin())
# MultiIndex
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(
TypeError, "reduction operation 'argmin' not allowed for this dtype"
):
kidx.argmin()
def test_argmax(self):
pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])
kidx = ks.from_pandas(pidx)
self.assert_eq(pidx.argmax(), kidx.argmax())
# MultiIndex
kidx = ks.MultiIndex.from_tuples([("a", "x", 1), ("b", "y", 2), ("c", "z", 3)])
with self.assertRaisesRegex(
TypeError, "reduction operation 'argmax' not allowed for this dtype"
):
kidx.argmax()
def test_monotonic(self):
# test monotonic_increasing & monotonic_decreasing for MultiIndex.
        # Since the behavior for null values changed in pandas >= 1.0.0,
        # several cases are tested differently.
datas = []
# increasing / decreasing ordered each index level with string
datas.append([("w", "a"), ("x", "b"), ("y", "c"), ("z", "d")])
datas.append([("w", "d"), ("x", "c"), ("y", "b"), ("z", "a")])
datas.append([("z", "a"), ("y", "b"), ("x", "c"), ("w", "d")])
datas.append([("z", "d"), ("y", "c"), ("x", "b"), ("w", "a")])
# mixed order each index level with string
datas.append([("z", "a"), ("x", "b"), ("y", "c"), ("w", "d")])
datas.append([("z", "a"), ("y", "c"), ("x", "b"), ("w", "d")])
# increasing / decreasing ordered each index level with integer
datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (5, 500)])
datas.append([(1, 500), (2, 400), (3, 300), (4, 200), (5, 100)])
datas.append([(5, 100), (4, 200), (3, 300), (2, 400), (1, 500)])
datas.append([(5, 500), (4, 400), (3, 300), (2, 200), (1, 100)])
# mixed order each index level with integer
datas.append([(1, 500), (3, 400), (2, 300), (4, 200), (5, 100)])
datas.append([(1, 100), (2, 300), (3, 200), (4, 400), (5, 500)])
# integer / negative mixed tests
datas.append([("a", -500), ("b", -400), ("c", -300), ("d", -200), ("e", -100)])
datas.append([("e", -500), ("d", -400), ("c", -300), ("b", -200), ("a", -100)])
datas.append([(-5, "a"), (-4, "b"), (-3, "c"), (-2, "d"), (-1, "e")])
datas.append([(-5, "e"), (-4, "d"), (-3, "c"), (-2, "b"), (-1, "a")])
datas.append([(-5, "e"), (-3, "d"), (-2, "c"), (-4, "b"), (-1, "a")])
datas.append([(-5, "e"), (-4, "c"), (-3, "b"), (-2, "d"), (-1, "a")])
# None type tests (None type is treated as the smallest value)
datas.append([(1, 100), (2, 200), (None, 300), (4, 400), (5, 500)])
datas.append([(5, None), (4, 200), (3, 300), (2, 400), (1, 500)])
datas.append([(5, 100), (4, 200), (3, None), (2, 400), (1, 500)])
datas.append([(5, 100), (4, 200), (3, 300), (2, 400), (1, None)])
datas.append([(1, 100), (2, 200), (None, None), (4, 400), (5, 500)])
datas.append([(-5, None), (-4, None), (-3, None), (-2, None), (-1, None)])
datas.append([(None, "e"), (None, "c"), (None, "b"), (None, "d"), (None, "a")])
datas.append([(None, None), (None, None), (None, None), (None, None), (None, None)])
# duplicated index value tests
datas.append([("x", "d"), ("y", "c"), ("y", "b"), ("z", "a")])
datas.append([("x", "d"), ("y", "b"), ("y", "c"), ("z", "a")])
datas.append([("x", "d"), ("y", "c"), ("y", None), ("z", "a")])
datas.append([("x", "d"), ("y", None), ("y", None), ("z", "a")])
datas.append([("x", "d"), ("y", "c"), ("y", "b"), (None, "a")])
datas.append([("x", "d"), ("y", "b"), ("y", "c"), (None, "a")])
# more depth tests
datas.append([("x", "d", "o"), ("y", "c", "p"), ("y", "c", "q"), ("z", "a", "r")])
datas.append([("x", "d", "o"), ("y", "c", "q"), ("y", "c", "p"), ("z", "a", "r")])
datas.append([("x", "d", "o"), ("y", "c", "p"), ("y", "c", None), ("z", "a", "r")])
datas.append([("x", "d", "o"), ("y", "c", None), ("y", "c", None), ("z", "a", "r")])
for data in datas:
with self.subTest(data=data):
pmidx = pd.MultiIndex.from_tuples(data)
kmidx = ks.from_pandas(pmidx)
self.assert_eq(kmidx.is_monotonic_increasing, pmidx.is_monotonic_increasing)
self.assert_eq(kmidx.is_monotonic_decreasing, pmidx.is_monotonic_decreasing)
        # The data below give different results depending on the pandas version,
        # because the behavior of handling null values changed in pandas >= 1.0.0.
datas = []
datas.append([(None, 100), (2, 200), (3, 300), (4, 400), (5, 500)])
datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (None, 500)])
datas.append([(None, None), (2, 200), (3, 300), (4, 400), (5, 500)])
datas.append([(1, 100), (2, 200), (3, 300), (4, 400), (None, None)])
datas.append([("x", "d"), ("y", None), ("y", "c"), ("z", "a")])
datas.append([("x", "d", "o"), ("y", "c", None), ("y", "c", "q"), ("z", "a", "r")])
for data in datas:
with self.subTest(data=data):
pmidx = pd.MultiIndex.from_tuples(data)
kmidx = ks.from_pandas(pmidx)
expected_increasing_result = pmidx.is_monotonic_increasing
if LooseVersion(pd.__version__) < LooseVersion("1.0.0"):
expected_increasing_result = not expected_increasing_result
self.assert_eq(kmidx.is_monotonic_increasing, expected_increasing_result)
self.assert_eq(kmidx.is_monotonic_decreasing, pmidx.is_monotonic_decreasing)
def test_difference(self):
# Index
kidx1 = ks.Index([1, 2, 3, 4], name="koalas")
kidx2 = ks.Index([3, 4, 5, 6], name="koalas")
pidx1 = kidx1.to_pandas()
pidx2 = kidx2.to_pandas()
self.assert_eq(kidx1.difference(kidx2).sort_values(), pidx1.difference(pidx2).sort_values())
self.assert_eq(
kidx1.difference([3, 4, 5, 6]).sort_values(),
pidx1.difference([3, 4, 5, 6]).sort_values(),
)
self.assert_eq(
kidx1.difference((3, 4, 5, 6)).sort_values(),
pidx1.difference((3, 4, 5, 6)).sort_values(),
)
self.assert_eq(
kidx1.difference({3, 4, 5, 6}).sort_values(),
pidx1.difference({3, 4, 5, 6}).sort_values(),
)
self.assert_eq(
kidx1.difference({3: 1, 4: 2, 5: 3, 6: 4}).sort_values(),
pidx1.difference({3: 1, 4: 2, 5: 3, 6: 4}).sort_values(),
)
# Exceptions for Index
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference("1234")
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(1234)
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(12.34)
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(None)
with self.assertRaisesRegex(TypeError, "Input must be Index or array-like"):
kidx1.difference(np.nan)
with self.assertRaisesRegex(
ValueError, "The 'sort' keyword only takes the values of None or True; 1 was passed."
):
kidx1.difference(kidx2, sort=1)
# MultiIndex
kidx1 = ks.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "y", 2), ("c", "z", 3)], names=["hello", "koalas", "world"]
)
kidx2 = ks.MultiIndex.from_tuples(
[("a", "x", 1), ("b", "z", 2), ("k", "z", 3)], names=["hello", "koalas", "world"]
)
pidx1 = kidx1.to_pandas()
pidx2 = kidx2.to_pandas()
self.assert_eq(kidx1.difference(kidx2).sort_values(), pidx1.difference(pidx2).sort_values())
self.assert_eq(
kidx1.difference({("a", "x", 1)}).sort_values(),
pidx1.difference({("a", "x", 1)}).sort_values(),
)
self.assert_eq(
kidx1.difference({("a", "x", 1): [1, 2, 3]}).sort_values(),
pidx1.difference({("a", "x", 1): [1, 2, 3]}).sort_values(),
)
# Exceptions for MultiIndex
with self.assertRaisesRegex(TypeError, "other must be a MultiIndex or a list of tuples"):
kidx1.difference(["b", "z", "2"])
def test_repeat(self):
pidx = pd.Index(["a", "b", "c"])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.repeat(3).sort_values(), pidx.repeat(3).sort_values())
self.assert_eq(kidx.repeat(0).sort_values(), pidx.repeat(0).sort_values())
self.assert_eq((kidx + "x").repeat(3).sort_values(), (pidx + "x").repeat(3).sort_values())
self.assertRaises(ValueError, lambda: kidx.repeat(-1))
self.assertRaises(ValueError, lambda: kidx.repeat("abc"))
pmidx = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")])
kmidx = ks.from_pandas(pmidx)
self.assert_eq(kmidx.repeat(3).sort_values(), pmidx.repeat(3).sort_values())
self.assert_eq(kmidx.repeat(0).sort_values(), pmidx.repeat(0).sort_values())
self.assertRaises(ValueError, lambda: kmidx.repeat(-1))
self.assertRaises(ValueError, lambda: kmidx.repeat("abc"))
def test_unique(self):
pidx = pd.Index(["a", "b", "a"])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.unique().sort_values(), pidx.unique().sort_values())
self.assert_eq(kidx.unique().sort_values(), pidx.unique().sort_values())
pmidx = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "a")])
kmidx = ks.from_pandas(pmidx)
self.assert_eq(kmidx.unique().sort_values(), pmidx.unique().sort_values())
self.assert_eq(kmidx.unique().sort_values(), pmidx.unique().sort_values())
def test_asof(self):
# Increasing values
pidx = pd.Index(["2013-12-31", "2014-01-02", "2014-01-03"])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.asof("2014-01-01"), pidx.asof("2014-01-01"))
self.assert_eq(kidx.asof("2014-01-02"), pidx.asof("2014-01-02"))
self.assert_eq(repr(kidx.asof("1999-01-02")), repr(pidx.asof("1999-01-02")))
# Decreasing values
pidx = pd.Index(["2014-01-03", "2014-01-02", "2013-12-31"])
kidx = ks.from_pandas(pidx)
self.assert_eq(kidx.asof("2014-01-01"), pidx.asof("2014-01-01"))
self.assert_eq(kidx.asof("2014-01-02"), pidx.asof("2014-01-02"))
self.assert_eq(kidx.asof("1999-01-02"), pidx.asof("1999-01-02"))
self.assert_eq(repr(kidx.asof("2015-01-02")), repr(pidx.asof("2015-01-02")))
# Not increasing, neither decreasing (ValueError)
kidx = ks.Index(["2013-12-31", "2015-01-02", "2014-01-03"])
self.assertRaises(ValueError, lambda: kidx.asof("2013-12-31"))
kmidx = ks.MultiIndex.from_tuples([("a", "a"), ("a", "b"), ("a", "c")])
self.assertRaises(NotImplementedError, lambda: kmidx.asof(("a", "b")))
def test_union(self):
# Index
pidx1 = pd.Index([1, 2, 3, 4])
pidx2 = pd.Index([3, 4, 5, 6])
kidx1 = ks.from_pandas(pidx1)
kidx2 = ks.from_pandas(pidx2)
self.assert_eq(kidx1.union(kidx2), pidx1.union(pidx2))
self.assert_eq(kidx2.union(kidx1), pidx2.union(pidx1))
self.assert_eq(
kidx1.union([3, 4, 5, 6]), pidx1.union([3, 4, 5, 6]),
)
self.assert_eq(
kidx2.union([1, 2, 3, 4]), pidx2.union([1, 2, 3, 4]),
)
self.assert_eq(
kidx1.union(ks.Series([3, 4, 5, 6])), pidx1.union(pd.Series([3, 4, 5, 6])),
)
self.assert_eq(
kidx2.union(ks.Series([1, 2, 3, 4])), pidx2.union( | pd.Series([1, 2, 3, 4]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_PO Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #t-C #source: van Beijma et al. (2018)
initAGB_min = 233-72 #t-C
initAGB_max = 233 + 72 #t-C
#parameters for oil palm plantation. Source: Khasanah et al. (2015)
tf_palmoil = 26 #years
a_nucleus = 2.8167
b_nucleus = 6.8648
a_plasma = 2.5449
b_plasma = 5.0007
c_cont_po_nucleus = 0.5448 #fraction of carbon content in biomass
c_cont_po_plasma = 0.5454 #fraction of carbon content in biomass
tf = 201 #years
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
c_firewood_energy_S2nu = df2nu['Firewood_other_energy_use'].values
c_firewood_energy_S2pl = df2pl['Firewood_other_energy_use'].values
c_firewood_energy_Enu = df3nu['Firewood_other_energy_use'].values
c_firewood_energy_Epl = df3pl['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
c_pellets_Enu = dfEnu['Wood_pellets'].values
c_pellets_Epl = dfEpl['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
tf = 201
t = np.arange(tf)
decomp_emissions = df['C_remainAGB'].values
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
#product lifetime
#building materials
B = 35
TestDSM2nu = DynamicStockModel(t = df2nu['Year'].values, i = df2nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM2pl = DynamicStockModel(t = df2pl['Year'].values, i = df2pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3nu = DynamicStockModel(t = df3nu['Year'].values, i = df3nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3pl = DynamicStockModel(t = df3pl['Year'].values, i = df3pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
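#note: the inflow-driven dynamic stock model spreads each year's inflow (the 'Input_PF' column) over
#future years using a normal lifetime distribution (mean B = 35 yr, standard deviation 0.3*B).
#The calls below derive, in order: stock by cohort, total stock, outflow by cohort, total outflow
#(TestDSM*.o, used later as the end-of-life outflow of the product pool), stock change, and a mass-balance check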
CheckStr2nu, ExitFlag2nu = TestDSM2nu.dimension_check()
CheckStr2pl, ExitFlag2pl = TestDSM2pl.dimension_check()
CheckStr3nu, ExitFlag3nu = TestDSM3nu.dimension_check()
CheckStr3pl, ExitFlag3pl = TestDSM3pl.dimension_check()
Stock_by_cohort2nu, ExitFlag2nu = TestDSM2nu.compute_s_c_inflow_driven()
Stock_by_cohort2pl, ExitFlag2pl = TestDSM2pl.compute_s_c_inflow_driven()
Stock_by_cohort3nu, ExitFlag3nu = TestDSM3nu.compute_s_c_inflow_driven()
Stock_by_cohort3pl, ExitFlag3pl = TestDSM3pl.compute_s_c_inflow_driven()
S2nu, ExitFlag2nu = TestDSM2nu.compute_stock_total()
S2pl, ExitFlag2pl = TestDSM2pl.compute_stock_total()
S3nu, ExitFlag3nu = TestDSM3nu.compute_stock_total()
S3pl, ExitFlag3pl = TestDSM3pl.compute_stock_total()
O_C2nu, ExitFlag2nu = TestDSM2nu.compute_o_c_from_s_c()
O_C2pl, ExitFlag2pl = TestDSM2pl.compute_o_c_from_s_c()
O_C3nu, ExitFlag3nu = TestDSM3nu.compute_o_c_from_s_c()
O_C3pl, ExitFlag3pl = TestDSM3pl.compute_o_c_from_s_c()
O2nu, ExitFlag2nu = TestDSM2nu.compute_outflow_total()
O2pl, ExitFlag2pl = TestDSM2pl.compute_outflow_total()
O3nu, ExitFlag3nu = TestDSM3nu.compute_outflow_total()
O3pl, ExitFlag3pl = TestDSM3pl.compute_outflow_total()
DS2nu, ExitFlag2nu = TestDSM2nu.compute_stock_change()
DS2pl, ExitFlag2pl = TestDSM2pl.compute_stock_change()
DS3nu, ExitFlag3nu = TestDSM3nu.compute_stock_change()
DS3pl, ExitFlag3pl = TestDSM3pl.compute_stock_change()
Bal2nu, ExitFlag2nu = TestDSM2nu.check_stock_balance()
Bal2pl, ExitFlag2pl = TestDSM2pl.check_stock_balance()
Bal3nu, ExitFlag3nu = TestDSM3nu.check_stock_balance()
Bal3pl, ExitFlag3pl = TestDSM3pl.check_stock_balance()
#print output flow
print(TestDSM2nu.o)
print(TestDSM2pl.o)
print(TestDSM3nu.o)
print(TestDSM3pl.o)
#%%
#Step (5): Biomass growth
#Model I Oil Palm Biomass Growth (Khasanah et al. (2015))
A = range(0,tf_palmoil,1)
#calculate the biomass and carbon content of palm oil trees over time
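#Khasanah et al. (2015) model AGB as a linear function of stand age A (a*A + b); multiplying by the carbon
#content fraction converts biomass to carbon, 44/12 converts carbon mass to CO2 mass, and the factor 1000
#is a unit rescaling (presumably t to kg CO2-eq per hectare, to match the emission columns used in Step 9)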
def Y_nucleus(A):
return (44/12*1000*c_cont_po_nucleus*(a_nucleus*A + b_nucleus))
output_Y_nucleus = np.array([Y_nucleus(Ai) for Ai in A])
print(output_Y_nucleus)
def Y_plasma(A):
return (44/12*1000*c_cont_po_plasma*(a_plasma*A + b_plasma))
output_Y_plasma = np.array([Y_plasma(Ai) for Ai in A])
print(output_Y_plasma)
##8 times 25-year cycle of new AGB of oil palm, one year gap between the cycle
#nucleus
counter = range(0,8,1)
y_nucleus = []
for i in counter:
y_nucleus.append(output_Y_nucleus)
flat_list_nucleus = []
for sublist in y_nucleus:
for item in sublist:
flat_list_nucleus.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_nucleus = flat_list_nucleus[:len(flat_list_nucleus)-7]
#plasma
y_plasma = []
for i in counter:
y_plasma.append(output_Y_plasma)
flat_list_plasma = []
for sublist in y_plasma:
for item in sublist:
flat_list_plasma.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_plasma = flat_list_plasma[:len(flat_list_plasma)-7]
#plotting
t = range (0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_nucleus)
plt.plot(t, flat_list_plasma, color='seagreen')
plt.fill_between(t, flat_list_nucleus, flat_list_plasma, color='darkseagreen', alpha=0.4)
plt.xlabel('Time (year)')
plt.ylabel('AGB (tCO2-eq/ha)')
plt.show()
###Yearly Sequestration
###Nucleus
#find the yearly sequestration by calculating the differences between consecutive elements in 'flat_list_nucleus' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_nucleus = [p - q for q, p in zip(flat_list_nucleus, flat_list_nucleus[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_nuclues' with 0 values
flat_list_nucleus = [0 if i < 0 else i for i in flat_list_nucleus]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_nucleus.insert(0,var)
#make 'flat_list_nucleus' elements negative numbers to denote sequestration
flat_list_nucleus = [ -x for x in flat_list_nucleus]
print(flat_list_nucleus)
#Plasma
#find the yearly sequestration by calculating the differences between consecutive elements in 'flat_list_plasma' (https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_plasma = [t - u for u, t in zip(flat_list_plasma, flat_list_plasma[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_plasma' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_plasma = [0 if i < 0 else i for i in flat_list_plasma]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_plasma.insert(0,var)
#make 'flat_list_plasma' elements negative numbers to denote sequestration
flat_list_plasma = [ -x for x in flat_list_plasma]
print(flat_list_plasma)
#%%
#Step(6): post-harvest processing of wood/palm oil
#post-harvest wood processing
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_HWP_S2nu = df2nu['PH_Emissions_HWP'].values
PH_Emissions_HWP_S2pl = df2pl['PH_Emissions_HWP'].values
PH_Emissions_HWP_Enu = dfEnu['PH_Emissions_HWP'].values
PH_Emissions_HWP_Epl = dfEpl['PH_Emissions_HWP'].values
#post-harvest palm oil processing
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_PO_S2nu = df2nu['PH_Emissions_PO'].values
PH_Emissions_PO_S2pl = df2pl['PH_Emissions_PO'].values
PH_Emissions_PO_Enu = dfEnu['PH_Emissions_PO'].values
PH_Emissions_PO_Epl = dfEpl['PH_Emissions_PO'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
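#first-order decay with a 20-year half-life: the fraction of deposited carbon remaining after t years is
#exp(-k*t), so the factor (1-(1-np.exp(-k*t))) used below simplifies to exp(-k*t) (e.g. 0.5 at t = 20).
#In each scenario block, column i of the output/subs matrices tracks the waste deposited in year i;
#the year-to-year differences give that cohort's yearly emissions, and summing over the columns gives the
#total landfill CH4 (and, in Step 7_2, CO2) released per year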
#S2nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
tf = 201
t = np.arange(tf)
def decomp_CH4_S2nu(t,remainAGB_CH4_S2nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S2nu
#set zero matrix
output_decomp_CH4_S2nu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S2nu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S2nu[i:,i] = decomp_CH4_S2nu(t[:len(t)-i],remain_part_CH4_S2nu)
print(output_decomp_CH4_S2nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S2nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S2nu[:,i] = np.diff(output_decomp_CH4_S2nu[:,i])
i = i + 1
print(subs_matrix_CH4_S2nu[:,:4])
print(len(subs_matrix_CH4_S2nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S2nu = subs_matrix_CH4_S2nu.clip(max=0)
print(subs_matrix_CH4_S2nu[:,:4])
#make the results as absolute values
subs_matrix_CH4_S2nu = abs(subs_matrix_CH4_S2nu)
print(subs_matrix_CH4_S2nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S2nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S2nu)
subs_matrix_CH4_S2nu = np.vstack((zero_matrix_CH4_S2nu, subs_matrix_CH4_S2nu))
print(subs_matrix_CH4_S2nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S2nu = (tf,1)
decomp_tot_CH4_S2nu = np.zeros(matrix_tot_CH4_S2nu)
i = 0
while i < tf:
decomp_tot_CH4_S2nu[:,0] = decomp_tot_CH4_S2nu[:,0] + subs_matrix_CH4_S2nu[:,i]
i = i + 1
print(decomp_tot_CH4_S2nu[:,0])
#S2pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
tf = 201
t = np.arange(tf)
def decomp_CH4_S2pl(t,remainAGB_CH4_S2pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S2pl
#set zero matrix
output_decomp_CH4_S2pl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S2pl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S2pl[i:,i] = decomp_CH4_S2pl(t[:len(t)-i],remain_part_CH4_S2pl)
print(output_decomp_CH4_S2pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S2pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_S2pl[:,i] = np.diff(output_decomp_CH4_S2pl[:,i])
i = i + 1
print(subs_matrix_CH4_S2pl[:,:4])
print(len(subs_matrix_CH4_S2pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S2pl = subs_matrix_CH4_S2pl.clip(max=0)
print(subs_matrix_CH4_S2pl[:,:4])
#make the results as absolute values
subs_matrix_CH4_S2pl = abs(subs_matrix_CH4_S2pl)
print(subs_matrix_CH4_S2pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S2pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S2pl)
subs_matrix_CH4_S2pl = np.vstack((zero_matrix_CH4_S2pl, subs_matrix_CH4_S2pl))
print(subs_matrix_CH4_S2pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S2pl = (tf,1)
decomp_tot_CH4_S2pl = np.zeros(matrix_tot_CH4_S2pl)
i = 0
while i < tf:
decomp_tot_CH4_S2pl[:,0] = decomp_tot_CH4_S2pl[:,0] + subs_matrix_CH4_S2pl[:,i]
i = i + 1
print(decomp_tot_CH4_S2pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CH4_Enu(t,remainAGB_CH4_Enu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Enu
#set zero matrix
output_decomp_CH4_Enu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Enu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_Enu[i:,i] = decomp_CH4_Enu(t[:len(t)-i],remain_part_CH4_Enu)
print(output_decomp_CH4_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_Enu[:,i] = np.diff(output_decomp_CH4_Enu[:,i])
i = i + 1
print(subs_matrix_CH4_Enu[:,:4])
print(len(subs_matrix_CH4_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Enu = subs_matrix_CH4_Enu.clip(max=0)
print(subs_matrix_CH4_Enu[:,:4])
#make the results as absolute values
subs_matrix_CH4_Enu = abs(subs_matrix_CH4_Enu)
print(subs_matrix_CH4_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Enu)
subs_matrix_CH4_Enu = np.vstack((zero_matrix_CH4_Enu, subs_matrix_CH4_Enu))
print(subs_matrix_CH4_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_Enu = (tf,1)
decomp_tot_CH4_Enu= np.zeros(matrix_tot_CH4_Enu)
i = 0
while i < tf:
decomp_tot_CH4_Enu[:,0] = decomp_tot_CH4_Enu[:,0] + subs_matrix_CH4_Enu[:,i]
i = i + 1
print(decomp_tot_CH4_Enu[:,0])
#Epl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CH4_Epl(t,remainAGB_CH4_Epl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_Epl
#set zero matrix
output_decomp_CH4_Epl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_Epl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_Epl[i:,i] = decomp_CH4_Epl(t[:len(t)-i],remain_part_CH4_Epl)
print(output_decomp_CH4_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CH4'].values-1)))
i = 0
while i < tf:
subs_matrix_CH4_Epl[:,i] = np.diff(output_decomp_CH4_Epl[:,i])
i = i + 1
print(subs_matrix_CH4_Epl[:,:4])
print(len(subs_matrix_CH4_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_Epl = subs_matrix_CH4_Epl.clip(max=0)
print(subs_matrix_CH4_Epl[:,:4])
#make the results as absolute values
subs_matrix_CH4_Epl = abs(subs_matrix_CH4_Epl)
print(subs_matrix_CH4_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_Epl)
subs_matrix_CH4_Epl = np.vstack((zero_matrix_CH4_Epl, subs_matrix_CH4_Epl))
print(subs_matrix_CH4_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_Epl = (tf,1)
decomp_tot_CH4_Epl = np.zeros(matrix_tot_CH4_Epl)
i = 0
while i < tf:
decomp_tot_CH4_Epl[:,0] = decomp_tot_CH4_Epl[:,0] + subs_matrix_CH4_Epl[:,i]
i = i + 1
print(decomp_tot_CH4_Epl[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S2nu,label='CH4_S2nu')
plt.plot(t,decomp_tot_CH4_S2pl,label='CH4_S2pl')
plt.plot(t,decomp_tot_CH4_Enu,label='CH4_Enu')
plt.plot(t,decomp_tot_CH4_Epl,label='CH4_Epl')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (7_2): landfill gas decomposition (CO2)
#CO2 decomposition
hl = 20 #half-life
k = (np.log(2))/hl
#S2nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
tf = 201
t = np.arange(tf)
def decomp_CO2_S2nu(t,remainAGB_CO2_S2nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S2nu
#set zero matrix
output_decomp_CO2_S2nu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S2nu in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S2nu[i:,i] = decomp_CO2_S2nu(t[:len(t)-i],remain_part_CO2_S2nu)
print(output_decomp_CO2_S2nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S2nu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_S2nu[:,i] = np.diff(output_decomp_CO2_S2nu[:,i])
i = i + 1
print(subs_matrix_CO2_S2nu[:,:4])
print(len(subs_matrix_CO2_S2nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S2nu = subs_matrix_CO2_S2nu.clip(max=0)
print(subs_matrix_CO2_S2nu[:,:4])
#make the results as absolute values
subs_matrix_CO2_S2nu = abs(subs_matrix_CO2_S2nu)
print(subs_matrix_CO2_S2nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S2nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S2nu)
subs_matrix_CO2_S2nu = np.vstack((zero_matrix_CO2_S2nu, subs_matrix_CO2_S2nu))
print(subs_matrix_CO2_S2nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S2nu = (tf,1)
decomp_tot_CO2_S2nu = np.zeros(matrix_tot_CO2_S2nu)
i = 0
while i < tf:
decomp_tot_CO2_S2nu[:,0] = decomp_tot_CO2_S2nu[:,0] + subs_matrix_CO2_S2nu[:,i]
i = i + 1
print(decomp_tot_CO2_S2nu[:,0])
#S2pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
tf = 201
t = np.arange(tf)
def decomp_CO2_S2pl(t,remainAGB_CO2_S2pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_S2pl
#set zero matrix
output_decomp_CO2_S2pl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_S2pl in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_S2pl[i:,i] = decomp_CO2_S2pl(t[:len(t)-i],remain_part_CO2_S2pl)
print(output_decomp_CO2_S2pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_S2pl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_S2pl[:,i] = np.diff(output_decomp_CO2_S2pl[:,i])
i = i + 1
print(subs_matrix_CO2_S2pl[:,:4])
print(len(subs_matrix_CO2_S2pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_S2pl = subs_matrix_CO2_S2pl.clip(max=0)
print(subs_matrix_CO2_S2pl[:,:4])
#make the results as absolute values
subs_matrix_CO2_S2pl = abs(subs_matrix_CO2_S2pl)
print(subs_matrix_CO2_S2pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_S2pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_S2pl)
subs_matrix_CO2_S2pl = np.vstack((zero_matrix_CO2_S2pl, subs_matrix_CO2_S2pl))
print(subs_matrix_CO2_S2pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_S2pl = (tf,1)
decomp_tot_CO2_S2pl = np.zeros(matrix_tot_CO2_S2pl)
i = 0
while i < tf:
decomp_tot_CO2_S2pl[:,0] = decomp_tot_CO2_S2pl[:,0] + subs_matrix_CO2_S2pl[:,i]
i = i + 1
print(decomp_tot_CO2_S2pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
def decomp_CO2_Enu(t,remainAGB_CO2_Enu):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Enu
#set zero matrix
output_decomp_CO2_Enu = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Enu in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_Enu[i:,i] = decomp_CO2_Enu(t[:len(t)-i],remain_part_CO2_Enu)
print(output_decomp_CO2_Enu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_Enu = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_Enu[:,i] = np.diff(output_decomp_CO2_Enu[:,i])
i = i + 1
print(subs_matrix_CO2_Enu[:,:4])
print(len(subs_matrix_CO2_Enu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Enu = subs_matrix_CO2_Enu.clip(max=0)
print(subs_matrix_CO2_Enu[:,:4])
#make the results as absolute values
subs_matrix_CO2_Enu = abs(subs_matrix_CO2_Enu)
print(subs_matrix_CO2_Enu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Enu = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Enu)
subs_matrix_CO2_Enu = np.vstack((zero_matrix_CO2_Enu, subs_matrix_CO2_Enu))
print(subs_matrix_CO2_Enu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Enu = (tf,1)
decomp_tot_CO2_Enu= np.zeros(matrix_tot_CO2_Enu)
i = 0
while i < tf:
decomp_tot_CO2_Enu[:,0] = decomp_tot_CO2_Enu[:,0] + subs_matrix_CO2_Enu[:,i]
i = i + 1
print(decomp_tot_CO2_Enu[:,0])
#Epl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
tf = 201
t = np.arange(tf)
def decomp_CO2_Epl(t,remainAGB_CO2_Epl):
return (1-(1-np.exp(-k*t)))*remainAGB_CO2_Epl
#set zero matrix
output_decomp_CO2_Epl = np.zeros((len(t),len(df['Landfill_decomp_CO2'].values)))
for i,remain_part_CO2_Epl in enumerate(df['Landfill_decomp_CO2'].values):
#print(i,remain_part)
output_decomp_CO2_Epl[i:,i] = decomp_CO2_Epl(t[:len(t)-i],remain_part_CO2_Epl)
print(output_decomp_CO2_Epl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CO2_Epl = np.zeros((len(t)-1,len(df['Landfill_decomp_CO2'].values-1)))
i = 0
while i < tf:
subs_matrix_CO2_Epl[:,i] = np.diff(output_decomp_CO2_Epl[:,i])
i = i + 1
print(subs_matrix_CO2_Epl[:,:4])
print(len(subs_matrix_CO2_Epl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CO2_Epl = subs_matrix_CO2_Epl.clip(max=0)
print(subs_matrix_CO2_Epl[:,:4])
#make the results as absolute values
subs_matrix_CO2_Epl = abs(subs_matrix_CO2_Epl)
print(subs_matrix_CO2_Epl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CO2_Epl = np.zeros((len(t)-200,len(df['Landfill_decomp_CO2'].values)))
print(zero_matrix_CO2_Epl)
subs_matrix_CO2_Epl = np.vstack((zero_matrix_CO2_Epl, subs_matrix_CO2_Epl))
print(subs_matrix_CO2_Epl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CO2_Epl = (tf,1)
decomp_tot_CO2_Epl = np.zeros(matrix_tot_CO2_Epl)
i = 0
while i < tf:
decomp_tot_CO2_Epl[:,0] = decomp_tot_CO2_Epl[:,0] + subs_matrix_CO2_Epl[:,i]
i = i + 1
print(decomp_tot_CO2_Epl[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S2nu,label='CO2_S2nu')
plt.plot(t,decomp_tot_CO2_S2pl,label='CO2_S2pl')
plt.plot(t,decomp_tot_CO2_Enu,label='CO2_Enu')
plt.plot(t,decomp_tot_CO2_Epl,label='CO2_Epl')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO
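#each component below is a per-year time series of length tf; zip(*...) pairs the values year by year,
#so [sum(x) for x in zip(*...)] returns the total CO2 flux per year summed over all sources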
Emissions_PF_PO_S2nu = [c_firewood_energy_S2nu, decomp_emissions, TestDSM2nu.o, PH_Emissions_PO_S2nu, PH_Emissions_HWP_S2nu, decomp_tot_CO2_S2nu[:,0]]
Emissions_PF_PO_S2pl = [c_firewood_energy_S2pl, decomp_emissions, TestDSM2pl.o, PH_Emissions_PO_S2pl, PH_Emissions_HWP_S2pl, decomp_tot_CO2_S2pl[:,0]]
Emissions_PF_PO_Enu = [c_firewood_energy_Enu, c_pellets_Enu, decomp_emissions, TestDSM3nu.o, PH_Emissions_PO_Enu, PH_Emissions_HWP_Enu, decomp_tot_CO2_Enu[:,0]]
Emissions_PF_PO_Epl = [c_firewood_energy_Epl, c_pellets_Epl, decomp_emissions, TestDSM3pl.o, PH_Emissions_PO_Epl, PH_Emissions_HWP_Epl, decomp_tot_CO2_Epl[:,0]]
Emissions_PF_PO_S2nu = [sum(x) for x in zip(*Emissions_PF_PO_S2nu)]
Emissions_PF_PO_S2pl = [sum(x) for x in zip(*Emissions_PF_PO_S2pl)]
Emissions_PF_PO_Enu = [sum(x) for x in zip(*Emissions_PF_PO_Enu)]
Emissions_PF_PO_Epl = [sum(x) for x in zip(*Emissions_PF_PO_Epl)]
#CH4_S2nu
Emissions_CH4_PF_PO_S2nu = decomp_tot_CH4_S2nu[:,0]
#CH4_S2pl
Emissions_CH4_PF_PO_S2pl = decomp_tot_CH4_S2pl[:,0]
#CH4_Enu
Emissions_CH4_PF_PO_Enu = decomp_tot_CH4_Enu[:,0]
#CH4_Epl
Emissions_CH4_PF_PO_Epl = decomp_tot_CH4_Epl[:,0]
#%%
#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation
#print year column
year = []
for x in range (0, tf):
year.append(x)
print (year)
#print CH4 emission column
import itertools
lst = [0]
Emissions_CH4 = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst))
print(Emissions_CH4)
#print emission ref
lst1 = [0]
Emission_ref = list(itertools.chain.from_iterable(itertools.repeat(x, tf) for x in lst1))
print(Emission_ref)
#replace the first element with 1 to denote the emission reference as year 0 (for dynGWP calculation)
Emission_ref[0] = 1
print(Emission_ref)
Col1 = year
Col2_S2nu = Emissions_PF_PO_S2nu
Col2_S2pl = Emissions_PF_PO_S2pl
Col2_Enu = Emissions_PF_PO_Enu
Col2_Epl = Emissions_PF_PO_Epl
Col3_S2nu = Emissions_CH4_PF_PO_S2nu
Col3_S2pl = Emissions_CH4_PF_PO_S2pl
Col3_Enu = Emissions_CH4_PF_PO_Enu
Col3_Epl = Emissions_CH4_PF_PO_Epl
Col4 = flat_list_nucleus
Col5 = Emission_ref
Col6 = flat_list_plasma
#S2
df2_nu = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S2nu,'kg_CH4':Col3_S2nu,'kg_CO2_seq':Col4,'emission_ref':Col5})
df2_pl = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S2pl,'kg_CH4':Col3_S2pl,'kg_CO2_seq':Col6,'emission_ref':Col5})
#E
df3_nu = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_Enu,'kg_CH4':Col3_Enu,'kg_CO2_seq':Col4,'emission_ref':Col5})
df3_pl = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_Epl,'kg_CH4':Col3_Epl,'kg_CO2_seq':Col6,'emission_ref':Col5})
writer = pd.ExcelWriter('emissions_seq_PF_PO_RB.xlsx', engine = 'xlsxwriter')
df2_nu.to_excel(writer, sheet_name = 'S2_nucleus', header=True, index=False)
df2_pl.to_excel(writer, sheet_name = 'S2_plasma', header=True, index=False)
df3_nu.to_excel(writer, sheet_name = 'E_nucleus', header=True, index=False)
df3_pl.to_excel(writer, sheet_name = 'E_plasma', header=True, index=False)
writer.save()
writer.close()
#%%
## DYNAMIC LCA - wood-based scenarios
# Step (10): Set General Parameters for Dynamic LCA calculation
aCH4 = 0.129957e-12; # methane - instantaneous radiative forcing per unit mass [W/m2 /kgCH4]
TauCH4 = 12; # methane - lifetime (years)
aCO2 = 0.0018088e-12; # CO2 - instantaneous radiative forcing per unit mass [W/m2 /kgCO2]
TauCO2 = [172.9, 18.51, 1.186]; # CO2 parameters according to Bern carbon cycle-climate model
aBern = [0.259, 0.338, 0.186]; # CO2 parameters according to Bern carbon cycle-climate model
a0Bern = 0.217; # CO2 parameters according to Bern carbon cycle-climate model
tf = 202 #until 202 because we want to get the DCF(t-i) until DCF(201) to determine the impact from the emission from the year 200 (There is no DCF(0))
#%%
#Step (11): Bern 2.5 CC Model, determine atmospheric load (C(t)) for GHG (CO2 and CH4)
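#C_CO2(t) is the Bern impulse response function: the fraction of a CO2 pulse still airborne after t years,
#a constant a0Bern plus three exponential decay terms; C_CH4(t) is a single exponential with the 12-year
#perturbation lifetime of methane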
t = range(0,tf,1)
## CO2 calculation formula
# time dependant atmospheric load for CO2, Bern model
def C_CO2(t):
return a0Bern + aBern[0]*np.exp(-t/TauCO2[0]) + aBern[1]*np.exp(-t/TauCO2[1]) + aBern[2]*np.exp(-t/TauCO2[2])
output_CO2 = np.array([C_CO2(ti) for ti in t])
print(output_CO2)
## CH4 calculation formula
# time dependant atmospheric load for non-CO2 GHGs (Methane)
def C_CH4(t):
return np.exp(-t/TauCH4)
output_CH4 = np.array([C_CH4(ti) for ti in t])
plt.xlim([0, 200])
plt.ylim([0,1.1])
plt.plot(t, output_CO2, output_CH4)
plt.xlabel('Time (year)')
plt.ylabel('Fraction of CO$_2$')
plt.show()
output_CH4.size
#%%
#determine the C(t) for CO2
s = []
t = np.arange(0,tf,1)
for i in t:
s.append(quad(C_CO2,i-1,i))
res_list_CO2 = [x[0] for x in s]
len(res_list_CO2)
#%%
#determine the C(t) for CH4
s = []
for i in t:
s.append(quad(C_CH4,i-1,i))
res_list_CH4 = [p[0] for p in s]
#plot
plt.xlim([0, 200])
plt.ylim([0,1.5])
plt.plot(t, res_list_CO2, res_list_CH4)
plt.show()
#%%
#Step (12): Determine dynamic characterization factors (DCF) for CO2 and CH4
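#DCF_inst(i) = a_GHG * integral of C_GHG(t) over year i, i.e. the radiative forcing exerted during year i
#by a 1 kg pulse emitted at year 0; res_list_CO2 and res_list_CH4 above hold these yearly integrals of the
#atmospheric decay curves, and aCO2/aCH4 are the radiative efficiencies set in Step (10)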
DCF_inst_CO2 = aCO2 * np.array(res_list_CO2)
print(DCF_inst_CO2)
DCF_inst_CH4 = aCH4 * np.array(res_list_CH4)
plt.xlim([0, 200])
plt.ylim([0,4e-15])
plt.plot(t, DCF_inst_CO2, DCF_inst_CH4)
plt.xlabel('Time (year)')
plt.ylabel('DCF_inst (10$^{-15}$ W/m$^2$.kg CO$_2$)')
plt.show()
len(DCF_inst_CO2)
#%%
#Step (13): import emission data from emissions_seq_scenarios.xlsx (Step (9))
##wood-based
#read S2_nucleus
df = pd.read_excel('emissions_seq_PF_PO_RB.xlsx', 'S2_nucleus') # can also index sheet by name or fetch all sheets
emission_CO2_S2nu = df['kg_CO2'].tolist()
emission_CH4_S2nu = df['kg_CH4'].tolist()
emission_CO2_seq_S2nu = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()
#read S2_plasma
df = pd.read_excel('emissions_seq_PF_PO_RB.xlsx', 'S2_plasma')
emission_CO2_S2pl = df['kg_CO2'].tolist()
emission_CH4_S2pl = df['kg_CH4'].tolist()
emission_CO2_seq_S2pl = df['kg_CO2_seq'].tolist()
#read E_nucleus
df = pd.read_excel('emissions_seq_PF_PO_RB.xlsx', 'E_nucleus') # can also index sheet by name or fetch all sheets
emission_CO2_Enu = df['kg_CO2'].tolist()
emission_CH4_Enu = df['kg_CH4'].tolist()
emission_CO2_seq_Enu = df['kg_CO2_seq'].tolist()
#read E_plasma
df = pd.read_excel('emissions_seq_PF_PO_RB.xlsx', 'E_plasma')
emission_CO2_Epl = df['kg_CO2'].tolist()
emission_CH4_Epl = df['kg_CH4'].tolist()
emission_CO2_seq_Epl = df['kg_CO2_seq'].tolist()
#%%
#Step (14): import emission data from the counter-use of non-renewable materials/energy scenarios (NR)
#read S2_nucleus
df = pd.read_excel('NonRW_PF_PO.xlsx', 'PF_PO_S2nu') # can also index sheet by name or fetch all sheets
emission_NonRW_S2nu = df['NonRW_emissions'].tolist()
emission_Diesel_S2nu = df['Diesel_emissions'].tolist()
emission_NonRW_seq_S2nu = df['kg_CO2_seq'].tolist()
emission_CO2_ref = df['emission_ref'].tolist()
#read S2_plasma
df = pd.read_excel('NonRW_PF_PO.xlsx', 'PF_PO_S2pl')
emission_NonRW_S2pl = df['NonRW_emissions'].tolist()
emission_Diesel_S2pl = df['Diesel_emissions'].tolist()
emission_NonRW_seq_S2pl = df['kg_CO2_seq'].tolist()
#read E_nucleus
df = pd.read_excel('NonRW_PF_PO.xlsx', 'PF_PO_Enu') # can also index sheet by name or fetch all sheets
emission_NonRW_Enu = df['NonRW_emissions'].tolist()
emission_Diesel_Enu = df['Diesel_emissions'].tolist()
emission_NonRW_seq_Enu = df['kg_CO2_seq'].tolist()
#read E_plasma
df = pd.read_excel('NonRW_PF_PO.xlsx', 'PF_PO_Epl')
emission_NonRW_Epl = df['NonRW_emissions'].tolist()
emission_Diesel_Epl = df['Diesel_emissions'].tolist()
emission_NonRW_seq_Epl = df['kg_CO2_seq'].tolist()
#%%
#Step (15): Determine the time elapsed dynamic characterization factors, DCF(t-ti), for CO2 and CH4
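#for each evaluation year t (column), rows 0..t are filled with DCF_inst evaluated at the time elapsed since
#each earlier emission year (offset by one year, since there is no DCF(0)); rows beyond t stay zero.
#Multiplying an emission time series by column t and summing (done in Step 16) therefore convolves the
#emissions with the decay-weighted forcing up to year t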
#DCF(t-i) CO2
matrix = (tf-1,tf-1)
DCF_CO2_ti = np.zeros(matrix)
for t in range(0,tf-1):
i = -1
while i < t:
DCF_CO2_ti[i+1,t] = DCF_inst_CO2[t-i]
i = i + 1
print(DCF_CO2_ti)
#sns.heatmap(DCF_CO2_ti)
DCF_CO2_ti.shape
#DCF(t-i) CH4
matrix = (tf-1,tf-1)
DCF_CH4_ti = np.zeros(matrix)
for t in range(0,tf-1):
i = -1
while i < t:
DCF_CH4_ti[i+1,t] = DCF_inst_CH4[t-i]
i = i + 1
print(DCF_CH4_ti)
#sns.heatmap(DCF_CH4_ti)
DCF_CH4_ti.shape
#%%
#Step (16): Calculate instantaneous global warming impact (GWI)
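#GWI_inst at year t is the sum over all emission years of (emission in that year) x (DCF for the time elapsed
#until t), computed per gas: column 0 = CO2 emissions, column 1 = CH4 emissions, column 2 = CO2 sequestration
#(negative values from the oil palm regrowth), then summed into GWI_inst_tot_* per scenario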
##wood-based
#S2_nucleus
t = np.arange(0,tf-1,1)
matrix_GWI_S2nu = (tf-1,3)
GWI_inst_S2nu = np.zeros(matrix_GWI_S2nu)
for t in range(0,tf-1):
GWI_inst_S2nu[t,0] = np.sum(np.multiply(emission_CO2_S2nu,DCF_CO2_ti[:,t]))
GWI_inst_S2nu[t,1] = np.sum(np.multiply(emission_CH4_S2nu,DCF_CH4_ti[:,t]))
GWI_inst_S2nu[t,2] = np.sum(np.multiply(emission_CO2_seq_S2nu,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S2nu = (tf-1,1)
GWI_inst_tot_S2nu = np.zeros(matrix_GWI_tot_S2nu)
GWI_inst_tot_S2nu[:,0] = np.array(GWI_inst_S2nu[:,0] + GWI_inst_S2nu[:,1] + GWI_inst_S2nu[:,2])
print(GWI_inst_tot_S2nu[:,0])
#S2_plasma
t = np.arange(0,tf-1,1)
matrix_GWI_S2pl = (tf-1,3)
GWI_inst_S2pl = np.zeros(matrix_GWI_S2pl)
for t in range(0,tf-1):
GWI_inst_S2pl[t,0] = np.sum(np.multiply(emission_CO2_S2pl,DCF_CO2_ti[:,t]))
GWI_inst_S2pl[t,1] = np.sum(np.multiply(emission_CH4_S2pl,DCF_CH4_ti[:,t]))
GWI_inst_S2pl[t,2] = np.sum(np.multiply(emission_CO2_seq_S2pl,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S2pl = (tf-1,1)
GWI_inst_tot_S2pl = np.zeros(matrix_GWI_tot_S2pl)
GWI_inst_tot_S2pl[:,0] = np.array(GWI_inst_S2pl[:,0] + GWI_inst_S2pl[:,1] + GWI_inst_S2pl[:,2])
print(GWI_inst_tot_S2pl[:,0])
#E_nucleus
t = np.arange(0,tf-1,1)
matrix_GWI_Enu = (tf-1,3)
GWI_inst_Enu = np.zeros(matrix_GWI_Enu)
for t in range(0,tf-1):
GWI_inst_Enu[t,0] = np.sum(np.multiply(emission_CO2_Enu,DCF_CO2_ti[:,t]))
GWI_inst_Enu[t,1] = np.sum(np.multiply(emission_CH4_Enu,DCF_CH4_ti[:,t]))
GWI_inst_Enu[t,2] = np.sum(np.multiply(emission_CO2_seq_Enu,DCF_CO2_ti[:,t]))
matrix_GWI_tot_Enu = (tf-1,1)
GWI_inst_tot_Enu = np.zeros(matrix_GWI_tot_Enu)
GWI_inst_tot_Enu[:,0] = np.array(GWI_inst_Enu[:,0] + GWI_inst_Enu[:,1] + GWI_inst_Enu[:,2])
print(GWI_inst_tot_Enu[:,0])
#E_plasma
t = np.arange(0,tf-1,1)
matrix_GWI_Epl = (tf-1,3)
GWI_inst_Epl = np.zeros(matrix_GWI_Epl)
for t in range(0,tf-1):
GWI_inst_Epl[t,0] = np.sum(np.multiply(emission_CO2_Epl,DCF_CO2_ti[:,t]))
GWI_inst_Epl[t,1] = np.sum(np.multiply(emission_CH4_Epl,DCF_CH4_ti[:,t]))
GWI_inst_Epl[t,2] = np.sum(np.multiply(emission_CO2_seq_Epl,DCF_CO2_ti[:,t]))
matrix_GWI_tot_Epl = (tf-1,1)
GWI_inst_tot_Epl = np.zeros(matrix_GWI_tot_Epl)
GWI_inst_tot_Epl[:,0] = np.array(GWI_inst_Epl[:,0] + GWI_inst_Epl[:,1] + GWI_inst_Epl[:,2])
print(GWI_inst_tot_Epl[:,0])
## NonRW
#GWI_inst for all gases
#S2_nucleus
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S2nu = (tf-1,3)
GWI_inst_NonRW_S2nu = np.zeros(matrix_GWI_NonRW_S2nu)
for t in range(0,tf-1):
GWI_inst_NonRW_S2nu[t,0] = np.sum(np.multiply(emission_NonRW_S2nu,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S2nu[t,1] = np.sum(np.multiply(emission_Diesel_S2nu,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S2nu[t,2] = np.sum(np.multiply(emission_NonRW_seq_S2nu,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S2nu = (tf-1,1)
GWI_inst_tot_NonRW_S2nu = np.zeros(matrix_GWI_tot_NonRW_S2nu)
GWI_inst_tot_NonRW_S2nu[:,0] = np.array(GWI_inst_NonRW_S2nu[:,0] + GWI_inst_NonRW_S2nu[:,1] + GWI_inst_NonRW_S2nu[:,2])
print(GWI_inst_tot_NonRW_S2nu[:,0])
#S2_plasma
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S2pl = (tf-1,3)
GWI_inst_NonRW_S2pl = np.zeros(matrix_GWI_NonRW_S2pl)
for t in range(0,tf-1):
GWI_inst_NonRW_S2pl[t,0] = np.sum(np.multiply(emission_NonRW_S2pl,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S2pl[t,1] = np.sum(np.multiply(emission_Diesel_S2pl,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_S2pl[t,2] = np.sum(np.multiply(emission_NonRW_seq_S2pl,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S2pl = (tf-1,1)
GWI_inst_tot_NonRW_S2pl = np.zeros(matrix_GWI_tot_NonRW_S2pl)
GWI_inst_tot_NonRW_S2pl[:,0] = np.array(GWI_inst_NonRW_S2pl[:,0] + GWI_inst_NonRW_S2pl[:,1] + GWI_inst_NonRW_S2pl[:,2])
print(GWI_inst_tot_NonRW_S2pl[:,0])
#E_nucleus
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_Enu = (tf-1,3)
GWI_inst_NonRW_Enu = np.zeros(matrix_GWI_NonRW_Enu)
for t in range(0,tf-1):
GWI_inst_NonRW_Enu[t,0] = np.sum(np.multiply(emission_NonRW_Enu,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_Enu[t,1] = np.sum(np.multiply(emission_Diesel_Enu,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_Enu[t,2] = np.sum(np.multiply(emission_NonRW_seq_Enu,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_Enu = (tf-1,1)
GWI_inst_tot_NonRW_Enu = np.zeros(matrix_GWI_tot_NonRW_Enu)
GWI_inst_tot_NonRW_Enu[:,0] = np.array(GWI_inst_NonRW_Enu[:,0] + GWI_inst_NonRW_Enu[:,1] + GWI_inst_NonRW_Enu[:,2])
print(GWI_inst_tot_NonRW_Enu[:,0])
#E_plasma
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_Epl = (tf-1,3)
GWI_inst_NonRW_Epl = np.zeros(matrix_GWI_NonRW_Epl)
for t in range(0,tf-1):
GWI_inst_NonRW_Epl[t,0] = np.sum(np.multiply(emission_NonRW_Epl,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_Epl[t,1] = np.sum(np.multiply(emission_Diesel_Epl,DCF_CO2_ti[:,t]))
GWI_inst_NonRW_Epl[t,2] = np.sum(np.multiply(emission_NonRW_seq_Epl,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_Epl = (tf-1,1)
GWI_inst_tot_NonRW_Epl = np.zeros(matrix_GWI_tot_NonRW_Epl)
GWI_inst_tot_NonRW_Epl[:,0] = np.array(GWI_inst_NonRW_Epl[:,0] + GWI_inst_NonRW_Epl[:,1] + GWI_inst_NonRW_Epl[:,2])
print(GWI_inst_tot_NonRW_Epl[:,0])
t = np.arange(0,tf-1,1)
#create zero list to highlight the horizontal line for 0
def zerolistmaker(n):
listofzeros = [0] * (n)
return listofzeros
#convert to flat list
GWI_inst_tot_NonRW_S2nu = np.array([item for sublist in GWI_inst_tot_NonRW_S2nu for item in sublist])
GWI_inst_tot_NonRW_S2pl = np.array([item for sublist in GWI_inst_tot_NonRW_S2pl for item in sublist])
GWI_inst_tot_NonRW_Enu = np.array([item for sublist in GWI_inst_tot_NonRW_Enu for item in sublist])
GWI_inst_tot_NonRW_Epl = np.array([item for sublist in GWI_inst_tot_NonRW_Epl for item in sublist])
GWI_inst_tot_S2nu = np.array([item for sublist in GWI_inst_tot_S2nu for item in sublist])
GWI_inst_tot_S2pl = np.array([item for sublist in GWI_inst_tot_S2pl for item in sublist])
GWI_inst_tot_Enu = np.array([item for sublist in GWI_inst_tot_Enu for item in sublist])
GWI_inst_tot_Epl = np.array([item for sublist in GWI_inst_tot_Epl for item in sublist])
plt.plot(t, GWI_inst_tot_NonRW_S2nu, color='lightcoral', label='NR_M_nucleus', ls='--')
plt.plot(t, GWI_inst_tot_NonRW_S2pl, color='deeppink', label='NR_M_plasma', ls='--')
plt.plot(t, GWI_inst_tot_NonRW_Enu, color='royalblue', label='NR_E_nucleus', ls='--')
plt.plot(t, GWI_inst_tot_NonRW_Epl, color='deepskyblue', label='NR_E_plasma', ls='--')
plt.plot(t, GWI_inst_tot_S2nu, color='lightcoral', label='M_nucleus')
plt.plot(t, GWI_inst_tot_S2pl, color='deeppink', label='M_plasma')
plt.plot(t, GWI_inst_tot_Enu, color='royalblue', label='E_nucleus')
plt.plot(t, GWI_inst_tot_Epl, color='deepskyblue', label='E_plasma')
plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWI_inst_tot_NonRW_Enu, GWI_inst_tot_NonRW_S2pl, color='lightcoral', alpha=0.3)
plt.grid(True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.xlim(0,200)
plt.ylim(-0.5e-9,1.4e-9)
plt.title('Instantaneous GWI, PF_PO')
plt.xlabel('Time (year)')
#plt.ylabel('GWI_inst (10$^{-13}$ W/m$^2$)')
plt.ylabel('GWI_inst (W/m$^2$)')
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_inst_NonRW_PF_PO', dpi=300)
plt.show()
#%%
#Step (17): Calculate cumulative global warming impact (GWI)
##wood-based
GWI_cum_S2nu = np.cumsum(GWI_inst_tot_S2nu)
GWI_cum_S2pl = np.cumsum(GWI_inst_tot_S2pl)
GWI_cum_Enu = np.cumsum(GWI_inst_tot_Enu)
GWI_cum_Epl = np.cumsum(GWI_inst_tot_Epl)
##NonRW
GWI_cum_NonRW_S2nu = np.cumsum(GWI_inst_tot_NonRW_S2nu)
GWI_cum_NonRW_S2pl = np.cumsum(GWI_inst_tot_NonRW_S2pl)
GWI_cum_NonRW_Enu = np.cumsum(GWI_inst_tot_NonRW_Enu)
GWI_cum_NonRW_Epl = np.cumsum(GWI_inst_tot_NonRW_Epl)
plt.xlabel('Time (year)')
#plt.ylabel('GWI_cum (10$^{-11}$ W/m$^2$)')
plt.ylabel('GWI_cum (W/m$^2$)')
plt.xlim(0,200)
plt.ylim(-0.3e-7,2e-7)
plt.title('Cumulative GWI, PF_PO')
plt.plot(t, GWI_cum_NonRW_S2nu, color='lightcoral', label='NR_M_nucleus', ls='--')
plt.plot(t, GWI_cum_NonRW_S2pl, color='deeppink', label='NR_M_plasma', ls='--')
plt.plot(t, GWI_cum_NonRW_Enu, color='royalblue', label='NR_E_nucleus', ls='--')
plt.plot(t, GWI_cum_NonRW_Epl, color='deepskyblue', label='NR_E_plasma', ls='--')
plt.plot(t, GWI_cum_S2nu, color='lightcoral', label='M_nucleus')
plt.plot(t, GWI_cum_S2pl, color='deeppink', label='M_plasma')
plt.plot(t, GWI_cum_Enu, color='royalblue', label='E_nucleus')
plt.plot(t, GWI_cum_Epl, color='deepskyblue', label='E_plasma')
plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWI_cum_NonRW_Enu, GWI_cum_NonRW_S2pl, color='lightcoral', alpha=0.3)
plt.grid(True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_cum_NonRW_PF_PO', dpi=300)
plt.show()
#%%
#Step (18): Determine the Instantenous and Cumulative GWI for the emission reference (1 kg CO2 emission at time zero) before performing dynamic GWP calculation
#determine the GWI inst for the emission reference (1 kg CO2 emission at time zero)
t = np.arange(0,tf-1,1)
matrix_GWI_ref = (tf-1,1)
GWI_inst_ref = np.zeros(matrix_GWI_ref)
for t in range(0,tf-1):
GWI_inst_ref[t,0] = np.sum(np.multiply(emission_CO2_ref,DCF_CO2_ti[:,t]))
#print(GWI_inst_ref[:,0])
len(GWI_inst_ref)
#determine the GWI cumulative for the emission reference
t = np.arange(0,tf-1,1)
GWI_cum_ref = np.cumsum(GWI_inst_ref[:,0])
#print(GWI_cum_ref)
plt.xlabel('Time (year)')
plt.ylabel('GWI_cum_ref (W/m$^2$.kgCO$_2$)')
plt.plot(t, GWI_cum_ref)
len(GWI_cum_ref)
#%%
#Step (19): Calculate dynamic global warming potential (GWPdyn)
#divide the scenario cumulative GWI by the cumulative GWI of the 1 kg CO2 reference to obtain kg CO2-eq, then by 1000 to express GWPdyn in t CO2-eq
##wood-based
GWP_dyn_cum_S2nu = [x/(y*1000) for x,y in zip(GWI_cum_S2nu, GWI_cum_ref)]
GWP_dyn_cum_S2pl = [x/(y*1000) for x,y in zip(GWI_cum_S2pl, GWI_cum_ref)]
GWP_dyn_cum_Enu = [x/(y*1000) for x,y in zip(GWI_cum_Enu, GWI_cum_ref)]
GWP_dyn_cum_Epl = [x/(y*1000) for x,y in zip(GWI_cum_Epl, GWI_cum_ref)]
##NonRW
GWP_dyn_cum_NonRW_S2nu = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S2nu, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S2pl = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S2pl, GWI_cum_ref)]
GWP_dyn_cum_NonRW_Enu = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_Enu, GWI_cum_ref)]
GWP_dyn_cum_NonRW_Epl = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_Epl, GWI_cum_ref)]
fig=plt.figure()
fig.show()
ax=fig.add_subplot(111)
ax.plot(t, GWP_dyn_cum_NonRW_S2nu, color='lightcoral', label='NR_M_nucleus', ls='--')
ax.plot(t, GWP_dyn_cum_NonRW_S2pl, color='deeppink', label='NR_M_plasma', ls='--')
ax.plot(t, GWP_dyn_cum_NonRW_Enu, color='royalblue', label='NR_E_nucleus', ls='--')
ax.plot(t, GWP_dyn_cum_NonRW_Epl, color='deepskyblue', label='NR_E_plasma', ls='--')
ax.plot(t, GWP_dyn_cum_S2nu, color='lightcoral', label='M_nucleus')
ax.plot(t, GWP_dyn_cum_S2pl, color='deeppink', label='M_plasma')
ax.plot(t, GWP_dyn_cum_Enu, color='royalblue', label='E_nucleus')
ax.plot(t, GWP_dyn_cum_Epl, color='deepskyblue', label='E_plasma')
ax.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWP_dyn_cum_NonRW_Enu, GWP_dyn_cum_NonRW_S2pl, color='lightcoral', alpha=0.3)
plt.grid(True)
ax.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax.set_xlabel('Time (year)')
ax.set_ylabel('GWP$_{dyn}$ (t-CO$_2$-eq)')
ax.set_xlim(0,200)
ax.set_ylim(-250,1400)
ax.set_title('Dynamic GWP, PF_PO')
plt.draw()
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_cum_NonRW_PF_PO', dpi=300)
#%%
#Step (20): Exporting the data behind result graphs to Excel
year = list(range(0, 201)) #years 0 to 200
### Create Column
Col1 = year
##GWI_Inst
#GWI_inst from wood-based scenarios
Col_GI_3 = GWI_inst_tot_S2nu
Col_GI_4 = GWI_inst_tot_S2pl
Col_GI_5 = GWI_inst_tot_Enu
Col_GI_6 = GWI_inst_tot_Epl
#print(Col_GI_1)
#print(np.shape(Col_GI_1))
#GWI_inst from counter use scenarios
Col_GI_9 = GWI_inst_tot_NonRW_S2nu
Col_GI_10 = GWI_inst_tot_NonRW_S2pl
Col_GI_11 = GWI_inst_tot_NonRW_Enu
Col_GI_12 = GWI_inst_tot_NonRW_Epl
#print(Col_GI_7)
#print(np.shape(Col_GI_7))
#create column results
##GWI_cumulative
#GWI_cumulative from wood-based scenarios
Col_GC_3 = GWI_cum_S2nu
Col_GC_4 = GWI_cum_S2pl
Col_GC_5 = GWI_cum_Enu
Col_GC_6 = GWI_cum_Epl
#GWI_cumulative from counter use scenarios
Col_GC_9 = GWI_cum_NonRW_S2nu
Col_GC_10 = GWI_cum_NonRW_S2pl
Col_GC_11 = GWI_cum_NonRW_Enu
Col_GC_12 = GWI_cum_NonRW_Epl
#create column results
##GWPdyn
#GWPdyn from wood-based scenarios
Col_GWP_3 = GWP_dyn_cum_S2nu
Col_GWP_4 = GWP_dyn_cum_S2pl
Col_GWP_5 = GWP_dyn_cum_Enu
Col_GWP_6 = GWP_dyn_cum_Epl
#GWPdyn from counter use scenarios
Col_GWP_9 = GWP_dyn_cum_NonRW_S2nu
Col_GWP_10 = GWP_dyn_cum_NonRW_S2pl
Col_GWP_11 = GWP_dyn_cum_NonRW_Enu
Col_GWP_12 = GWP_dyn_cum_NonRW_Epl
#Create column results
dfM_GI = pd.DataFrame.from_dict({'Year':Col1,'M_nucleus (W/m2)':Col_GI_3, 'M_plasma (W/m2)':Col_GI_4,
'E_nucleus (W/m2)':Col_GI_5, 'E_plasma (W/m2)':Col_GI_6,
'NR_M_nucleus (W/m2)':Col_GI_9, 'NR_M_plasma (W/m2)':Col_GI_10,
'NR_E_nucleus (W/m2)':Col_GI_11, 'NR_E_plasma (W/m2)':Col_GI_12})
dfM_GC = pd.DataFrame.from_dict({'Year':Col1,'M_nucleus (W/m2)':Col_GC_3, 'M_plasma (W/m2)':Col_GC_4,
'E_nucleus (W/m2)':Col_GC_5, 'E_plasma (W/m2)':Col_GC_6,
'NR_M_nucleus (W/m2)':Col_GC_9, 'NR_M_plasma (W/m2)':Col_GC_10,
'NR_E_nucleus (W/m2)':Col_GC_11, 'NR_E_plasma (W/m2)':Col_GC_12})
dfM_GWPdyn = pd.DataFrame.from_dict({'Year':Col1,'M_nucleus (t-CO2eq)':Col_GWP_3, 'M_plasma (t-CO2eq)':Col_GWP_4,
'E_nucleus (t-CO2eq)':Col_GWP_5, 'E_plasma (t-CO2eq)':Col_GWP_6,
'NR_M_nucleus (t-CO2eq)':Col_GWP_9, 'NR_M_plasma (t-CO2eq)':Col_GWP_10,
'NR_E_nucleus (t-CO2eq)':Col_GWP_11, 'NR_E_plasma (t-CO2eq)':Col_GWP_12})
#Export to excel
writer = pd.ExcelWriter('GraphResults_PF_PO_RB.xlsx', engine = 'xlsxwriter')
#GWI_inst
dfM_GI.to_excel(writer, sheet_name = 'GWI_Inst_PF_PO', header=True, index=False)
#GWI cumulative
dfM_GC.to_excel(writer, sheet_name = 'Cumulative GWI_PF_PO', header=True, index=False)
#GWP_dyn
dfM_GWPdyn.to_excel(writer, sheet_name = 'GWPdyn_PF_PO', header=True, index=False)
writer.close() #close() also saves the workbook before releasing the file handle
#%%
#Step (21): Generate the excel file for the individual carbon emission and sequestration flows
#print year column
year = list(range(0, 201))
print (year)
print(len(year))
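#unit conversion factors: 44/12 is the CO2-to-C molar mass ratio and 16/12 the CH4-to-C ratio; the factor 1000 converts kg to t, so dividing turns kg CO2 / kg CH4 into t-C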
division = 1000*44/12
division_CH4 = 1000*16/12
#M_nu
c_firewood_energy_S2nu = [x/division for x in c_firewood_energy_S2nu]
decomp_emissions = [x/division for x in decomp_emissions]
TestDSM2nu.o = [x/division for x in TestDSM2nu.o]
PH_Emissions_PO_S2nu = [x/division for x in PH_Emissions_PO_S2nu]
PH_Emissions_HWP_S2nu = [x/division for x in PH_Emissions_HWP_S2nu]
#OC_storage_S2nu = [x/division for x in OC_storage_S2nu]
flat_list_nucleus = [x/division for x in flat_list_nucleus]
decomp_tot_CO2_S2nu[:,0] = [x/division for x in decomp_tot_CO2_S2nu[:,0]]
decomp_tot_CH4_S2nu[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S2nu[:,0]]
#M_pl
c_firewood_energy_S2pl = [x/division for x in c_firewood_energy_S2pl]
TestDSM2pl.o = [x/division for x in TestDSM2pl.o]
PH_Emissions_PO_S2pl = [x/division for x in PH_Emissions_PO_S2pl]
PH_Emissions_HWP_S2pl = [x/division for x in PH_Emissions_HWP_S2pl]
#OC_storage_S2pl = [x/division for x in OC_storage_S2pl]
flat_list_plasma = [x/division for x in flat_list_plasma]
decomp_tot_CO2_S2pl[:,0] = [x/division for x in decomp_tot_CO2_S2pl[:,0]]
decomp_tot_CH4_S2pl[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S2pl[:,0]]
#Enu
c_firewood_energy_Enu = [x/division for x in c_firewood_energy_Enu]
c_pellets_Enu = [x/division for x in c_pellets_Enu]
TestDSM3nu.o = [x/division for x in TestDSM3nu.o]
PH_Emissions_PO_Enu = [x/division for x in PH_Emissions_PO_Enu]
PH_Emissions_HWP_Enu = [x/division for x in PH_Emissions_HWP_Enu]
#OC_storage_Enu = [x/division for x in OC_storage_Enu]
decomp_tot_CO2_Enu[:,0] = [x/division for x in decomp_tot_CO2_Enu[:,0]]
decomp_tot_CH4_Enu[:,0] = [x/division_CH4 for x in decomp_tot_CH4_Enu[:,0]]
#Epl
c_firewood_energy_Epl = [x/division for x in c_firewood_energy_Epl]
c_pellets_Epl = [x/division for x in c_pellets_Epl]
TestDSM3pl.o = [x/division for x in TestDSM3pl.o]
PH_Emissions_PO_Epl = [x/division for x in PH_Emissions_PO_Epl]
PH_Emissions_HWP_Epl = [x/division for x in PH_Emissions_HWP_Epl]
#OC_storage_Epl = [x/division for x in OC_storage_Epl]
decomp_tot_CO2_Epl[:,0] = [x/division for x in decomp_tot_CO2_Epl[:,0]]
decomp_tot_CH4_Epl[:,0] = [x/division_CH4 for x in decomp_tot_CH4_Epl[:,0]]
#landfill aggregate flows
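#the landfill CH4 and CO2 decomposition fluxes are summed element-wise per year, then the (tf,1) arrays are flattened to plain lists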
Landfill_decomp_PF_PO_S2nu = decomp_tot_CH4_S2nu, decomp_tot_CO2_S2nu
Landfill_decomp_PF_PO_S2pl = decomp_tot_CH4_S2pl, decomp_tot_CO2_S2pl
Landfill_decomp_PF_PO_Enu = decomp_tot_CH4_Enu, decomp_tot_CO2_Enu
Landfill_decomp_PF_PO_Epl = decomp_tot_CH4_Epl, decomp_tot_CO2_Epl
Landfill_decomp_PF_PO_S2nu = [sum(x) for x in zip(*Landfill_decomp_PF_PO_S2nu)]
Landfill_decomp_PF_PO_S2pl = [sum(x) for x in zip(*Landfill_decomp_PF_PO_S2pl)]
Landfill_decomp_PF_PO_Enu = [sum(x) for x in zip(*Landfill_decomp_PF_PO_Enu)]
Landfill_decomp_PF_PO_Epl = [sum(x) for x in zip(*Landfill_decomp_PF_PO_Epl)]
Landfill_decomp_PF_PO_S2nu = [item for sublist in Landfill_decomp_PF_PO_S2nu for item in sublist]
Landfill_decomp_PF_PO_S2pl = [item for sublist in Landfill_decomp_PF_PO_S2pl for item in sublist]
Landfill_decomp_PF_PO_Enu = [item for sublist in Landfill_decomp_PF_PO_Enu for item in sublist]
Landfill_decomp_PF_PO_Epl = [item for sublist in Landfill_decomp_PF_PO_Epl for item in sublist]
#Wood processing aggregate flows
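#sum of the two post-harvest emission streams (PH_Emissions_PO and PH_Emissions_HWP) per scenario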
OpProcessing_PF_PO_S2nu = [x + y for x, y in zip(PH_Emissions_PO_S2nu, PH_Emissions_HWP_S2nu)]
OpProcessing_PF_PO_S2pl = [x + y for x, y in zip(PH_Emissions_PO_S2pl, PH_Emissions_HWP_S2pl)]
OpProcessing_PF_PO_Enu = [x + y for x, y in zip(PH_Emissions_PO_Enu, PH_Emissions_HWP_Enu)]
OpProcessing_PF_PO_Epl = [x + y for x, y in zip(PH_Emissions_PO_Epl, PH_Emissions_HWP_Epl)]
#M_nu
Column1 = year
Column2 = c_firewood_energy_S2nu
Column3 = decomp_emissions
Column4 = TestDSM2nu.o
Column5 = OpProcessing_PF_PO_S2nu
#Column7_1 = OC_storage_S2nu
Column7 = Landfill_decomp_PF_PO_S2nu
Column8 = flat_list_nucleus
#M_pl
Column9 = c_firewood_energy_S2pl
Column10 = TestDSM2pl.o
Column11 = OpProcessing_PF_PO_S2pl
#Column13_1 = OC_storage_S2pl
Column13 = Landfill_decomp_PF_PO_S2pl
Column14 = flat_list_plasma
#E_nu
Column15 = c_firewood_energy_Enu
Column15_1 = c_pellets_Enu
Column16 = TestDSM3nu.o
Column17 = OpProcessing_PF_PO_Enu
#Column19_1 = OC_storage_Enu
Column19 = Landfill_decomp_PF_PO_Enu
#E_pl
Column20 = c_firewood_energy_Epl
Column20_1 = c_pellets_Epl
Column21 = TestDSM3pl.o
Column22 = OpProcessing_PF_PO_Epl
#Column24_1 = OC_storage_Epl
Column24 = Landfill_decomp_PF_PO_Epl
#M
dfM_nu = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column8,
#'9: Landfill storage (t-C)':Column7_1,
'F1-0: Residue decomposition (t-C)':Column3,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column2,
'F8-0: Operational stage/processing emissions (t-C)':Column5,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column4,
'F7-0: Landfill gas decomposition (t-C)':Column7})
dfM_pl = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column14,
#'9: Landfill storage (t-C)':Column13_1,
'F1-0: Residue decomposition (t-C)':Column3,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column9,
'F8-0: Operational stage/processing emissions (t-C)':Column11,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column10,
'F7-0: Landfill gas decomposition (t-C)':Column13})
#E
dfE_nu = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column8,
#'9: Landfill storage (t-C)':Column19_1,
'F1-0: Residue decomposition (t-C)':Column3,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column15,
'F8-0: Operational stage/processing emissions (t-C)':Column17,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column16,
'F4-0: Emissions from wood pellets use (t-C)':Column15_1,
'F7-0: Landfill gas decomposition (t-C)':Column19})
dfE_pl = pd.DataFrame.from_dict({'Year':Column1, 'F0-1: Biomass C sequestration (t-C)':Column14,
#'9: Landfill storage (t-C)':Column24_1,
'F1-0: Residue decomposition (t-C)':Column3,
'F6-0-1: Emissions from firewood/other energy use (t-C)':Column20,
'F8-0: Operational stage/processing emissions (t-C)':Column22,
'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column21,
'F4-0: Emissions from wood pellets use (t-C)':Column20_1,
'F7-0: Landfill gas decomposition (t-C)':Column24})
writer = pd.ExcelWriter('C_flows_PF_PO_RB.xlsx', engine = 'xlsxwriter')
dfM_nu.to_excel(writer, sheet_name = 'PF_PO_M_nu', header=True, index=False)
dfM_pl.to_excel(writer, sheet_name = 'PF_PO_M_pl', header=True, index=False)
dfE_nu.to_excel(writer, sheet_name = 'PF_PO_E_nu', header=True, index=False)
dfE_pl.to_excel(writer, sheet_name = 'PF_PO_E_pl', header=True, index=False)
writer.close() #close() also saves the workbook before releasing the file handle
#%%
#Step (22): Plot of the individual carbon emission and sequestration flows for normal and symlog-scale graphs
#PF_PO_M_nu
fig=plt.figure()
fig.show()
ax1=fig.add_subplot(111)
# plot
ax1.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1.plot(t, OC_storage_S2nu, color='darkturquoise', label='9: Landfill storage')
ax1.plot(t, decomp_emissions, color='lightcoral', label='F1-0: Residue decomposition')
ax1.plot(t, c_firewood_energy_S2nu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1.plot(t, OpProcessing_PF_PO_S2nu, color='orange', label='F8-0: Operational stage/processing emissions')
ax1.plot(t, TestDSM2nu.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1.plot(t, Landfill_decomp_PF_PO_S2nu, color='yellow', label='F7-0: Landfill gas decomposition')
ax1.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax1.set_xlim(-1,200)
ax1.set_yscale('symlog')
ax1.set_xlabel('Time (year)')
ax1.set_ylabel('C flows (t-C) (symlog)')
ax1.set_title('Carbon flow, PF_PO_M_nucleus (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_M_nu
f, (ax_a, ax_b) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_a.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_a.plot(t, OC_storage_S2nu, color='darkturquoise', label='9: Landfill storage')
ax_a.plot(t, decomp_emissions, color='lightcoral', label='F1-0: Residue decomposition')
ax_a.plot(t, c_firewood_energy_S2nu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_a.plot(t, OpProcessing_PF_PO_S2nu, color='orange', label='F8-0: Operational stage/processing emissions')
ax_a.plot(t, TestDSM2nu.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax_a.plot(t, Landfill_decomp_PF_PO_S2nu, color='yellow', label='F7-0: Landfill gas decomposition')
ax_b.plot(t, c_firewood_energy_S2nu, color='mediumseagreen')
ax_b.plot(t, decomp_emissions, color='lightcoral')
#ax_b.plot(t, OC_storage_S2nu, color='darkturquoise')
ax_b.plot(t, TestDSM2nu.o, color='royalblue')
ax_b.plot(t, OpProcessing_PF_PO_S2nu, color='orange')
ax_b.plot(t, Landfill_decomp_PF_PO_S2nu, color='yellow')
ax_b.plot(t, flat_list_nucleus, color='darkkhaki')
#ax_a.set_yscale('log')
#ax_b.set_yscale('log')
# zoom-in / limit the view to different portions of the data
ax_a.set_xlim(-1,200)
ax_a.set_ylim(60, 75)
ax_b.set_ylim(-25, 35)
#ax_b.set_ylim(-0.3, 0.5)
# hide the spines between ax and ax2
ax_a.spines['bottom'].set_visible(False)
ax_b.spines['top'].set_visible(False)
ax_a.xaxis.tick_top()
ax_a.tick_params(labeltop=False) # don't put tick labels at the top
ax_b.xaxis.tick_bottom()
ax_a.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_a.transAxes, color='k', clip_on=False)
ax_a.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax_a.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax_b.transAxes) # switch to the bottom axes
ax_b.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax_b.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax_b.set_xlabel('Time (year)')
ax_b.set_ylabel('C flows (t-C)')
ax_a.set_ylabel('C flows (t-C)')
ax_a.set_title('Carbon flow, PF_PO_M_nucleus')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#plot for the individual carbon flows - symlog-scale graphs
#PF_PO_M_pl
fig=plt.figure()
fig.show()
ax2=fig.add_subplot(111)
# plot
ax2.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2.plot(t, OC_storage_S2pl, color='darkturquoise', label='9: Landfill storage')
ax2.plot(t, decomp_emissions, color='lightcoral', label='F1-0: Residue decomposition')
ax2.plot(t, c_firewood_energy_S2pl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2.plot(t, OpProcessing_PF_PO_S2pl, color='orange', label='F8-0: Operational stage/processing emissions')
ax2.plot(t, TestDSM2pl.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax2.plot(t, Landfill_decomp_PF_PO_S2pl, color='yellow', label='F7-0: Landfill gas decomposition')
ax2.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2.set_xlim(-1,200)
ax2.set_yscale('symlog')
ax2.set_xlabel('Time (year)')
ax2.set_ylabel('C flows (t-C) (symlog)')
ax2.set_title('Carbon flow, PF_PO_M_plasma (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_M_pl
f, (ax_c, ax_d) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_c.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_c.plot(t, OC_storage_S2pl, color='darkturquoise', label='9: Landfill storage')
ax_c.plot(t, decomp_emissions, color='lightcoral', label='F1-0: Residue decomposition')
ax_c.plot(t, c_firewood_energy_S2pl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_c.plot(t, OpProcessing_PF_PO_S2pl, color='orange', label='F8-0: Operational stage/processing emissions')
ax_c.plot(t, TestDSM2pl.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax_c.plot(t, Landfill_decomp_PF_PO_S2pl, color='yellow', label='F7-0: Landfill gas decomposition')
ax_d.plot(t, c_firewood_energy_S2pl, color='mediumseagreen')
ax_d.plot(t, decomp_emissions, color='lightcoral')
#ax_d.plot(t, OC_storage_S2pl, color='darkturquoise')
ax_d.plot(t, TestDSM2pl.o, color='royalblue')
ax_d.plot(t, OpProcessing_PF_PO_S2pl, color='orange')
ax_d.plot(t, Landfill_decomp_PF_PO_S2pl, color='yellow')
ax_d.plot(t, flat_list_plasma, color='darkkhaki')
# zoom-in / limit the view to different portions of the data
ax_c.set_xlim(-1,200)
ax_c.set_ylim(60, 75)
ax_d.set_ylim(-25, 35)
# hide the spines between ax and ax2
ax_c.spines['bottom'].set_visible(False)
ax_d.spines['top'].set_visible(False)
ax_c.xaxis.tick_top()
ax_c.tick_params(labeltop=False) # don't put tick labels at the top
ax_d.xaxis.tick_bottom()
ax_c.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_c.transAxes, color='k', clip_on=False)
ax_c.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax_c.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax_d.transAxes) # switch to the bottom axes
ax_d.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax_d.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax_d.set_xlabel('Time (year)')
ax_d.set_ylabel('C flows (t-C)')
ax_c.set_ylabel('C flows (t-C)')
ax_c.set_title('Carbon flow, PF_PO_M_plasma')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#plot for the individual carbon flows - symlog-scale graphs
#PF_PO_E_nu
fig=plt.figure()
fig.show()
ax3=fig.add_subplot(111)
# plot
ax3.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax3.plot(t, OC_storage_Enu, color='darkturquoise', label='9: Landfill storage')
ax3.plot(t, decomp_emissions, color='lightcoral', label='F1-0: Residue decomposition')
ax3.plot(t, c_firewood_energy_Enu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax3.plot(t, OpProcessing_PF_PO_Enu, color='orange', label='F8-0: Operational stage/processing emissions')
ax3.plot(t, Landfill_decomp_PF_PO_Enu, color='yellow', label='F7-0: Landfill gas decomposition')
ax3.plot(t, c_pellets_Enu, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax3.plot(t, TestDSM3nu.o, color='royalblue', label='in-use stock output')
ax3.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax3.set_xlim(-1,200)
ax3.set_yscale('symlog')
ax3.set_xlabel('Time (year)')
ax3.set_ylabel('C flows (t-C) (symlog)')
ax3.set_title('Carbon flow, PF_PO_E_nucleus (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_E_nu
f, (ax_e, ax_f) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_e.plot(t, flat_list_nucleus, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_e.plot(t, OC_storage_Enu, color='darkturquoise', label='9: Landfill storage')
ax_e.plot(t, decomp_emissions, color='lightcoral', label='F1-0: Residue decomposition')
ax_e.plot(t, c_firewood_energy_Enu, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_e.plot(t, OpProcessing_PF_PO_Enu, color='orange', label='F8-0: Operational stage/processing emissions')
ax_e.plot(t, Landfill_decomp_PF_PO_Enu, color='yellow', label='F7-0: Landfill gas decomposition')
ax_e.plot(t, c_pellets_Enu, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax_e.plot(t, TestDSM3nu.o, color='royalblue', label='in-use stock output')
ax_f.plot(t, c_firewood_energy_Enu, color='mediumseagreen')
ax_f.plot(t, decomp_emissions, color='lightcoral')
ax_f.plot(t, c_pellets_Enu, color='slategrey')
#ax_f.plot(t, TestDSM3nu.o, color='royalblue')
#ax_f.plot(t, OC_storage_Enu, color='darkturquoise')
ax_f.plot(t, OpProcessing_PF_PO_Enu, color='orange')
ax_f.plot(t, Landfill_decomp_PF_PO_Enu, color='yellow')
ax_f.plot(t, flat_list_nucleus, color='darkkhaki')
# zoom-in / limit the view to different portions of the data
ax_e.set_xlim(-1,200)
ax_e.set_ylim(170, 190)
ax_f.set_ylim(-25, 30)
# hide the spines between ax and ax2
ax_e.spines['bottom'].set_visible(False)
ax_f.spines['top'].set_visible(False)
ax_e.xaxis.tick_top()
ax_e.tick_params(labeltop=False) # don't put tick labels at the top
ax_f.xaxis.tick_bottom()
ax_e.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_e.transAxes, color='k', clip_on=False)
ax_e.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax_e.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax_f.transAxes) # switch to the bottom axes
ax_f.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax_f.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax_f.set_xlabel('Time (year)')
ax_f.set_ylabel('C flows (t-C)')
ax_e.set_ylabel('C flows (t-C)')
ax_e.set_title('Carbon flow, PF_PO_E_nucleus')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#plot for the individual carbon flows - symlog-scale graphs
#PF_PO_E_pl
fig=plt.figure()
fig.show()
ax4=fig.add_subplot(111)
# plot
ax4.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax4.plot(t, OC_storage_Epl, color='darkturquoise', label='9: Landfill storage')
ax4.plot(t, decomp_emissions, color='lightcoral', label='F1-0: Residue decomposition')
ax4.plot(t, c_firewood_energy_Epl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax4.plot(t, OpProcessing_PF_PO_Epl, color='orange', label='F8-0: Operational stage/processing emissions')
ax4.plot(t, Landfill_decomp_PF_PO_Epl, color='yellow', label='F7-0: Landfill gas decomposition')
#ax4.plot(t, TestDSM3pl.o, color='royalblue', label='in-use stock output')
ax4.plot(t, c_pellets_Epl, color='slategrey', label='F4-0: Emissions from wood pellets use')
ax4.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax4.set_xlim(-1,200)
ax4.set_yscale('symlog')
ax4.set_xlabel('Time (year)')
ax4.set_ylabel('C flows (t-C) (symlog)')
ax4.set_title('Carbon flow, PF_PO_E_plasma (symlog-scale)')
plt.show()
#%%
#plotting the individual C flows
#PF_PO_E_pl
f, (ax_g, ax_h) = plt.subplots(2, 1, sharex=True)
# plot the same data on both axes
ax_g.plot(t, flat_list_plasma, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax_g.plot(t, OC_storage_Epl, color='darkturquoise', label='9: Landfill storage')
ax_g.plot(t, decomp_emissions, color='lightcoral', label='F1-0: Residue decomposition')
ax_g.plot(t, c_firewood_energy_Epl, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax_g.plot(t, OpProcessing_PF_PO_Epl, color='orange', label='F8-0: Operational stage/processing emissions')
ax_g.plot(t, Landfill_decomp_PF_PO_Epl, color='yellow', label='F7-0: Landfill gas decomposition')
#ax_g.plot(t, TestDSM3pl.o, color='royalblue', label='in-use stock output')
ax_g.plot(t, c_pellets_Epl, color='slategrey', label='F4-0: Emissions from wood pellets use')
ax_h.plot(t, c_firewood_energy_Epl, color='mediumseagreen')
ax_h.plot(t, c_pellets_Epl, color='slategrey')
ax_h.plot(t, decomp_emissions, color='lightcoral')
#ax_h.plot(t, TestDSM3pl.o, color='royalblue')
ax_h.plot(t, OpProcessing_PF_PO_Epl, color='orange')
#ax_h.plot(t, OC_storage_Epl, color='darkturquoise')
ax_h.plot(t, Landfill_decomp_PF_PO_Epl, color='yellow')
ax_h.plot(t, flat_list_plasma, color='darkkhaki')
# zoom-in / limit the view to different portions of the data
ax_g.set_xlim(-1,200)
ax_g.set_ylim(170, 190)
ax_h.set_ylim(-25, 30)
# hide the spines between ax and ax2
ax_g.spines['bottom'].set_visible(False)
ax_h.spines['top'].set_visible(False)
ax_g.xaxis.tick_top()
ax_g.tick_params(labeltop=False) # don't put tick labels at the top
ax_h.xaxis.tick_bottom()
ax_g.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_g.transAxes, color='k', clip_on=False)
ax_g.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax_g.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax_h.transAxes) # switch to the bottom axes
ax_h.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax_h.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax_h.set_xlabel('Time (year)')
ax_h.set_ylabel('C flows (t-C)')
ax_g.set_ylabel('C flows (t-C)')
ax_g.set_title('Carbon flow, PF_PO_E_plasma')
#plt.plot(t, Cflow_PF_SF_S1)
#plt.plot(t, Cflow_PF_SF_S2)
#plt.plot(t, Cflow_PF_SF_E)
#plt.xlim([0, 200])
plt.show()
#%%
#Step (23): Generate the excel file for the net carbon balance
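#the net carbon balance sums all individual emission and sequestration flows of each scenario per year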
Agg_Cflow_PF_PO_S2nu = [c_firewood_energy_S2nu, decomp_emissions, TestDSM2nu.o, OpProcessing_PF_PO_S2nu, Landfill_decomp_PF_PO_S2nu, flat_list_nucleus]
Agg_Cflow_PF_PO_S2pl = [c_firewood_energy_S2pl, decomp_emissions, TestDSM2pl.o, OpProcessing_PF_PO_S2pl, Landfill_decomp_PF_PO_S2pl, flat_list_plasma]
Agg_Cflow_PF_PO_Enu = [c_firewood_energy_Enu, c_pellets_Enu, decomp_emissions, TestDSM3nu.o, OpProcessing_PF_PO_Enu, Landfill_decomp_PF_PO_Enu, flat_list_nucleus]
Agg_Cflow_PF_PO_Epl = [c_firewood_energy_Epl, c_pellets_Epl, decomp_emissions, TestDSM3pl.o, OpProcessing_PF_PO_Epl, Landfill_decomp_PF_PO_Epl, flat_list_plasma]
Agg_Cflow_PF_PO_S2nu = [sum(x) for x in zip(*Agg_Cflow_PF_PO_S2nu)]
Agg_Cflow_PF_PO_S2pl = [sum(x) for x in zip(*Agg_Cflow_PF_PO_S2pl)]
Agg_Cflow_PF_PO_Enu = [sum(x) for x in zip(*Agg_Cflow_PF_PO_Enu)]
Agg_Cflow_PF_PO_Epl = [sum(x) for x in zip(*Agg_Cflow_PF_PO_Epl)]
fig=plt.figure()
fig.show()
ax5=fig.add_subplot(111)
# plot
ax5.plot(t, Agg_Cflow_PF_PO_S2nu, color='orange', label='M_nucleus')
ax5.plot(t, Agg_Cflow_PF_PO_S2pl, color='darkturquoise', label='M_plasma')
ax5.plot(t, Agg_Cflow_PF_PO_Enu, color='lightcoral', label='E_nucleus')
ax5.plot(t, Agg_Cflow_PF_PO_Epl, color='mediumseagreen', label='E_plasma')
ax5.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax5.set_xlim(-0.3,85)
#ax5.set_yscale('symlog')
ax5.set_xlabel('Time (year)')
ax5.set_ylabel('C flows (t-C)')
ax5.set_title('Net carbon balance, PF_PO')
plt.show()
#create column year
year = list(range(0, 201))
print (year)
#Create column results
dfM_PF_PO = pd.DataFrame.from_dict({'Year':year,'M_nucleus (t-C)':Agg_Cflow_PF_PO_S2nu, 'M_plasma (t-C)':Agg_Cflow_PF_PO_S2pl,
'E_nucleus (t-C)':Agg_Cflow_PF_PO_Enu, 'E_plasma (t-C)':Agg_Cflow_PF_PO_Epl})
#Export to excel
writer = pd.ExcelWriter('AggCFlow_PF_PO_RB.xlsx', engine = 'xlsxwriter')
dfM_PF_PO.to_excel(writer, sheet_name = 'PF_PO', header=True, index=False)
writer.close() #close() also saves the workbook before releasing the file handle
#%%
#Step (24): Plot the net carbon balance
##Net carbon balance for M and E (axis break)
f, (ax5a, ax5b) = plt.subplots(2, 1, sharex=True)
ax5a.plot(t, Agg_Cflow_PF_PO_S2nu, color='orange', label='M_nucleus')
ax5a.plot(t, Agg_Cflow_PF_PO_S2pl, color='darkturquoise', label='M_plasma')
ax5a.plot(t, Agg_Cflow_PF_PO_Enu, color='lightcoral', label='E_nucleus')
ax5a.plot(t, Agg_Cflow_PF_PO_Epl, color='mediumseagreen', label='E_plasma')
ax5a.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
ax5b.plot(t, Agg_Cflow_PF_PO_S2nu, color='orange')
ax5b.plot(t, Agg_Cflow_PF_PO_S2pl, color='darkturquoise')
ax5b.plot(t, Agg_Cflow_PF_PO_Enu, color='lightcoral')
ax5b.plot(t, Agg_Cflow_PF_PO_Epl, color='mediumseagreen')
ax5b.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
# zoom-in / limit the view to different portions of the data
ax5a.set_xlim(-0.35,85)
#ax5a.set_xlim(-1,200)
ax5a.set_ylim(210, 230)
ax5b.set_xlim(-0.35,85)
#ax5b.set_xlim(-1,200)
ax5b.set_ylim(-5, 50)
# hide the spines between ax and ax2
ax5a.spines['bottom'].set_visible(False)
ax5b.spines['top'].set_visible(False)
ax5a.xaxis.tick_top()
ax5a.tick_params(labeltop=False) # don't put tick labels at the top
ax5b.xaxis.tick_bottom()
ax5a.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
d = .012 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax5a.transAxes, color='k', clip_on=False)
ax5a.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax5a.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax5b.transAxes) # switch to the bottom axes
ax5b.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax5b.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax5b.set_xlabel('Time (year)')
ax5b.set_ylabel('C flows (t-C)')
ax5a.set_ylabel('C flows (t-C)')
ax5a.set_title('Net carbon balance, PF_PO')
plt.show()
#%%
#Step (25): Generate the excel file for documentation of individual carbon flows in the system definition (Fig. 1)
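#this step reconstructs the individual flows (F...) and stocks (St...) of the system definition for each scenario from the processed series and the scenario input spreadsheets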
#print year column
year = list(range(0, 201))
print (year)
df2nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2nu')
df2pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_S2pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO.xlsx', 'PF_PO_Epl')
Column1 = year
division = 1000*44/12
division_CH4 = 1000*16/12
## S2nu
## define the input flow for the landfill (F5-7)
OC_storage_S2nu = df2nu['Other_C_storage'].values
OC_storage_S2nu = [x/division for x in OC_storage_S2nu]
OC_storage_S2nu = [abs(number) for number in OC_storage_S2nu]
C_LF_S2nu = [x*1/0.82 for x in OC_storage_S2nu]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S2nu = [x/division for x in df2nu['Input_PF'].values]
HWP_S2nu_energy = [x*1/3 for x in c_firewood_energy_S2nu]
HWP_S2nu_landfill = [x*1/0.82 for x in OC_storage_S2nu]
HWP_S2nu_sum = [HWP_S2nu, HWP_S2nu_energy, HWP_S2nu_landfill]
HWP_S2nu_sum = [sum(x) for x in zip(*HWP_S2nu_sum)]
#in-use stocks (S-4)
TestDSM2nu.s = [x/division for x in TestDSM2nu.s]
#TestDSM2nu.i = [x/division for x in TestDSM2nu.i]
# calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_S2nu = (tf,1)
stocks_S2nu = np.zeros(zero_matrix_stocks_S2nu)
i = 0
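# landfill carbon stock balance: St-7(t+1) = St-7(t) + landfill inflow C_LF(t+1) - landfill decomposition(t+1)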
stocks_S2nu[0] = C_LF_S2nu[0] - Landfill_decomp_PF_PO_S2nu[0]
while i < tf-1:
stocks_S2nu[i+1] = np.array(C_LF_S2nu[i+1] - Landfill_decomp_PF_PO_S2nu[i+1] + stocks_S2nu[i])
i = i + 1
## calculate aggregate flow of logged wood (F1-2)
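#logged wood entering the system: wood going to materials/pellets processing plus the firewood share taken directly from the harvest (2/3 of c_firewood_energy)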
HWP_logged_S2nu = [x1+x2 for (x1,x2) in zip(HWP_S2nu_sum, [x*2/3 for x in c_firewood_energy_S2nu])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_S2nu = (tf,1)
ForCstocks_S2nu = np.zeros(zero_matrix_ForCstocks_S2nu)
i = 0
ForCstocks_S2nu[0] = initAGB - flat_list_nucleus[0] - decomp_emissions[0] - HWP_logged_S2nu[0]
while i < tf-1:
ForCstocks_S2nu[i+1] = np.array(ForCstocks_S2nu[i] - flat_list_nucleus[i+1] - decomp_emissions[i+1] - HWP_logged_S2nu[i+1])
i = i + 1
##NonRW materials/energy amount (F9-0-1)
df2nu_amount = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_PO.xlsx', 'PF_PO_S2nu')
NonRW_amount_S2nu = df2nu_amount['NonRW_amount'].values
NonRW_amount_S2nu = [x/1000 for x in NonRW_amount_S2nu]
##NonRW emissions (F9-0-2)
emission_NonRW_S2nu = [x/division for x in emission_NonRW_S2nu]
#create columns
dfM_nu = pd.DataFrame.from_dict({'Year':Column1,
'F0-1 (t-C)': flat_list_nucleus,
'F1-0 (t-C)': decomp_emissions,
#'F1a-2 (t-C)': PF_PO_S2nu,
#'F1c-2 (t-C)': FP_PO_S2nu,
'F1-2 (t-C)': HWP_logged_S2nu,
'St-1 (t-C)':ForCstocks_S2nu[:,0],
'F2-3 (t-C)': HWP_S2nu_sum,
'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S2nu],
'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S2nu_sum, [x*1/0.82 for x in OC_storage_S2nu], [x*1/3 for x in c_firewood_energy_S2nu])],
'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S2nu],
'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S2nu],
# 'F4-0 (t-C)':,
'St-4 (t-C)': TestDSM2nu.s,
#'S-4-i (t-C)': TestDSM2nu.i,
'F4-5 (t-C)': TestDSM2nu.o,
'F5-6 (t-C)': TestDSM2nu.o,
'F5-7 (t-C)': C_LF_S2nu,
'F6-0-1 (t-C)': c_firewood_energy_S2nu,
'F6-0-2 (t-C)': TestDSM2nu.o,
'St-7 (t-C)': stocks_S2nu[:,0],
'F7-0 (t-C)': Landfill_decomp_PF_PO_S2nu,
'F8-0 (t-C)': OpProcessing_PF_PO_S2nu,
'S9-0 (t)': NonRW_amount_S2nu,
'F9-0 (t-C)': emission_NonRW_S2nu,
})
##S2pl
## define the input flow for the landfill (F5-7)
OC_storage_S2pl = df2pl['Other_C_storage'].values
OC_storage_S2pl = [x/division for x in OC_storage_S2pl]
OC_storage_S2pl = [abs(number) for number in OC_storage_S2pl]
C_LF_S2pl = [x*1/0.82 for x in OC_storage_S2pl]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S2pl = [x/division for x in df2pl['Input_PF'].values]
HWP_S2pl_energy = [x*1/3 for x in c_firewood_energy_S2pl]
HWP_S2pl_landfill = [x*1/0.82 for x in OC_storage_S2pl]
HWP_S2pl_sum = [HWP_S2pl, HWP_S2pl_energy, HWP_S2pl_landfill]
HWP_S2pl_sum = [sum(x) for x in zip(*HWP_S2pl_sum)]
#in-use stocks (S-4)
TestDSM2pl.s = [x/division for x in TestDSM2pl.s]
#TestDSM2pl.i = [x/division for x in TestDSM2pl.i]
# calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_S2pl = (tf,1)
stocks_S2pl = np.zeros(zero_matrix_stocks_S2pl)
i = 0
stocks_S2pl[0] = C_LF_S2pl[0] - Landfill_decomp_PF_PO_S2pl[0]
while i < tf-1:
stocks_S2pl[i+1] = np.array(C_LF_S2pl[i+1] - Landfill_decomp_PF_PO_S2pl[i+1] + stocks_S2pl[i])
i = i + 1
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S2pl = [x1+x2 for (x1,x2) in zip(HWP_S2pl_sum, [x*2/3 for x in c_firewood_energy_S2pl])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_S2pl = (tf,1)
ForCstocks_S2pl = np.zeros(zero_matrix_ForCstocks_S2pl)
i = 0
ForCstocks_S2pl[0] = initAGB - flat_list_plasma[0] - decomp_emissions[0] - HWP_logged_S2pl[0]
while i < tf-1:
ForCstocks_S2pl[i+1] = np.array(ForCstocks_S2pl[i] - flat_list_plasma[i+1] - decomp_emissions[i+1] - HWP_logged_S2pl[i+1])
i = i + 1
##NonRW materials/energy amount (F9-0-1)
df2pl_amount = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_PO.xlsx', 'PF_PO_S2pl')
NonRW_amount_S2pl = df2pl_amount['NonRW_amount'].values
NonRW_amount_S2pl = [x/1000 for x in NonRW_amount_S2pl]
##NonRW emissions (F9-0-2)
emission_NonRW_S2pl = [x/division for x in emission_NonRW_S2pl]
#create columns
dfM_pl = pd.DataFrame.from_dict({'Year':Column1,
'F0-1 (t-C)': flat_list_plasma,
'F1-0 (t-C)': decomp_emissions,
#'F1a-2 (t-C)': PF_PO_S2pl,
#'F1c-2 (t-C)': FP_PO_S2pl,
'F1-2 (t-C)': HWP_logged_S2pl,
'St-1 (t-C)':ForCstocks_S2pl[:,0],
'F2-3 (t-C)': HWP_S2pl_sum,
'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S2pl],
'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S2pl_sum, [x*1/0.82 for x in OC_storage_S2pl], [x*1/3 for x in c_firewood_energy_S2pl])],
'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S2pl],
'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S2pl],
# 'F4-0 (t-C)':,
'St-4 (t-C)': TestDSM2pl.s,
#'S-4-i (t-C)': TestDSM2pl.i,
'F4-5 (t-C)': TestDSM2pl.o,
'F5-6 (t-C)': TestDSM2pl.o,
'F5-7 (t-C)': C_LF_S2pl,
'F6-0-1 (t-C)': c_firewood_energy_S2pl,
'F6-0-2 (t-C)': TestDSM2pl.o,
'St-7 (t-C)': stocks_S2pl[:,0],
'F7-0 (t-C)': Landfill_decomp_PF_PO_S2pl,
'F8-0 (t-C)': OpProcessing_PF_PO_S2pl,
'S9-0 (t)': NonRW_amount_S2pl,
'F9-0 (t-C)': emission_NonRW_S2pl,
})
##Enu
## define the input flow for the landfill (F5-7)
OC_storage_Enu = dfEnu['Other_C_storage'].values
OC_storage_Enu = [x/division for x in OC_storage_Enu]
OC_storage_Enu = [abs(number) for number in OC_storage_Enu]
C_LF_Enu = [x*1/0.82 for x in OC_storage_Enu]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_Enu = [x/division for x in dfEnu['Wood_pellets'].values]
HWP_Enu_energy = [x*1/3 for x in c_firewood_energy_Enu]
HWP_Enu_landfill = [x*1/0.82 for x in OC_storage_Enu]
HWP_Enu_sum = [HWP_Enu, HWP_Enu_energy, HWP_Enu_landfill]
HWP_Enu_sum = [sum(x) for x in zip(*HWP_Enu_sum)]
#in-use stocks (S-4)
TestDSM3nu.s = [x/division for x in TestDSM3nu.s]
#TestDSM3nu.i = [x/division for x in TestDSM3nu.i]
# calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_Enu = (tf,1)
stocks_Enu = np.zeros(zero_matrix_stocks_Enu)
i = 0
stocks_Enu[0] = C_LF_Enu[0] - Landfill_decomp_PF_PO_Enu[0]
while i < tf-1:
stocks_Enu[i+1] = np.array(C_LF_Enu[i+1] - Landfill_decomp_PF_PO_Enu[i+1] + stocks_Enu[i])
i = i + 1
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_Enu = [x1+x2 for (x1,x2) in zip(HWP_Enu_sum, [x*2/3 for x in c_firewood_energy_Enu])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_Enu = (tf,1)
ForCstocks_Enu = np.zeros(zero_matrix_ForCstocks_Enu)
i = 0
ForCstocks_Enu[0] = initAGB - flat_list_nucleus[0] - decomp_emissions[0] - HWP_logged_Enu[0]
while i < tf-1:
ForCstocks_Enu[i+1] = np.array(ForCstocks_Enu[i] - flat_list_nucleus[i+1] - decomp_emissions[i+1] - HWP_logged_Enu[i+1])
i = i + 1
##NonRW materials/energy amount (F9-0-1)
dfEnu_amount = | pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_PF_PO.xlsx', 'PF_PO_Enu') | pandas.read_excel |
import pandas as pd
import math
import sys
import numpy as np
from process.validate_csv_data import check_yearly_records, check_range_records, mapping_id
def create_csv_population_populationUnit(read_path, output_path, yearly_file, range_file):
"""
create population_人口.csv, populationUnit_人口单位.csv that will be loaded into database.
Process data in "Population and Migration - Range.csv", "Population and Migration - Yearly.csv" files to "population_人口.csv", "populationUnit_人口单位.csv"
Categories for population table is read from "Database Data" directory
Categories are predefined and created.
If categories are changed, population categories in Category.py should be changed first.
:param read_path: path to directory contains "Population and Migration - Range.csv", "Population and Migration - Yearly.csv"
:param output_path: path to directory stores "population_人口.csv", "populationUnit_人口单位.csv"
:param yearly_file: file contains population yearly data
:param range_file: file contains population range data
"""
yearly_df = pd.read_csv(read_path + "/" + yearly_file)
range_df = pd.read_csv(read_path + "/" + range_file)
yearly_df = yearly_df.dropna(axis=0, how='all')
range_df = range_df.dropna(axis=0, how='all')
yearly_df = yearly_df.drop_duplicates().reset_index(drop=True) #reset the index so the positional row counter used below lines up with the rows
range_df = range_df.drop_duplicates().reset_index(drop=True)
# validate data
print("Validate Population Yearly data")
correct_yearly = check_yearly_records(yearly_df, range(1949, 2020))
print("Validate Population Range data")
correct_range = check_range_records(range_df)
if not (correct_yearly and correct_range): #both the yearly and the range data must pass validation
sys.exit("Correct records first.")
else:
print("Finish validate.")
# create level1 and level2 category
for column in ['Division1', 'Division2', 'Division3', 'Division4']:
yearly_df[column] = yearly_df[column].where(yearly_df[column].notnull(), "")
range_df[column] = range_df[column].where(range_df[column].notnull(), "")
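# level2 concatenates the four Division columns into a single sub-category label (empty strings where a division is missing)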
yearly_df['level1'] = yearly_df['Category']
yearly_df['level2'] = yearly_df['Division1'] + yearly_df['Division2'] + yearly_df['Division3'] + yearly_df['Division4']
range_df['level1'] = range_df['Category']
range_df['level2'] = range_df['Division1'] + range_df['Division2'] + range_df['Division3'] + range_df['Division4']
# add "Unit" column to the yearly_df
# df.iloc[col][]
count = 0 # count number of rows
yearly_df["Unit"] = None # create a new empty column "Unit"
unit_temp = []
for row in yearly_df['level1'].values.tolist():
if row == "人口 Population":
unit_temp.append("人数 number of people")
elif (row == "农转非 Agricultural to Non-Agricultural Hukou / Change of Residency Status") and (yearly_df['level2'][count] == "人数 number of people"):
unit_temp.append("人数 number of people")
elif (row == "农转非 Agricultural to Non-Agricultural Hukou / Change of Residency Status") and (yearly_df['level2'][count] == "户数 number of households"):
unit_temp.append("户数 number of households")
elif row == "出生人数 Number of Births":
unit_temp.append("人数 number of people")
elif row == "户数 number of households":
unit_temp.append("户数 number of households")
elif row == "死亡人数 Number of Deaths":
unit_temp.append("人数 number of people")
elif row == "死亡率 Death Rate (%)":
unit_temp.append("百分比 percentage")
elif row == "死亡率 Death Rate (‰)":
unit_temp.append("千分比 perthousand")
elif row == "残疾人数 Disabled Population":
unit_temp.append("人数 number of people")
elif row == "流动人口/暂住人口 Migratory/Temporary Population":
unit_temp.append("人数 number of people")
elif row == "自然出生率 Birth Rate (%)":
unit_temp.append("百分比 percentage")
elif row == "自然出生率 Birth Rate (‰)":
unit_temp.append("千分比 perthousand")
elif row == "自然增长率 Natural Population Growth Rate (%)":
unit_temp.append("百分比 percentage")
elif row == "自然增长率 Natural Population Growth Rate (‰)":
unit_temp.append("千分比 perthousand")
elif row == "迁入 Migration In":
unit_temp.append("人数 number of people")
else: # set default unit to number of people
unit_temp.append("人数 number of people")
count = count + 1 # advance the row counter inside the loop so the Division lookups above read the current row
yearly_df["Unit"] = unit_temp
print("added unit column to population yearly table.")
#yearly_df.to_csv(output_path + "test.csv",encoding='utf-8-sig')
# add "Unit" column to the range_df
count = 0 # count number of rows
range_df["Unit"] = None # create a new empty column "Unit"
unit_temp = []
for row in range_df['level1'].values.tolist():
if row == "人口 Population":
unit_temp.append("人数 number of people")
elif (row == "农转非 Agricultural to Non-Agricultural Hukou / Change of Residency Status") and (yearly_df['level2'][count] == "人数 number of people"):
unit_temp.append("人数 number of people")
elif (row == "农转非 Agricultural to Non-Agricultural Hukou / Change of Residency Status") and (yearly_df['level2'][count] == "户数 number of households"):
unit_temp.append("户数 number of households")
elif row == "出生人数 Number of Births":
unit_temp.append("人数 number of people")
elif row == "户数 number of households":
unit_temp.append("户数 number of households")
elif row == "死亡人数 Number of Deaths":
unit_temp.append("人数 number of people")
elif row == "死亡率 Death Rate (%)":
unit_temp.append("百分比 percentage")
elif row == "死亡率 Death Rate (‰)":
unit_temp.append("千分比 perthousand")
elif row == "残疾人数 Disabled Population":
unit_temp.append("人数 number of people")
elif row == "流动人口/暂住人口 Migratory/Temporary Population":
unit_temp.append("人数 number of people")
elif row == "自然出生率 Birth Rate (%)":
unit_temp.append("百分比 percentage")
elif row == "自然出生率 Birth Rate (‰)":
unit_temp.append("千分比 perthousand")
elif row == "自然增长率 Natural Population Growth Rate (%)":
unit_temp.append("百分比 percentage")
elif row == "自然增长率 Natural Population Growth Rate (‰)":
unit_temp.append("千分比 perthousand")
elif row == "迁入 Migration In":
unit_temp.append("人数 number of people")
else: # set default unit to number of people
unit_temp.append("人数 number of people")
count = count + 1 # advance the row counter inside the loop so the Division lookups above read the current row
range_df["Unit"] = unit_temp
print("added unit column to population range_df table.")
#range_df.to_csv(output_path + "test.csv",encoding='utf-8-sig')
# transfer yearly_df to dictionary
yearly_data = {}
for column in yearly_df.columns:
yearly_data[column] = yearly_df[column].values.tolist()
# transfer range_df to dictionary
range_data = {}
for column in range_df.columns:
range_data[column] = range_df[column].values.tolist()
# merge yearly_df and range_df into df_yearly_range
yearly_and_range = {
'村志代码 Gazetteer Code': [],
'level1': [],
'level2': [],
'Start Year': [],
'End Year': [],
'Data': [],
'Unit': []}
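# yearly observations are stored as single-year ranges (Start Year == End Year); range observations keep their original start and end years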
print("process {} records in {} file".format(len(yearly_data['村志代码 Gazetteer Code']), yearly_file))
# select and store not null records at yearly
for i in range(len(yearly_data['村志代码 Gazetteer Code'])):
for year in range(1949, 2020):
# skip null records
if math.isnan(yearly_data[str(year)][i]):
continue
# store gazetteer code, categories, unit
for key in ['村志代码 Gazetteer Code', 'level1', 'level2', 'Unit']:
yearly_and_range[key].append(yearly_data[key][i])
# store data
yearly_and_range['Data'].append(yearly_data[str(year)][i])
# store start year, end year
yearly_and_range['Start Year'].append(year)
yearly_and_range['End Year'].append(year)
print("process {} records in {} file".format(len(range_data['村志代码 Gazetteer Code']), range_file))
# store range records
for i in range(len(range_data['村志代码 Gazetteer Code'])):
# store gazetteer code, categories, unit, start year, end year, data
for key in ['村志代码 Gazetteer Code', 'level1', 'level2', 'Start Year', 'End Year', 'Data', 'Unit']:
yearly_and_range[key].append(range_data[key][i])
# create df stores yearly and range data
yearly_and_range_df = pd.DataFrame(yearly_and_range)
# --- append category id ---
# group by categories
groupby_categories = yearly_and_range_df.groupby(['level1', 'level2'])
population_df = pd.DataFrame()
category_df = pd.read_csv("Database data/populationcategory_人口类.csv")
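# map the level1/level2 labels to the category ids predefined in populationcategory_人口类.csv; level2 ids are looked up with their level1 parent id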
for group, frame in groupby_categories:
level1_category = group[0]
level2_category = group[1]
parent_id = math.nan
if level1_category != "":
category_id = mapping_id(level1_category, parent_id, category_df)
parent_id = category_id
frame['category_id'] = [category_id] * len(frame)
if level2_category != "":
category_id = mapping_id(level2_category, parent_id, category_df)
parent_id = category_id
frame['category_id'] = [category_id] * len(frame)
population_df = | pd.concat([population_df, frame]) | pandas.concat |
import json
from datetime import datetime as dt
import math
import pandas as pd
from schemas.TableSchema import TableSchema
from games.LAKELAND.LakelandExtractor import LakelandExtractor
from realtime.ModelManager import ModelManager
from schemas.GameSchema import GameSchema
dump = pd.read_csv(
"tests/test_data/LAKELAND_20200828_to_20200828 2/LAKELAND_20200828_to_20200828_d45ae97_dump.tsv", sep='\t')
dump = dump.rename(
columns={'sess_id':'session_id', 'client_secs_ms':'client_time_ms', 'persistent_sess_id':'persistent_session_id'})
proc = pd.read_csv("tests/test_data/LAKELAND_20200828_to_20200828/LAKELAND_20200828_to_20200828_d45ae97_proc.csv")
# print(df.columns)
model_name = 'PopAchVelocityModel'
file_version = 'v18'
schema = GameSchema("LAKELAND", "schemas/JSON/")
model_mgr = ModelManager(game_name="LAKELAND")
col_names = list(dump.columns)
game_id = dump['app_id'][0]
min_level = dump['level'].min()
max_level = dump['level'].max()
table = TableSchema(game_id=game_id, column_names=col_names, max_level=max_level, min_level=min_level)
#table = TableSchema.FromCSV(dump)
session_id_list = dump.session_id.unique()
model = model_mgr.LoadModel(model_name)
test_outfile = open('test_outfile.csv', 'w+')
next_session_result = []
ids = []
session_result_list = []
def isfloat(x):
try:
a = float(x)
except (TypeError, ValueError):
return False
else:
return True
def isint(x):
try:
a = float(x)
b = int(a)
except (TypeError, ValueError, OverflowError):
return False
else:
return a == b
def parse_nums(x):
for k, v in x.items():
neg, p = False, ""
if len(v.split('-')) == 2:
neg = True
if neg:
p = v.split("-")[1]
else:
p = v
if isint(p):
x[k] = int(float(p)) if not neg else -1 * int(float(p))
elif isfloat(p):
x[k] = float(p) if not neg else -1 * float(p)
return x
def get_time(x):
return dt.strptime(x, '%Y-%m-%d %H:%M:%S')
session_result_list = []
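# process each session incrementally: rows are fed to the extractor in chunks, and the realtime model is re-evaluated after every chunk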
for session in session_id_list:
slice = 300 # number of rows per incremental evaluation chunk (note: this name shadows the built-in slice)
next_session_result = []
dump_session_data = dump.loc[dump['session_id'] == session].to_dict('records')
extractor = LakelandExtractor(session, table, schema, test_outfile)
for i in range(0, len(dump_session_data), slice):
next_slice = dump_session_data[i:i+slice] # rows i through i+slice-1, so every row (including the final partial chunk) is processed
# print(i, len(next_slice))
print(len(dump_session_data), i)
for row in next_slice:
row = list(row.values())
col = row[table.complex_data_index]
complex_data_parsed = json.loads(col) if (col is not None) else {"event_custom": row[table.event_custom_index]}
if "event_custom" not in complex_data_parsed.keys():
complex_data_parsed["event_custom"] = row[table.event_custom_index]
row[table.client_time_index] = dt.strptime(row[table.client_time_index], '%Y-%m-%d %H:%M:%S')
row[table.complex_data_index] = complex_data_parsed
extractor.extractFromRow(row_with_complex_parsed=row, game_table=table)
extractor.CalculateAggregateFeatures()
all_features = dict(zip(extractor.getFeatureNames(game_table=table, game_schema=schema),
extractor.GetCurrentFeatures()))
all_features = parse_nums(all_features)
result = model.Eval([all_features])[0]
if result is None:
break
next_session_result.append(result) # append the evaluation computed above instead of evaluating the model a second time
print("next_sess_result", next_session_result)
next_session_result.insert(0, session)
session_result_list.append(next_session_result)
out_df = | pd.DataFrame(session_result_list) | pandas.DataFrame |
#Rule - No content in brackets should be there in VOICE_ONLY column.
'''
Example:
To complete the Master Promissory Note (M.P.N) and entrance counseling, students must go to the Federal Student Aid website. ----- Incorrect
To complete the Master Promissory Note and entrance counseling, students must go to the Federal Student Aid website. ------------- Correct.
'''
def no_content_in_brackets(fle, fleName, target):
import re
import os
import sys
import json
import openpyxl
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
configFile = 'https://s3.us-east.cloud-object-storage.appdomain.cloud/sharad-saurav-bucket/Configuration.xlsx'
rule="No_content_in_brackets"
config=pd.read_excel(configFile)
newdf=config[config['RULE']==rule]
to_check=''
for index,row in newdf.iterrows():
to_check=row['TO_CHECK']
to_check=json.loads(to_check)
files_to_apply=to_check['files_to_apply']
columns_to_apply=to_check['columns_to_apply']
regex = r'\[(.+)\]|\((.+)\)|\{(.+)\}'
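# matches any content enclosed in square, round, or curly brackets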
def check_content_in_bracket(string):
if(re.search(regex,string)):
return True
else:
return False
if(files_to_apply=='ALL' or fleName in files_to_apply):
data=[]
df = | pd.read_excel(fle) | pandas.read_excel |
import numpy as np
import pandas as pd
import pickle as pkl
import proj_utils as pu
from os.path import isdir, join
from os import mkdir
from copy import deepcopy
from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sklearn import ensemble, feature_selection, model_selection, preprocessing, svm, metrics, neighbors
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import shuffle
from sklearn.exceptions import ConvergenceWarning
seed = 13
def calc_scores(y_test, predicted):
balanced = metrics.balanced_accuracy_score(y_test, predicted)
chance = metrics.balanced_accuracy_score(y_test, predicted, adjusted=True)
f1 = metrics.f1_score(y_test, predicted, average=None)
return balanced, chance, f1
def save_scores(f1_scores, balanced_scores, chance_scores, class_labels):
# Calculate average performance and tack it onto the end of the score list, save to nice df
n_folds = len(balanced_scores)
f1_array = np.asarray(f1_scores)
if n_folds != f1_array.shape[0]:
raise ValueError("Number of folds does not match")
rownames = ['Fold %02d' % (n+1) for n in range(n_folds)]
rownames.append('Average')
f1_class_averages = np.mean(f1_array, axis=0)
f1_data = np.vstack((f1_array, f1_class_averages))
f1_df = pd.DataFrame(f1_data, index=rownames, columns=class_labels)
balanced_scores.append(np.mean(balanced_scores))
chance_scores.append(np.mean(chance_scores))
accuracy_data = np.asarray([balanced_scores, chance_scores]).T
score_df = pd.DataFrame(data=accuracy_data, index=rownames, columns=['Balanced accuracy', 'Chance accuracy'])
return f1_df, score_df
def svmc(x_train, y_train, x_test, cleaned_features):
clf = svm.LinearSVC(fit_intercept=False, random_state=seed)
clf.fit(x_train, y_train)
target_classes = clf.classes_
target_classes = [str(c) for c in target_classes]
predicted = clf.predict(x_test)
if len(target_classes) == 2:
idx_label = ['coefficients']
else:
idx_label = target_classes
coef_df = pd.DataFrame(clf.coef_, index=idx_label, columns=cleaned_features)
return predicted, coef_df, clf
def extra_trees(x_train, y_train, x_test, cleaned_features):
clf = ensemble.ExtraTreesClassifier(random_state=seed)
clf.fit(x_train, y_train)
predicted = clf.predict(x_test)
feature_df = pd.DataFrame(columns=cleaned_features)
feature_df.loc['feature_importances'] = clf.feature_importances_
return predicted, feature_df, clf
def knn(x_train, y_train, x_test):
clf = neighbors.KNeighborsClassifier()
clf.fit(x_train, y_train)
predicted = clf.predict(x_test)
return predicted, clf
def convert_hads_to_single_label(hads_array):
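# map the two HADS subscale codes (0 = normal, 1 = borderline, 2 = abnormal) to a single combined anxiety-depression string label per row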
hads_array = hads_array.astype(int)
vartypes = ['anxiety', 'depression']
hads_single_label = []
for row in range(hads_array.shape[0]):
str_combos = []
for col in range(hads_array.shape[1]):
val = hads_array[row, col]
if val == 0:
str_convert = '%s_normal' % vartypes[col]
elif val == 1:
str_convert = '%s_borderline' % vartypes[col]
elif val == 2:
str_convert = '%s_abnormal' % vartypes[col]
str_combos.append(str_convert)
hads_combined = '%s-%s' % (str_combos[0], str_combos[1])
hads_single_label.append(hads_combined)
return hads_single_label
def feature_selection_with_covariates(x_train, x_test, y_train, continuous_indices, categorical_indices, feature_names):
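# continuous features are z-scored and categorical features variance-thresholded separately, then an extra-trees model keeps features with importance above 2*mean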
# Split data for continuous, categorical preprocessing
x_train_cont, x_test_cont = x_train[:, continuous_indices], x_test[:, continuous_indices]
x_train_cat, x_test_cat = x_train[:, categorical_indices], x_test[:, categorical_indices]
# Standardization for continuous data
preproc = preprocessing.StandardScaler().fit(x_train_cont)
x_train_z = preproc.transform(x_train_cont)
x_test_z = preproc.transform(x_test_cont)
# Variance threshold for categorical data
varthresh = feature_selection.VarianceThreshold(threshold=0).fit(x_train_cat)
x_train_v = varthresh.transform(x_train_cat)
x_test_v = varthresh.transform(x_test_cat)
x_train_data = np.hstack((x_train_z, x_train_v))
x_test_data = np.hstack((x_test_z, x_test_v))
# Feature selection with extra trees
extra_tree_fs = ensemble.ExtraTreesClassifier(random_state=seed)
feature_model = feature_selection.SelectFromModel(extra_tree_fs, threshold="2*mean")
# Transform train and test data with feature selection model
x_train_feature_selected = feature_model.fit_transform(x_train_data, y_train)
x_test_feature_selected = feature_model.transform(x_test_data)
feature_indices = feature_model.get_support(indices=True)
cleaned_features = [feature_names[i] for i in feature_indices]
return x_train_feature_selected, x_test_feature_selected, cleaned_features
def feature_selection_without_covariates(x_train, x_test, y_train, feature_names):
# Standardization for continuous data
preproc = preprocessing.StandardScaler().fit(x_train)
x_train_z = preproc.transform(x_train)
x_test_z = preproc.transform(x_test)
# Feature selection with extra trees
extra_tree_fs = ensemble.ExtraTreesClassifier(random_state=seed)
feature_model = feature_selection.SelectFromModel(extra_tree_fs, threshold="2*mean")
# Transform train and test data with feature selection model
x_train_feature_selected = feature_model.fit_transform(x_train_z, y_train)
x_test_feature_selected = feature_model.transform(x_test_z)
feature_indices = feature_model.get_support(indices=True)
cleaned_features = [feature_names[i] for i in feature_indices]
return x_train_feature_selected, x_test_feature_selected, cleaned_features
@ignore_warnings(category=ConvergenceWarning)
def eeg_classify(eeg_data, target_data, target_type, model, outdir=None, resample='SMOTE'):
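    """Resample the data ('SMOTE', 'ROS', 'RUS' or 'no_resample'), then run 50-fold stratified
    cross-validation: per fold, feature selection followed by an 'svm', 'extra_trees' or 'knn'
    fit, with accuracy/F1 scores and confusion matrices collected and written to outdir."""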
feature_names = list(eeg_data)
if "categorical_sex_male" in feature_names:
cv_check = 'with_covariates'
else:
cv_check = 'without_covariates'
    if resample == 'no_resample':
class NoResample: # for convenience
@staticmethod
def fit_resample(a, b):
return a.values, np.asarray(b)
resampler = NoResample()
    elif resample == 'ROS':
resampler = RandomOverSampler(sampling_strategy='not majority', random_state=seed)
    elif resample == 'SMOTE':
resampler = SMOTE(sampling_strategy='not majority', random_state=seed)
    elif resample == 'RUS':
resampler = RandomUnderSampler(sampling_strategy='not minority', random_state=seed)
x_res, y_res = resampler.fit_resample(eeg_data, target_data)
if outdir is not None:
model_outdir = join(outdir, '%s %s %s %s' % (target_type, model, cv_check, resample))
if not isdir(model_outdir):
mkdir(model_outdir)
print('%s: Running classification - %s %s %s %s' % (pu.ctime(), target_type, model, cv_check, resample))
# Apply k-fold splitter
n_splits = 50
    skf = model_selection.StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)  # shuffle=True so that random_state takes effect
skf.get_n_splits(x_res, y_res)
fold_count = 0
classifier_objects, classifier_coefficients, cm_dict, norm_cm_dict = {}, {}, {}, {}
balanced_acc, chance_acc, f1_scores = [], [], []
for train_idx, test_idx in skf.split(x_res, y_res):
fold_count += 1
print('%s: Running FOLD %d for %s' % (pu.ctime(), fold_count, target_type))
foldname = 'Fold %02d' % fold_count
# Stratified k-fold splitting
x_train, x_test = x_res[train_idx], x_res[test_idx]
y_train, y_test = y_res[train_idx], y_res[test_idx]
if "categorical_sex_male" in feature_names:
continuous_features = [f for f in feature_names if 'categorical' not in f]
continuous_indices = [eeg_data.columns.get_loc(cont) for cont in continuous_features]
categorical_features = [f for f in feature_names if 'categorical' in f]
categorical_indices = [eeg_data.columns.get_loc(cat) for cat in categorical_features]
x_train_fs, x_test_fs, cleaned_features = feature_selection_with_covariates(
x_train, x_test, y_train, continuous_indices, categorical_indices, feature_names)
else:
x_train_fs, x_test_fs, cleaned_features = feature_selection_without_covariates(
x_train, x_test, y_train, feature_names)
        if model == 'svm':
predicted, coef_df, clf = svmc(x_train_fs, y_train, x_test_fs, cleaned_features)
classifier_coefficients[foldname] = coef_df
        elif model == 'extra_trees':
predicted, feature_importances, clf = extra_trees(x_train_fs, y_train, x_test_fs, cleaned_features)
classifier_coefficients[foldname] = feature_importances
        elif model == 'knn':
predicted, clf = knn(x_train_fs, y_train, x_test_fs)
classifier_objects[foldname] = clf
# Calculating fold performance scores
balanced, chance, f1 = calc_scores(y_test, predicted)
balanced_acc.append(balanced)
chance_acc.append(chance)
f1_scores.append(f1)
# Calculating fold confusion matrix
cm = metrics.confusion_matrix(y_test, predicted)
normalized_cm = cm.astype('float')/cm.sum(axis=1)[:, np.newaxis]
cm_dict[foldname] = pd.DataFrame(cm, index=clf.classes_, columns=clf.classes_)
norm_cm_dict[foldname] = pd.DataFrame(normalized_cm, index=clf.classes_, columns=clf.classes_)
# Saving performance scores
f1_df, score_df = save_scores(f1_scores, balanced_acc, chance_acc, class_labels=clf.classes_)
scores_dict = {'accuracy scores': score_df,
'f1 scores': f1_df}
try:
pu.save_xls(scores_dict, join(model_outdir, 'performance.xlsx'))
# Saving coefficients
if bool(classifier_coefficients):
pu.save_xls(classifier_coefficients, join(model_outdir, 'coefficients.xlsx'))
pu.save_xls(cm_dict, join(model_outdir, 'confusion_matrices.xlsx'))
pu.save_xls(norm_cm_dict, join(model_outdir, 'confusion_matrices_normalized.xlsx'))
# Saving classifier object
with open(join(model_outdir, 'classifier_object.pkl'), 'wb') as file:
pkl.dump(classifier_objects, file)
except Exception:
pass
return scores_dict
def side_classification_drop_asym(ml_data, behavior_data, output_dir, models=None):
print('%s: Running classification on tinnitus side, dropping asymmetrical subjects' % pu.ctime())
ml_copy = deepcopy(ml_data)
if models is None:
models = ['extra_trees']
resample_methods = [None, 'over', 'under']
t = pu.convert_tin_to_str(behavior_data['tinnitus_side'].values.astype(float), 'tinnitus_side')
t_df = | pd.DataFrame(t, index=ml_copy.index) | pandas.DataFrame |
"""
Filter and combine various peptide/MHC datasets to derive a composite training set,
optionally including eluted peptides identified by mass-spec.
"""
import sys
import argparse
import os
import json
import collections
from six.moves import StringIO
import pandas
from mhcflurry.common import normalize_allele_name
def normalize_allele_name_or_return_unknown(s):
return normalize_allele_name(
s,
raise_on_error=False,
default_value="UNKNOWN")
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--ms-item",
nargs="+",
action="append",
metavar="PMID FILE, ... FILE",
default=[],
help="Mass spec item to curate: PMID and list of files")
parser.add_argument(
"--expression-item",
nargs="+",
action="append",
metavar="LABEL FILE, ... FILE",
default=[],
help="Expression data to curate: dataset label and list of files")
parser.add_argument(
"--ms-out",
metavar="OUT.csv",
help="Out file path (MS data)")
parser.add_argument(
"--expression-out",
metavar="OUT.csv",
help="Out file path (RNA-seq expression)")
parser.add_argument(
"--expression-metadata-out",
metavar="OUT.csv",
help="Out file path for expression metadata, i.e. which samples used")
parser.add_argument(
"--debug",
action="store_true",
default=False,
help="Leave user in pdb if PMID is unsupported")
PMID_HANDLERS = {}
EXPRESSION_HANDLERS = {}
def load(filenames, **kwargs):
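    """Read each input into a dict keyed by filename: CSV and Excel files are parsed into
    DataFrames (kwargs are passed through to pandas), any other path is kept as-is."""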
result = {}
for filename in filenames:
if filename.endswith(".csv"):
result[filename] = pandas.read_csv(filename, **kwargs)
elif filename.endswith(".xlsx") or filename.endswith(".xls"):
result[filename] = pandas.read_excel(filename, **kwargs)
else:
result[filename] = filename
return result
def debug(*filenames):
loaded = load(filenames)
import ipdb
ipdb.set_trace()
def handle_pmid_27600516(filename):
"""Gloger, ..., Neri Cancer Immunol Immunother 2016 [PMID 27600516]"""
df = pandas.read_csv(filename)
sample_to_peptides = {}
current_sample = None
for peptide in df.peptide:
if peptide.startswith("#"):
current_sample = peptide[1:]
sample_to_peptides[current_sample] = []
else:
assert current_sample is not None
sample_to_peptides[current_sample].append(peptide.strip().upper())
rows = []
for (sample, peptides) in sample_to_peptides.items():
for peptide in sorted(set(peptides)):
rows.append([sample, peptide])
result_df = pandas.DataFrame(rows, columns=["sample_id", "peptide"])
result_df["sample_type"] = "melanoma_cell_line"
result_df["cell_line"] = result_df.sample_id
result_df["mhc_class"] = "I"
result_df["pulldown_antibody"] = "W6/32"
result_df["format"] = "multiallelic"
result_df["hla"] = result_df.sample_id.map({
"FM-82": "HLA-A*02:01 HLA-A*01:01 HLA-B*08:01 HLA-B*15:01 HLA-C*03:04 HLA-C*07:01",
"FM-93/2": "HLA-A*02:01 HLA-A*26:01 HLA-B*40:01 HLA-B*44:02 HLA-C*03:04 HLA-C*05:01",
"Mel-624": "HLA-A*02:01 HLA-A*03:01 HLA-B*07:02 HLA-B*14:01 HLA-C*07:02 HLA-C*08:02",
"MeWo": "HLA-A*02:01 HLA-A*26:01 HLA-B*14:02 HLA-B*38:01 HLA-C*08:02 HLA-C*12:03",
"SK-Mel-5": "HLA-A*02:01 HLA-A*11:01 HLA-B*40:01 HLA-C*03:03",
})
return result_df
def handle_pmid_23481700(filename):
"""Hassan, ..., <NAME> Mol Cell Proteomics 2015 [PMID 23481700]"""
df = pandas.read_excel(filename, skiprows=10)
assert df["Peptide sequence"].iloc[0] == "TPSLVKSTSQL"
assert df["Peptide sequence"].iloc[-1] == "LPHSVNSKL"
hla = {
"JY": "HLA-A*02:01 HLA-B*07:02 HLA-C*07:02",
"HHC": "HLA-A*02:01 HLA-B*07:02 HLA-B*44:02 HLA-C*05:01 HLA-C*07:02",
}
results = []
for sample_id in ["JY", "HHC"]:
hits_df = df.loc[
df["Int %s" % sample_id].map(
lambda x: {"n.q.": 0, "n.q": 0}.get(x, x)).astype(float) > 0
]
result_df = pandas.DataFrame({
"peptide": hits_df["Peptide sequence"].dropna().values,
})
result_df["sample_id"] = sample_id
result_df["cell_line"] = "B-LCL-" + sample_id
result_df["hla"] = hla[sample_id]
result_df["sample_type"] = "B-LCL"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
result_df["pulldown_antibody"] = "W6/32"
results.append(result_df)
result_df = pandas.concat(results, ignore_index=True)
# Rename samples to avoid a collision with the JY sample in PMID 25576301.
result_df.sample_id = result_df.sample_id.map({
"JY": "JY.2015",
"HHC": "HHC.2015",
})
return result_df
def handle_pmid_24616531(filename):
"""Mommen, ..., Heck PNAS 2014 [PMID 24616531]"""
df = pandas.read_excel(filename, sheet_name="EThcD")
peptides = df.Sequence.values
assert peptides[0] == "APFLRIAF"
assert peptides[-1] == "WRQAGLSYIRYSQI"
result_df = pandas.DataFrame({
"peptide": peptides,
})
result_df["sample_id"] = "24616531"
result_df["sample_type"] = "B-LCL"
result_df["cell_line"] = "GR"
result_df["pulldown_antibody"] = "W6/32"
# Note: this publication lists hla as "HLA-A*01,-03, B*07,-27, and -C*02,-07"
# we are guessing the exact 4 digit alleles based on this.
result_df["hla"] = "HLA-A*01:01 HLA-A*03:01 HLA-B*07:02 HLA-B*27:05 HLA-C*02:02 HLA-C*07:01"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_25576301(filename):
"""Bassani-Sternberg, ..., Mann Mol Cell Proteomics 2015 [PMID 25576301]"""
df = pandas.read_excel(filename, sheet_name="Peptides")
peptides = df.Sequence.values
assert peptides[0] == "AAAAAAAQSVY"
assert peptides[-1] == "YYYNGKAVY"
column_to_sample = {}
for s in [c for c in df if c.startswith("Intensity ")]:
assert s[-2] == "-"
column_to_sample[s] = s.replace("Intensity ", "")[:-2].strip()
intensity_columns = list(column_to_sample)
rows = []
for _, row in df.iterrows():
x1 = row[intensity_columns]
x2 = x1[x1 > 0].index.map(column_to_sample).value_counts()
x3 = x2[x2 >= 2] # require at least two replicates for each peptide
for sample in x3.index:
rows.append((row.Sequence, sample))
result_df = pandas.DataFrame(rows, columns=["peptide", "sample_id"])
result_df["pulldown_antibody"] = "W6/32"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
allele_map = {
'Fib': "HLA-A*03:01 HLA-A*23:01 HLA-B*08:01 HLA-B*15:18 HLA-C*07:02 HLA-C*07:04",
'HCC1937': "HLA-A*23:01 HLA-A*24:02 HLA-B*07:02 HLA-B*40:01 HLA-C*03:04 HLA-C*07:02",
'SupB15WT': None, # four digit alleles unknown, will drop sample
'SupB15RT': None,
'HCT116': "HLA-A*01:01 HLA-A*02:01 HLA-B*45:01 HLA-B*18:01 HLA-C*05:01 HLA-C*07:01",
# Homozygous at HLA-A:
'HCC1143': "HLA-A*31:01 HLA-A*31:01 HLA-B*35:08 HLA-B*37:01 HLA-C*04:01 HLA-C*06:02",
# Homozygous everywhere:
'JY': "HLA-A*02:01 HLA-A*02:01 HLA-B*07:02 HLA-B*07:02 HLA-C*07:02 HLA-C*07:02",
}
sample_type = {
'Fib': "fibroblast",
'HCC1937': "basal like breast cancer",
'SupB15WT': None,
'SupB15RT': None,
'HCT116': "colon carcinoma",
'HCC1143': "basal like breast cancer",
'JY': "B-cell",
}
cell_line = {
'Fib': None,
'HCC1937': "HCC1937",
'SupB15WT': None,
'SupB15RT': None,
'HCT116': "HCT116",
'HCC1143': "HCC1143",
'JY': "JY",
}
result_df["hla"] = result_df.sample_id.map(allele_map)
print("Entries before dropping samples with unknown alleles", len(result_df))
result_df = result_df.loc[~result_df.hla.isnull()]
print("Entries after dropping samples with unknown alleles", len(result_df))
result_df["sample_type"] = result_df.sample_id.map(sample_type)
result_df["cell_line"] = result_df.sample_id.map(cell_line)
print(result_df.head(3))
return result_df
def handle_pmid_26992070(*filenames):
"""Ritz, ..., Fugmann Proteomics 2016 [PMID 26992070]"""
# Although this publication seems to suggest that HEK293 are C*07:02
# (figure 3B), in a subsequent publication [PMID 28834231] this group
# gives the HEK293 HLA type as HLA‐A*03:01, HLA‐B*07:02, and HLA‐C*07:01.
# We are therefore using the HLA‐C*07:01 (i.e. the latter) typing results
# here.
allele_text = """
Cell line HLA-A 1 HLA-A 2 HLA-B 1 HLA-B 2 HLA-C 1 HLA-C 2
HEK293 03:01 03:01 07:02 07:02 07:01 07:01
HL-60 01:01 01:01 57:01 57:01 06:02 06:02
RPMI8226 30:01 68:02 15:03 15:10 02:10 03:04
MAVER-1 24:02 26:01 38:01 44:02 05:01 12:03
THP-1 02:01 24:02 15:11 35:01 03:03 03:03
"""
allele_info = pandas.read_csv(
StringIO(allele_text), sep="\t", index_col=0)
allele_info.index = allele_info.index.str.strip()
for gene in ["A", "B", "C"]:
for num in ["1", "2"]:
allele_info[
"HLA-%s %s" % (gene, num)
] = "HLA-" + gene + "*" + allele_info["HLA-%s %s" % (gene, num)]
cell_line_to_allele = allele_info.apply(" ".join, axis=1)
sheets = {}
for f in filenames:
if f.endswith(".xlsx"):
d = pandas.read_excel(f, sheet_name=None, skiprows=1)
sheets.update(d)
dfs = []
for cell_line in cell_line_to_allele.index:
# Using data from DeepQuanTR, which appears to be a consensus between
# two other methods used.
sheet = sheets[cell_line + "_DeepQuanTR"]
replicated = sheet.loc[
sheet[[c for c in sheet if "Sample" in c]].fillna(0).sum(1) > 1
]
df = pandas.DataFrame({
'peptide': replicated.Sequence.values
})
df["sample_id"] = cell_line
df["hla"] = cell_line_to_allele.get(cell_line)
dfs.append(df)
result_df = pandas.concat(dfs, ignore_index=True)
result_df["pulldown_antibody"] = "W6/32"
result_df["cell_line"] = result_df["sample_id"]
result_df["sample_type"] = result_df.sample_id.map({
"HEK293": "hek",
"HL-60": "neutrophil",
"RPMI8226": "b-cell",
"MAVER-1": "b-LCL",
"THP-1": "monocyte",
})
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_27412690(filename):
"""Shraibman, ..., Admon Mol Cell Proteomics 2016 [PMID 27412690]"""
hla_types = {
"U-87": "HLA-A*02:01 HLA-B*44:02 HLA-C*05:01",
"T98G": "HLA-A*02:01 HLA-B*39:06 HLA-C*07:02",
"LNT-229": "HLA-A*03:01 HLA-B*35:01 HLA-C*04:01",
}
sample_id_to_cell_line = {
"U-87": "U-87",
"T98G": "T98G",
"LNT-229": "LNT-229",
"U-87+DAC": "U-87",
"T98G+DAC": "T98G",
"LNT-229+DAC": "LNT-229",
}
df = pandas.read_excel(filename)
assert df.Sequence.iloc[0] == "AAAAAAGSGTPR"
intensity_col_to_sample_id = {}
for col in df:
if col.startswith("Intensity "):
sample_id = col.split()[1]
assert sample_id in sample_id_to_cell_line, (col, sample_id)
intensity_col_to_sample_id[col] = sample_id
dfs = []
for (sample_id, cell_line) in sample_id_to_cell_line.items():
intensity_cols = [
c for (c, v) in intensity_col_to_sample_id.items()
if v == sample_id
]
hits_df = df.loc[
(df[intensity_cols] > 0).sum(1) > 1
]
result_df = pandas.DataFrame({
"peptide": hits_df.Sequence.values,
})
result_df["sample_id"] = sample_id
result_df["cell_line"] = cell_line
result_df["hla"] = hla_types[cell_line]
dfs.append(result_df)
result_df = pandas.concat(dfs, ignore_index=True)
result_df["sample_type"] = "glioblastoma"
result_df["pulldown_antibody"] = "W6/32"
result_df["mhc_class"] = "I"
result_df["format"] = "multiallelic"
return result_df
def handle_pmid_28832583(*filenames):
"""Bassani-Sternberg, ..., Gfeller PLOS Comp. Bio. 2017 [PMID 28832583]"""
# This work also reanalyzes data from
# Pearson, ..., <NAME> Invest 2016 [PMID 27841757]
(filename_dataset1, filename_dataset2) = sorted(filenames)
dataset1 = pandas.read_csv(filename_dataset1, sep="\t")
dataset2 = | pandas.read_csv(filename_dataset2, sep="\t") | pandas.read_csv |
from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
import tensorflow as tf
from keras.models import Sequential
from keras.optimizers import SGD, RMSprop, Adam
from keras.layers import Dense, Activation, Dropout, BatchNormalization
import numpy as np
import matplotlib.pyplot as plt
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
# imputer for missing data (SimpleImputer replaces the removed sklearn.preprocessing.Imputer)
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
# load training data from csv file in the directory
train_test_data = pd.read_csv('train.csv', index_col=0) #read data
train_test_data['is_dev'] = 0 #add an extra column to specify that this is not
#for dev. This is important because later on, we will concatenate all the data,
#and process it and we want all the data to be processed the same way.
dev_data = pd.read_csv('test.csv', index_col=0) #read "dev" data
dev_data['is_dev'] = 1 #add an extra column that this is dev data
#concatenate both dataframes
all_data = | pd.concat((train_test_data, dev_data), axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
'''
Created on 30 jun 2016
@author: a001985
'''
import codecs
import re
import os
import datetime
import time
import sys
try:
import pandas as pd
except:
pass
odv_directory_path = os.path.dirname(os.path.abspath(__file__))
# from .. import mappinglib as mapping
from sharkpylib import mappinglib as mapping
"""
===============================================================================
===============================================================================
"""
class SpreadsheetFile():
"""
Class to hande vocabulary things in ODV spreadsheet file
"""
#==========================================================================
def __init__(self, file_path=None):
self.file_path = file_path
def get_edited_flags(self, qf_prefix='QF_', qf_suffix=''):
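        """Collect the quality-flag edits recorded as EDITFLAGS comment rows in an ODV
        spreadsheet. Returns a dict: parameter -> flag value -> list of (time, primary variable)
        keys; stations without any flag edits are dropped."""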
def is_header_row(row):
if row.startswith('Cruise\t'):
return True
return False
def is_data_row(row):
if any([is_header_row(row), is_comment_row(row)]):
return False
return True
def row_has_metadata(row):
if not is_data_row(row):
return False
if row.startswith('\t'):
return False
return True
def is_comment_row(row):
if row.startswith('//'):
return True
return False
def is_flag_row(row):
if 'EDITFLAGS' in row:
return True
return False
def get_key(cdata):
time_object = datetime.datetime.strptime(cdata[time_par], time_format)
if data_type == 'Profiles':
return (time_object, cdata[header[7]])
else:
return (time_object, time_object)
time_format = '%Y-%m-%dT%H:%M:%S'
time_par = 'yyyy-mm-ddThh:mm:ss.sss'
data = {}
with codecs.open(self.file_path) as fid:
header = None
metadata = None
line_data = None
current_data = {}
key = None
data_type = ''
for i, line in enumerate(fid):
if '<DataType>' in line:
data_type = line.split('<')[1].split('>')[1]
continue
split_line = [item.strip() for item in line.split('\t')]
if is_header_row(line):
header = split_line
continue
if row_has_metadata(line):
metadata = split_line[:7]
if is_data_row(line):
line_data = metadata + split_line[7:]
if is_comment_row(line):
metadata = None
line_data = None
if line_data:
current_data = {}
h0 = ''
for h, d in zip(header, line_data):
h1 = h
# if h == 'QF':
# h1 = qf_prefix + h0 + qf_suffix
# else:
# h0 = h
current_data[h1] = d
# current_data = dict(zip(header, line_data))
# key = ';'.join([current_data['yyyy-mm-ddThh:mm:ss.sss'],
# current_data['Longitude [degrees_east]'],
# current_data['Latitude [degrees_north]'],
# current_data[header[7]]])
key = get_key(current_data)
data.setdefault(key, {})
for col, value in current_data.items():
data[key][col] = value
if is_flag_row(line):
data[key].setdefault('flags', {})
all_info = line.split('\t')[3]
par = all_info.split('@')[0].strip()
flag = all_info.split('->')[-1].split('<')[0].strip()
data[key]['flags'][par] = flag
for key in list(data.keys()):
if not data[key].get('flags'):
data.pop(key)
fdata = {}
for key, value in data.items():
d = key[0]
for par, f in value['flags'].items():
fdata.setdefault(par, {})
fdata[par].setdefault(f, [])
fdata[par][f].append(key)
return fdata
#==========================================================================
def set_negative_value_to_zero(self, output_file_path):
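        """Write a copy of the file to output_file_path with negative tab-delimited values replaced by 0."""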
re_string = '\t-.+?\t'
fid = codecs.open(self.file_path, 'r')
fid_out = codecs.open(output_file_path, 'w')
for line in fid:
fid_out.write(re.sub(re_string, u'\t0\t', line))
fid.close()
fid_out.close()
#==========================================================================
def get_local_cdi_list(self, print_to_file='', show_process_every=100000):
"""
Created: 20180523
Updated: 20180613
Returns a list of all local_cdi_id:s found in the spreadseet file
"""
t0 = time.time()
cdi_set = set()
print('='*50)
print(self.file_path)
print('-'*50)
with codecs.open(self.file_path) as fid:
for k, line in enumerate(fid):
if show_process_every:
if not k%show_process_every:
print('Working on line {} as time {}'.format(k, time.time()-t0))
if line.startswith('//'):
continue
elif line.startswith('Cruise'):
continue
else:
split_line = line.split('\t')
if split_line[0].strip(): # added by MW 20180613
cdi_set.add(split_line[6])
if show_process_every:
print('Number of lines: {}'.format(k))
print('Number of local_cdi_id is: {}'.format(len(cdi_set)))
sorted_cdi_set = sorted(cdi_set)
if print_to_file:
with codecs.open(print_to_file, 'w') as fid:
fid.write('\n'.join(sorted_cdi_set))
return sorted_cdi_set
#==========================================================================
def get_odv_station_count(self, show_process_every=100000):
"""
Created: 20180613
Updated:
Returns the number of stations acording to odv.
A station is identified as a non comment (or Cruise) row having value in first column (Cruise).
"""
t0 = time.time()
nr_stations = 0
print('='*50)
print(self.file_path)
print('-'*50)
with codecs.open(self.file_path) as fid:
for k, line in enumerate(fid):
if show_process_every:
if not k%show_process_every:
print('Working on line {} as time {}'.format(k, time.time()-t0))
if line.startswith('//'):
continue
elif line.startswith('Cruise'):
continue
else:
split_line = line.split('\t')
if split_line[0].strip():
nr_stations += 1
if show_process_every:
print('Number of lines: {}'.format(k))
return nr_stations
#==========================================================================
def get_unique_list(self, col, print_to_file='', show_process_every=100000, **kwargs):
"""
Created: 20180605
Updated: 20181010
Returns a list of all unique values of the given column in the spreadseet file
"""
t0 = time.time()
data_set = set()
if show_process_every:
print('='*50)
print(self.file_path)
print('-'*50)
with codecs.open(self.file_path, encoding=kwargs.get('encoding', 'utf8')) as fid:
for k, line in enumerate(fid):
if show_process_every:
if not k%show_process_every:
print('Working on line {} as time {}'.format(k, time.time()-t0))
if line.startswith('//'):
continue
elif line.startswith('Cruise'):
header = line.split('\t')
if col == 'id':
continue
else:
if col not in header:
print('Column "{}" not in header!'.format(col))
return False
index = header.index(col)
continue
else:
split_line = line.split('\t')
if kwargs.get('metadata') and not split_line[0]:
continue
if col == 'id':
# Combine several columns
data_set_list = []
time_string = split_line[header.index('yyyy-mm-ddThh:mm:ss.sss')]
if not time_string.strip():
continue
try:
time_object = get_datetime_object(time_string)
except:
print(self.file_path)
print('k', k)
print(time_string)
data_set_list.append(time_object.strftime('%Y-%m-%d'))
if kwargs.get('include_time', True):
data_set_list.append(time_object.strftime('%H:%M'))
else:
data_set_list.append('')
# print(header.index('Latitude [degrees_north]'))
# print(split_line[header.index('Latitude [degrees_north]')])
# print(split_line)
lat = str(mapping.to_decmin(mapping.sdate_from_odv_time_string(split_line[header.index('Latitude [degrees_north]')])))[:kwargs.get('id_pos_precision', 6)]
lon = str(mapping.to_decmin(mapping.sdate_from_odv_time_string(split_line[header.index('Longitude [degrees_east]')])))[:kwargs.get('id_pos_precision', 6)]
# lat = geography.decdeg_to_decmin(split_line[header.index('Latitude [degrees_north]')], string_type=True)[:kwargs.get('id_pos_precision', 6)]
# lon = geography.decdeg_to_decmin(split_line[header.index('Longitude [degrees_east]')], string_type=True)[:kwargs.get('id_pos_precision', 6)]
data_set_list.append(lat)
data_set_list.append(lon)
data_set.add('_'.join(data_set_list))
else:
if not split_line[index]:
print(k)
data_set.add(split_line[index])
if show_process_every:
print('Number of lines: {}'.format(k))
print('Number of "{}" is: {}'.format(col, len(data_set)))
sorted_data_set = sorted(data_set)
if print_to_file:
with codecs.open(print_to_file, 'w') as fid:
fid.write('\n'.join(sorted_data_set))
return sorted_data_set
# ==========================================================================
def get_vocab_list(self, vocab='P01', sort=False, save_file_path=None, **kwargs):
"""
Function to create P01 list from txt-files.
"""
vocab_code_list = []
with codecs.open(self.file_path, 'r', encoding=kwargs.get('encoding', 'utf8')) as fid:
for line in fid:
if line.startswith(u'Cruise'):
break
try:
re_string = '(?<={}::)[ ]*[A-Z0-9]+'.format(vocab.upper())
vocab_string = re.findall(re_string, line)[0].strip()
vocab_code = vocab_string.split(u':')[-1]
if not vocab_code:
print(line)
vocab_code_list.append(vocab_code)
except:
pass
vocab_code_list = list(set(vocab_code_list))
if sort:
vocab_code_list = sorted(vocab_code_list)
if save_file_path:
with codecs.open(save_file_path, 'w') as fid:
fid.write('\n'.join(vocab_code_list))
return vocab_code_list
#==========================================================================
def create_row_data(self, print_to_file='', show_process_every=1000, **kwargs):
"""
Created: 20180831
Updated:
Extracts data in a row data format.
"""
t0 = time.time()
metadata_dict = {}
data_dict = {}
data = []
current_metadata_list = []
current_metadata_dict = {}
keep_pars = kwargs.get('keep_as_id', [])
if type(keep_pars) != list:
keep_pars = [keep_pars]
vocab_dict = {}
with codecs.open(self.file_path, encoding='utf8') as fid:
for k, line in enumerate(fid):
if show_process_every:
if not k%show_process_every:
print('Working on line {} as time {}'.format(k, time.time()-t0))
if line.startswith('//<MetaVariable>'):
par = line.split('="')[1].split('"')[0]
metadata_dict[par] = False
metadata_dict['yyyy-mm-ddThh:mm:ss.sss'] = False
elif line.startswith('//<DataVariable>'):
par = line.split('="')[1].split('"')[0]
# Check primary variable
if 'is_primary_variable="T"' in line:
metadata_dict[par] = False
else:
data_dict[par] = False
vocab_dict[par] = get_vocabs_from_string(line)
# nr_metadata_rows += 1
elif line.startswith('//'):
continue
elif line.startswith('Cruise'):
header = line.split('\t')
                    header_dict = dict((item, k) for k, item in enumerate(header))
                    # Find column indices for the data variables; they are used below to pick
                    # each parameter's value column (its QF column is assumed to follow it).
                    for key in data_dict.keys():
                        data_dict[key] = header_dict[key]
# Check which vocabularies to add. Needs to be done after Data variable check
vocab_set = set()
for par in vocab_dict:
vocab_set.update(vocab_dict[par].keys())
vocab_list = sorted(vocab_set)
else:
split_line = line.split('\t')
line_dict = dict(zip(header, split_line))
# Save metadata line
if split_line[3]: # time
for item in metadata_dict:
current_metadata_dict[item] = line_dict[item]
else:
for item in metadata_dict:
line_dict[item] = current_metadata_dict[item]
# print(split_line)
# # Update metadata. If metadata variable is missing the previos metadata is used
# for item in metadata_dict:
# print(item)
# if not line_dict[item]:
# line_dict[item] = current_metadata_dict[item]
# else:
# current_metadata_dict[item] = line_dict[item]
# # Add keep parameters. They will be unique for each row
# for par in keep_pars:
# data_line.append(line_dict[par])
# if split_line[metadata_dict['Station']]:
# # First row for station. Add information to be used on the rest of the lines
# current_metadata_list = split_line[:len(metadata_dict)]
# Add data
for par in sorted(data_dict):
data_line = []
# Add metadata
for item in header: # To get the right order
if item in metadata_dict:
data_line.append(line_dict[item])
# Add data from line
# # Metadata first
# data_line = current_metadata_list[:]
# Then vocabulary
for voc in vocab_list:
data_line.append(vocab_dict.get(par, {}).get(voc, ''))
# Then data
data_line.append(par) # Parameter
data_line.append(split_line[data_dict[par]]) # Value
data_line.append(split_line[data_dict[par]+1]) # QF
# Add line to data
# print(len(data_line))
data.append(data_line)
# Create dataframe
data_header = [item for item in header if item in metadata_dict] + vocab_list + ['parameter', 'value', 'qflag']
# print(len(data_header), len(data[0]))
self.row_df = pd.DataFrame(data, columns=data_header)
# Map header
mapping_file_path = os.path.join(odv_directory_path, 'odv_parameter_mapping.txt')
parameter_mapping = mapping.ParameterMapping(mapping_file_path, from_col='ODV', to_col='SMHI')
new_header = [parameter_mapping.get_mapping(item) for item in self.row_df.columns]
self.row_df.columns = new_header
# for par in sorted(self.row_df.columns):
# print(par)
# Refine data
for key in kwargs:
if key in self.row_df.columns:
value = kwargs[key]
if type(value) == str:
value = [value]
self.row_df = self.row_df.loc[self.row_df[key].isin(value), :]
self.data_dict = data_dict
self.metadata_dict = metadata_dict
# Add columns
self.row_df['SDATE'] = self.row_df['odv_time_string'].apply(mapping.sdate_from_odv_time_string)
self.row_df['STIME'] = self.row_df['odv_time_string'].apply(mapping.stime_from_odv_time_string)
# Convert lat lon
self.row_df['LATIT'] = self.row_df['LATIT'].apply(mapping.to_decmin)
self.row_df['LONGI'] = self.row_df['LONGI'].apply(mapping.to_decmin)
# Add MYEAR
if kwargs.get('add_myear'):
self.row_df['MYEAR'] = self.row_df['SDATE'].apply(lambda x: int(x[:4]))
# Add source column
self.row_df['source'] = kwargs.get('source', kwargs.get('source_column', 'odv'))
self._add_id_column()
if print_to_file:
kw = {'sep': '\t',
'encoding': 'cp1252',
'index': False}
for k in kw:
if k in kwargs:
kw[k] = kwargs[k]
self.row_df.to_csv(print_to_file, **kw)
#==========================================================================
def _add_id_column(self):
self.row_df['id'] = self.row_df['SDATE'].astype(str) + '_' + \
self.row_df['STIME'].astype(str) + '_' + \
self.row_df['LATIT'].apply(lambda x: x[:7]).astype(str) + '_' + \
self.row_df['LONGI'].apply(lambda x: x[:7]).astype(str)
# astype(str) ???
"""
===============================================================================
===============================================================================
"""
class SpreadsheetFileColumns():
"""
Class takes an ODV spreadsheet file, removes all comment lines and reads as pandas dataframe.
OBS! Make sure NOT to "Use compact output".
"""
# ==========================================================================
def __init__(self, file_path=None, **kwargs):
self.file_path = file_path
self._load_data()
self._add_columns(**kwargs)
def _load_data(self):
if type(self.file_path) == list:
self._load_several_files()
return
data_lines = []
print('utf8')
with codecs.open(self.file_path, encoding='utf8') as fid:
for line in fid:
if line.startswith('//'):
continue
else:
split_line = line.strip().split('\t')
if line.startswith('Cruise'):
# Header
header = split_line
else:
# Data line
data_lines.append(split_line)
self.df = pd.DataFrame(data_lines, columns=header)
def _load_several_files(self):
dfs = {}
headers = {}
for file_path in self.file_path:
data_lines = []
with codecs.open(file_path, encoding='utf8') as fid:
for line in fid:
if line.startswith('//'):
continue
else:
split_line = line.strip().split('\t')
if line.startswith('Cruise'):
# Header
header = split_line
new_header = []
latest = None
for h in header:
if h == 'QV:SEADATANET':
new_header.append(' - '.join([latest, h]))
else:
latest = h
new_header.append(h)
headers[file_path] = new_header
else:
# Data line
data_lines.append(split_line)
dfs[file_path] = | pd.DataFrame(data_lines, columns=new_header) | pandas.DataFrame |
import pandas as pd
from functools import reduce
from fooltrader.contract.files_contract import *
import re
import json
class agg_future_dayk(object):
funcs={}
def __init__(self):
self.funcs['shfeh']=self.getShfeHisData
self.funcs['shfec']=self.getShfeCurrentYearData
self.funcs['ineh']=self.getIneHisData
self.funcs['inec']=self.getIneCurrentYearData
self.funcs['dceh']=self.getDceHisData
self.funcs['dcec']=self.getDceCurrentYearData
self.funcs['czceh']=self.getCzceHisData
self.funcs['czcec']=self.getCzceCurrentYearData
self.funcs['cffexh']=self.getCffexHisData
self.funcs['cffexc']=self.getCffexCurrentYearData
def getCurrentYearAllData(self,exchange=None):
if exchange is None:
exchanges=['cffex','dce','czce','shfe',"ine"]
pds = list(map(lambda x:self.getCurrentYearData(x),exchanges))
finalpd = pd.concat(pds)
else:
finalpd= pd.concat([self.getCurrentYearData(exchange)])
for i in ['volume','inventory']:
finalpd[i]=finalpd[i].apply(lambda x:pd.to_numeric(str(x).replace(",", "")))
finalpd.set_index(['date','fproduct','symbol'], inplace=True)
finalpd.sort_index(inplace=True)
return finalpd
def getAllData(self,exchange=None):
if exchange is None:
exchanges=['cffex','dce','czce','shfe',"ine"]
pds = list(map(lambda x:self.getHisData(x),exchanges))+list(map(lambda x:self.getCurrentYearData(x),exchanges))
finalpd = pd.concat(pds)
else:
finalpd= pd.concat([self.getHisData(exchange),self.getCurrentYearData(exchange)])
for i in ['volume','inventory']:
finalpd[i]=finalpd[i].apply(lambda x:pd.to_numeric(str(x).replace(",", "")))
finalpd.set_index(['date','fproduct','symbol'], inplace=True)
finalpd.sort_index(inplace=True)
return finalpd
def getHisData(self,exchange):
return self.funcs[exchange+'h']()
def getCurrentYearData(self,exchange):
return self.funcs[exchange+'c']()
def getShfeHisData(self):
pattern = re.compile(r'(\D{1,3})(\d{3,4}).*')
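        # Contract ids such as 'cu2001': group 1 is the product symbol, group 2 the delivery month (YYMM)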
dfs=[]
dir = get_exchange_cache_dir(security_type='future',exchange='shfe')+"/his/"
for j in os.listdir(dir):
a = pd.read_excel(dir+j, header=2, skipfooter=5,
usecols=list(range(0, 14))).fillna(method='ffill')
dfs.append(a)
totaldf = reduce(lambda x,y:x.append(y),dfs)
totaldf['日期']=pd.to_datetime(totaldf['日期'],format='%Y%m%d')
totaldf=totaldf[pd.isnull(totaldf['合约'])==False]
totaldf['fproduct'] = totaldf['合约'].apply(lambda x:pattern.match(x).groups()[0])
totaldf['settleDate'] = totaldf['合约'].apply(lambda x:pd.to_datetime('20'+pattern.match(x).groups()[1],format='%Y%m'))
renameMap={
'合约':'symbol',
'日期':'date',
'前收盘':'preClose',
'前结算':'preSettle',
'开盘价':'open',
'最高价':'high',
'最低价':'low',
'收盘价':'close',
'结算价':'settle',
'涨跌1':'range',
'涨跌2':'range2',
'成交量':'volume',
'成交金额':'amount',
'持仓量':'inventory'
}
totaldf.rename(index=str,columns=renameMap,inplace=True)
totaldf=totaldf[['symbol','date','open','high','low','close','settle','range','range2','volume','inventory','fproduct','settleDate']]
print("done")
# totaldf.to_pickle('testdf.pickle')
return totaldf
def getShfeCurrentYearData(self):
dir = os.path.join(get_exchange_cache_dir(security_type='future',exchange='shfe'),"2020_day_kdata")
file_list=os.listdir(dir)
tempdfs=[]
for file in file_list:
if len(file)==8:
with open(os.path.join(dir,file)) as f:
load_dict = json.load(f)
temp_df = pd.DataFrame(data=load_dict['o_curinstrument'])
temp_df['date'] = file
temp_df['date'] = pd.to_datetime(temp_df['date'],format="%Y%m%d")
tempdfs.append(temp_df)
aggdf=pd.concat(tempdfs)
aggdf= aggdf[aggdf['DELIVERYMONTH']!='小计' ]
aggdf= aggdf[aggdf['DELIVERYMONTH']!='合计' ]
aggdf= aggdf[aggdf['DELIVERYMONTH']!=""]
aggdf= aggdf[aggdf['DELIVERYMONTH']!="efp"]
aggdf['symbol']=aggdf['PRODUCTID'].apply(lambda x:x.strip().replace("_f",""))+aggdf['DELIVERYMONTH']
aggdf['fproduct']=aggdf['PRODUCTID'].apply(lambda x:x.strip().replace("_f",""))
aggdf['settleDate']=aggdf['DELIVERYMONTH'].apply(lambda x:pd.to_datetime('20'+x,format='%Y%m'))
renameMap={
'OPENPRICE':'open',
'HIGHESTPRICE':'high',
'LOWESTPRICE':'low',
'CLOSEPRICE':'close',
'SETTLEMENTPRICE':'settle',
'ZD1_CHG':'range',
'ZD2_CHG':'range2',
'VOLUME':'volume',
'OPENINTEREST':'inventory'
}
aggdf.rename(index=str,columns=renameMap,inplace=True)
aggdf=aggdf[['symbol','date','open','high','low','close','settle','range','range2','volume','inventory','fproduct','settleDate']]
return aggdf
def getIneHisData(self):
pattern = re.compile(r'(\D{1,3})(\d{3,4}).*')
dfs=[]
dir = get_exchange_cache_dir(security_type='future',exchange='ine')+"/his/"
for j in os.listdir(dir):
a = pd.read_excel(dir+j, header=2, skipfooter=5,
usecols=list(range(0, 14))).fillna(method='ffill')
dfs.append(a)
totaldf = reduce(lambda x,y:x.append(y),dfs)
totaldf['日期']=pd.to_datetime(totaldf['日期'],format='%Y%m%d')
totaldf=totaldf[pd.isnull(totaldf['合约'])==False]
totaldf['fproduct'] = totaldf['合约'].apply(lambda x:pattern.match(x).groups()[0])
totaldf['settleDate'] = totaldf['合约'].apply(lambda x:pd.to_datetime('20'+pattern.match(x).groups()[1],format='%Y%m'))
renameMap={
'合约':'symbol',
'日期':'date',
'前收盘':'preClose',
'前结算':'preSettle',
'开盘价':'open',
'最高价':'high',
'最低价':'low',
'收盘价':'close',
'结算价':'settle',
'涨跌1':'range',
'涨跌2':'range2',
'成交量':'volume',
'成交金额':'amount',
'持仓量':'inventory'
}
totaldf.rename(index=str,columns=renameMap,inplace=True)
totaldf=totaldf[['symbol','date','open','high','low','close','settle','range','range2','volume','inventory','fproduct','settleDate']]
print("done")
# totaldf.to_pickle('testdf.pickle')
return totaldf
def getIneCurrentYearData(self):
dir = os.path.join(get_exchange_cache_dir(security_type='future',exchange='ine'),"2020_day_kdata")
file_list=os.listdir(dir)
tempdfs=[]
for file in file_list:
if len(file)==8:
with open(os.path.join(dir,file)) as f:
load_dict = json.load(f)
temp_df = pd.DataFrame(data=load_dict['o_curinstrument'])
temp_df['date'] = file
temp_df['date'] = pd.to_datetime(temp_df['date'],format="%Y%m%d")
tempdfs.append(temp_df)
aggdf= | pd.concat(tempdfs) | pandas.concat |
from flask import Flask, render_template, request, redirect, url_for, session
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
import os
import io
import math
#from werkzeug.utils import secure_filename
#from sqlalchemy import create_engine
from pulp import *
from pandas.io import sql
import statsmodels.formula.api as smf
import statsmodels.api as sm
import scipy.optimize as optimize
import scipy.stats as st
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
from math import sin, cos, sqrt, atan2, radians
from statsmodels.tsa.arima_model import ARIMA
from collections import defaultdict
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
            changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
pint=[]
dint=[]
x = spq['Product_Price']
num_bins = 5
        # Derive price bin edges without plotting (the commented-out plt.hist call left pint
        # empty, which made the binning below a no-op); np.histogram returns (counts, bin_edges).
        n, pint = np.histogram(x, num_bins)
y = spq['Product_Qty']
num_bins = 5
        # Same for the demand bin edges.
        n, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
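        # Count how many observations fall into each (price bin, demand bin) cell for the heatmap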
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
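        # OLS demand model: quantity ~ (own price - competitor price) + promo flags + log(week) trend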
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
            function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
                      (diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
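        # Revenue per candidate price: column index i equals the candidate price (the grid starts
        # at 0), so weekly revenue at that price is i * predicted demand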
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1,promo2_param=promo2,comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
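            """Negative total revenue; SLSQP minimizes this, which maximizes sum_t p_t * demand_t."""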
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
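        # Maximize total revenue (by minimizing its negative) over per-week prices within the
        # price bounds, subject to the inventory cap s_0 and non-negative prices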
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1,promo2_param, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenarios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible due to insufficient load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds'].astype(float)),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
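# Note: 205000 and 370 above appear to be assumed per-wagon capacities (payload and usable
# width respectively); the two arrays hold load- and width-utilisation percentages per wagon.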
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1).reindex(dear.index)
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(0)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
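# dist() is the haversine great-circle distance: R = 6373 km is an approximate Earth radius,
# the latitude/longitude pairs are converted to radians, and the resulting distance in km is
# scaled by the per-km cost supplied in `cost`.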
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
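# costtable() evaluates dist() for every customer/factory pair and reshapes the flat list
# into a (factories x customers) matrix indexed by factory name and customer name.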
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
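# Decision variables: production[f, c] = integer quantity shipped from factory f to customer c,
# factory_status[f] = binary open/closed flag, cap_slack[c] = integer unmet demand at customer c
# (penalised heavily in the objective so it is only used when capacity is insufficient).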
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity]
+ [5000000*cap_slack[cust] for cust in Demand])
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
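# Formulation sketch (comments only):
#   minimise   sum_{f,c} DelCost[f,c]*production[f,c] + sum_f FixedCost[f]*factory_status[f] + 5e6*sum_c cap_slack[c]
#   subject to sum_f production[f,c] + cap_slack[c] == Demand[c]          for every customer c
#              sum_c production[f,c] <= Capacity[f]*factory_status[f]     for every factory f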
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total cost of the facility allocation = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
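# Note: factoryalloc() slices the flat list of solved variable values assuming production
# entries come first, then the cap_slack values, then the factory_status flags; this matches
# the order in which PuLP reports the variables declared above.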
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
alloc=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(alloc)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
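# Quarterly aggregation: each date is mapped above to the first day of its quarter
# (Jan/Apr/Jul/Oct) and the numeric columns are then summed per quarter.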
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weekly': ##weekly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
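# Illustrative check of the error metrics (not executed):
#   ME([10, 12], [9, 13])  -> 0.0   (errors +1 and -1 cancel)
#   MAE([10, 12], [9, 13]) -> 1.0
#   MAPE reports the mean absolute error as a percentage of the actual values.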
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
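# mavg(): despite the name, each series is fitted with ARIMA(0,0,1) (an MA(1) model) and
# predicted from the last training date up to the user-supplied 'till' date; the first
# series (TotalDemand) also feeds the ME/MAE/MAPE summary table df2.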
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
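# AutoRimavg(): same loop as mavg() but with ARIMA(1,0,0), i.e. an AR(1) model per series.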
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.period_range(start=start_index1, periods=lprd+1, freq='M')
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
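# regression(): fits a LinearRegression per series using the integer time step as the only
# feature, scores it on the hold-out split V, and extrapolates the index for `noofterms`
# future periods to produce the forecast.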
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.period_range(start=start_index1, periods=lengthofprd+1, freq='M')
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
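# exponential_smoothing(): simple exponential smoothing, result[t] = alpha*x[t] + (1-alpha)*result[t-1];
# multi-step forecasts repeatedly blend the last observed level with the last smoothed value.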
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.period_range(start=start_index1, periods=lengthofprd+1, freq='M')
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.period_range(start=start_index1, periods=lprd+1, freq='3M')
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy Of the mODEL
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.period_range(start=start_index1, periods=lengthofprd+1, freq='3M')
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.period_range(start=start_index1, periods=lengthofprd+1, freq='3M')
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.period_range(start=start_index1, periods=lprd+1, freq='A')
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting List Of Atributes in Data Frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run for each row
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks= | pd.DataFrame(k) | pandas.DataFrame |
# coding: utf-8
# In[ ]:
#### Scaling helpers (standardization and related preprocessing)
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, Normalizer
def get_standardized( X_train, X_test = None ) :
# column names kept for converting the result back to a DataFrame later
X_columns = X_train.columns
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
if X_test is not None :
#scaler.fit(X_test)
X_test = scaler.transform(X_test)
# convert back to a DataFrame
X_train = pd.DataFrame(X_train, columns=X_columns)
if X_test is not None :
X_test = pd.DataFrame(X_test, columns=X_columns)
return {'train' : X_train, 'test' : X_test}
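# The scaler is fitted on the training split only and the same transform is applied to the
# test split, which avoids leaking test statistics into the scaling.
# Example usage (illustrative; assumes X_train/X_test are numeric DataFrames):
#   scaled = get_standardized(X_train, X_test)
#   X_train_std, X_test_std = scaled['train'], scaled['test']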
def get_min_max( X_train, X_test = None ) :
# column names kept for converting the result back to a DataFrame later
X_columns = X_train.columns
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
if X_test is not None :
#scaler.fit(X_test)
X_test = scaler.transform(X_test)
# convert back to a DataFrame
X_train = pd.DataFrame(X_train, columns=X_columns)
if X_test is not None :
X_test = pd.DataFrame(X_test, columns=X_columns)
return {'train' : X_train, 'test' : X_test}
def get_robust( X_train, X_test = None, X_predict = None, has_column = True ) :
# column names kept for converting the result back to a DataFrame later
if has_column :
X_columns = X_train.columns
scaler = RobustScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
if X_test is not None :
#scaler.fit(X_test)
X_test = scaler.transform(X_test)
if X_predict is not None :
#scaler.fit(X_test)
X_predict = scaler.transform(X_predict)
# convert back to a DataFrame
if has_column :
X_train = pd.DataFrame(X_train, columns=X_columns)
if X_test is not None and has_column :
X_test = | pd.DataFrame(X_test, columns=X_columns) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: Faron
"""
import pandas as pd
import numpy as np
import xgboost as xgb
import gc
DATA_DIR = "/media/ubuntu/新加卷2/Kaggle/Featured/Bosch"
ID_COLUMN = 'Id'
TARGET_COLUMN = 'Response'
SEED = 0
CHUNKSIZE = 10000
NROWS = 250000
TRAIN_NUMERIC = "{0}/train_numeric.csv".format(DATA_DIR)
TRAIN_DATE = "{0}/train_date.csv".format(DATA_DIR)
TEST_NUMERIC = "{0}/test_numeric.csv".format(DATA_DIR)
TEST_DATE = "{0}/test_date.csv".format(DATA_DIR)
FILENAME = "etimelhoods"
train = pd.read_csv(TRAIN_NUMERIC, usecols=[ID_COLUMN, TARGET_COLUMN])
test = | pd.read_csv(TEST_NUMERIC, usecols=[ID_COLUMN]) | pandas.read_csv |
import xml.etree.ElementTree as ET
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import calendar
import time
from datetime import datetime
import pytz
from scipy import stats
from os.path import exists
# an instance of apple Health
# fname is the name of data file to be parsed must be an XML files
# flags for cache
class AppleHealth:
def __init__(self, fname = 'export.xml', pivotIndex = 'endDate', readCache = False, writeCache = False):
#check cache flag and cache accordingly
if readCache:
a = time.time()
self.readCache(fname)
e = time.time()
self.runtime = e-a
print("Cache parsing Time = {}".format(e-a))
if writeCache:
self.cacheAll(fname)
return
# create element tree object
a = time.time()
s = time.time()
tree = ET.parse(fname)
e = time.time()
print("Tree parsing Time = {}".format(e-s))
# for every health record, extract the attributes into a dictionary (columns). Then create a list (rows).
s = time.time()
root = tree.getroot()
record_list = [x.attrib for x in root.iter('Record')]
workout_list = [x.attrib for x in root.iter('Workout')]
e = time.time()
print("record list Time = {}".format(e-s))
# create DataFrame from a list (rows) of dictionaries (columns)
s = time.time()
self.record_data = pd.DataFrame(record_list)
self.workout_data = pd.DataFrame(workout_list)
e = time.time()
print("creating DF Time = {}".format(e-s))
format = '%Y-%m-%d %H:%M:%S'
        # convert the date columns to proper datetime dtype
def get_split_date(strdt):
split_date = strdt.split()
str_date = split_date[1] + ' ' + split_date[2] + ' ' + split_date[5] + ' ' + split_date[3]
return str_date
s = time.time()
for col in ['creationDate', 'startDate', 'endDate']:
self.record_data[col] = pd.to_datetime(self.record_data[col], format=format)
if not self.workout_data.empty:
self.workout_data[col] = | pd.to_datetime(self.workout_data[col], format=format) | pandas.to_datetime |
from flask import Flask, request, jsonify, g, render_template
from flask_json import FlaskJSON, JsonError, json_response, as_json
import plotly.graph_objects as go
from datetime import datetime
from datetime import timedelta
import glob
import requests
from app import db
from app.models import *
from app.plots import bp
import pandas as pd
import io
from app.api import vis
from sqlalchemy import sql
import numpy as np
from app.tools.curvefit.core.model import CurveModel
from app.tools.curvefit.core.functions import gaussian_cdf, gaussian_pdf
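# Maps URL-friendly region slugs to the official Public Health Unit names used in the data sources.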
PHU = {'the_district_of_algoma':'The District of Algoma Health Unit',
'brant_county':'Brant County Health Unit',
'durham_regional':'Durham Regional Health Unit',
'grey_bruce':'Grey Bruce Health Unit',
'haldimand_norfolk':'Haldimand-Norfolk Health Unit',
'haliburton_kawartha_pine_ridge_district':'Haliburton, Kawartha, Pine Ridge District Health Unit',
'halton_regional':'Halton Regional Health Unit',
'city_of_hamilton':'City of Hamilton Health Unit',
'hastings_and_prince_edward_counties':'Hastings and Prince Edward Counties Health Unit',
'huron_county':'Huron County Health Unit',
'chatham_kent':'Chatham-Kent Health Unit',
'kingston_frontenac_and_lennox_and_addington':'Kingston, Frontenac, and Lennox and Addington Health Unit',
'lambton':'Lambton Health Unit',
'leeds_grenville_and_lanark_district':'Leeds, Grenville and Lanark District Health Unit',
'middlesex_london':'Middlesex-London Health Unit',
'niagara_regional_area':'Niagara Regional Area Health Unit',
'north_bay_parry_sound_district':'North Bay Parry Sound District Health Unit',
'northwestern':'Northwestern Health Unit',
'city_of_ottawa':'City of Ottawa Health Unit',
'peel_regional':'Peel Regional Health Unit',
'perth_district':'Perth District Health Unit',
'peterborough_county_city':'Peterborough County–City Health Unit',
'porcupine':'Porcupine Health Unit',
'renfrew_county_and_district':'Renfrew County and District Health Unit',
'the_eastern_ontario':'The Eastern Ontario Health Unit',
'simcoe_muskoka_district':'Simcoe Muskoka District Health Unit',
'sudbury_and_district':'Sudbury and District Health Unit',
'thunder_bay_district':'Thunder Bay District Health Unit',
'timiskaming':'Timiskaming Health Unit',
'waterloo':'Waterloo Health Unit',
'wellington_dufferin_guelph':'Wellington-Dufferin-Guelph Health Unit',
'windsor_essex_county':'Windsor-Essex County Health Unit',
'york_regional':'York Regional Health Unit',
'southwestern':'Southwestern Public Health Unit',
'city_of_toronto':'City of Toronto Health Unit',
'huron_perth_county':'Huron Perth Public Health Unit'}
def get_dir(data, today=datetime.today().strftime('%Y-%m-%d')):
source_dir = 'data/' + data['classification'] + '/' + data['stage'] + '/'
load_dir = source_dir + data['source_name'] + '/' + data['table_name']
file_name = data['table_name'] + '_' + today + '.' + data['type']
file_path = load_dir + '/' + file_name
return load_dir, file_path
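# Return the path of the most recently dated file for this source/table by scanning the load directory.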
def get_file(data):
load_dir, file_path = get_dir(data)
files = glob.glob(load_dir + "/*." + data['type'])
files = [file.split('_')[-1] for file in files]
files = [file.split('.csv')[0] for file in files]
dates = [datetime.strptime(file, '%Y-%m-%d') for file in files]
max_date = max(dates).strftime('%Y-%m-%d')
load_dir, file_path = get_dir(data, max_date)
return file_path
## Tests
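# Each *_plot() below builds a Plotly figure (an indicator plus trend traces),
# serialises it with fig.to_json(), and stores the result in the matching Viz row,
# keyed by header (and phu where applicable).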
def new_tests_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['New tests'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New tests'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New tests'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['New tests'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['New tests'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Tests<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="new tests").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def total_tests_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Total tested'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Total tested'].tail(1).values[0],
number = {'font': {'size': 60}},
))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['Total tested'],line=dict(color='#5E5AA1',dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['Total tested'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Total tested'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True,'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Tested<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="tests").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def tested_positve_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['New Positive pct'].notna()]
temp = df.loc[df['New Positive pct'] > 0]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New Positive pct'].tail(1).values[0]*100,
number = {'font': {'size': 60}}
))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['New Positive pct'],line=dict(color='#FFF', dash='dot'),visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['New Positive pct'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['New Positive pct'].iloc[-2]*100,
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text': f"Percent Positivity<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="tested positive").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def under_investigation_plot():
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Total tested'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Under Investigation'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Under Investigation'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=temp['Under Investigation'].rolling(7).mean(),line=dict(color='red',width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Under Investigation'].iloc[-2],
'increasing': {'color':'grey'},
'decreasing': {'color':'grey'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Under Investigation<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="under investigation").first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Hospital
def in_hospital_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Hospitalized'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['Hospitalized'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Hospitalized'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Hospitalized'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients In Hospital<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
div = fig.to_json()
p = Viz.query.filter_by(header="in hospital", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def in_icu_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['ICU'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['ICU'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['ICU'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['ICU'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients In ICU<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",)
else:
df = vis.get_icu_capacity_phu()
df = df.loc[df.PHU == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="in icu", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
temp = df.loc[df['confirmed_positive'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['confirmed_positive'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'title' : {"text": f"COVID-19 Patients In ICU<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>"},
'mode' : "number+delta+gauge",
'delta' : {'reference': df['confirmed_positive'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':"",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="in icu", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def on_ventilator_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
temp = df.loc[df['Ventilator'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['Ventilator'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['Ventilator'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['Ventilator'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['Ventilator'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True,'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':False},
title={'text':f"COVID-19 Patients On Ventilator<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
df = vis.get_icu_capacity_phu()
df = df.loc[df.PHU == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="on ventilator", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
temp = df.loc[df['confirmed_positive_ventilator'].notna()]
fig.add_trace(go.Indicator(
mode = "number+delta",
value = temp['confirmed_positive_ventilator'].tail(1).values[0],number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive_ventilator'],line=dict(color='red', width=3),visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=temp.Date,y=temp['confirmed_positive_ventilator'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['confirmed_positive_ventilator'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"COVID-19 Patients On Ventilator<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="on ventilator", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Cases
def new_cases_plot(region='ontario'):
if region == 'ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New positives'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d",'reference': df['New positives'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['New positives'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['New positives'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h")
else:
df = vis.get_phus()
df = df.loc[df.region == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="new cases", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['value'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d",'reference': df['value'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['value'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['value'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="new cases",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def active_cases_plot(region='ontario'):
if region == 'ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
df = df.loc[df['Active'].notna()]
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Active'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['Active'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['Active'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['Active'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Active Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h")
div = fig.to_json()
p = Viz.query.filter_by(header="active cases",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def total_cases_plot(region='ontario'):
if region=='ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Positives'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['Positives'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['Positives'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['Positives'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
df = vis.get_phus()
df = df.loc[df.region == PHU[region]]
df['value'] = df.value.cumsum()
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="cases", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['value'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['value'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['value'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['value'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
print(region)
p = Viz.query.filter_by(header="cases",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def recovered_plot(region='ontario'):
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Resolved'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['Resolved'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['Resolved'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['Resolved'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Recovered Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h")
div = fig.to_json()
p = Viz.query.filter_by(header="recovered", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def total_deaths_plot(region='ontario'):
if region == 'ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['Deaths'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['Deaths'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['Deaths'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['Deaths'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Deaths<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
df = vis.get_phudeath()
df = df.loc[df.region == PHU[region]]
df['value'] = df.value.cumsum()
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="deaths", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['value'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['value'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['value'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['value'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total Deaths<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="deaths",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def new_deaths_plot(region='ontario'):
if region == 'ontario':
df = vis.get_testresults()
df['Date'] = pd.to_datetime(df['Date'])
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['New deaths'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['New deaths'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['New deaths'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['New deaths'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Deaths<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
df = vis.get_phudeath()
df = df.loc[df.region == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="new deaths", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['value'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['value'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['value'],line=dict(color='#FFF', dash='dot'), visible=True, opacity=0.5, name="Value"))
fig.add_trace(go.Scatter(x=df.Date,y=df['value'].rolling(7).mean(),line=dict(color='red', width=3), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"New Deaths<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="new deaths", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def ltc_cases_plot(region='ontario'):
if region == 'ontario':
url = "https://docs.google.com/spreadsheets/d/1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U/export?format=csv&id=1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U&gid=0"
s=requests.get(url).content
df = pd.read_csv(io.StringIO(s.decode('utf-8')))
df['Date'] = pd.to_datetime(df['Date'])
df = df.dropna(how='any')
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['LTC Cases Total'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['LTC Cases Total'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['LTC Cases Total'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['LTC Cases Total'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total LTC Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
url = "https://docs.google.com/spreadsheets/d/1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U/export?format=csv&id=1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U&gid=689073638"
s=requests.get(url).content
df = pd.read_csv(io.StringIO(s.decode('utf-8')))
df['Date'] = pd.to_datetime(df['Date'])
df = df.loc[df.PHU == PHU[region]]
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="long term care cases", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number",
value = df.groupby('Date')['Confirmed Resident Cases'].sum().tail(1).values[0],
number = {'font': {'size': 60}},))
fig.update_layout(
showlegend=False,
template = {'data' : {'indicator': [{
'mode' : "number",
},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Confirmed Resident Cases<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="long term care cases", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def ltc_deaths_plot(region='ontario'):
if region == 'ontario':
url = "https://docs.google.com/spreadsheets/d/1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U/export?format=csv&id=1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U&gid=0"
s=requests.get(url).content
df = pd.read_csv(io.StringIO(s.decode('utf-8')))
df['Date'] = pd.to_datetime(df['Date'])
df = df.dropna(how='any')
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['LTC Deaths'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['LTC Deaths'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['LTC Deaths'],line=dict(color='red', width=3), visible=True, opacity=0.5,name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['LTC Deaths'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Total LTC Deaths<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
url = "https://docs.google.com/spreadsheets/d/1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U/export?format=csv&id=1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U&gid=689073638"
s=requests.get(url).content
df = pd.read_csv(io.StringIO(s.decode('utf-8')))
df['Date'] = pd.to_datetime(df['Date'])
df = df.loc[df.PHU == PHU[region]]
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="long term care deaths", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number",
value = df.groupby('Date')['Resident Deaths'].sum().tail(1).values[0],
number = {'font': {'size': 60}},))
fig.update_layout(
showlegend=False,
template = {'data' : {'indicator': [{
'mode' : "number",
},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Confirmed Resident Deaths<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="long term care deaths", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def ltc_outbreaks_plot(region='ontario'):
if region == 'ontario':
url = "https://docs.google.com/spreadsheets/d/1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U/export?format=csv&id=1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U&gid=0"
s=requests.get(url).content
df = pd.read_csv(io.StringIO(s.decode('utf-8')))
df['Date'] = pd.to_datetime(df['Date'])
df = df.dropna(how='any')
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['LTC Homes'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d", 'reference': df['LTC Homes'].iloc[-2],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.add_trace(go.Scatter(x=df.Date,y=df['LTC Homes'],line=dict(color='red', width=3), visible=True, opacity=0.5,name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['LTC Homes'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"# of LTC Homes with Outbreaks<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
else:
url = "https://docs.google.com/spreadsheets/d/1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U/export?format=csv&id=1pWmFfseTzrTX06Ay2zCnfdCG0VEJrMVWh-tAU9anZ9U&gid=689073638"
s=requests.get(url).content
df = pd.read_csv(io.StringIO(s.decode('utf-8')))
df['Date'] = pd.to_datetime(df['Date'])
df = df.loc[df.PHU == PHU[region]]
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="long term care outbreaks", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number",
value = df.groupby('Date')['LTC Home'].count().tail(1).values[0],
number = {'font': {'size': 60}},))
fig.update_layout(
showlegend=False,
template = {'data' : {'indicator': [{
'mode' : "number",
},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"# of LTC Homes with Outbreaks<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.Date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="long term care outbreaks", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
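# Effective reproduction number (Rt): plots the ML estimate with the Low/High columns
# shaded as an uncertainty band and a reference line at Rt = 1.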
def rt_analysis_plot(region='Ontario'):
url = "https://docs.google.com/spreadsheets/d/19LFZWy85MVueUm2jYmXXE6EC3dRpCPGZ05Bqfv5KyGA/export?format=csv&id=19LFZWy85MVueUm2jYmXXE6EC3dRpCPGZ05Bqfv5KyGA&gid=428679599"
df = pd.read_csv(url)
df['date'] = pd.to_datetime(df['date'])
if region=='Ontario':
df = df.loc[df.phu == region]
else:
df = df.loc[df.phu == PHU[region]]
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="rt analysis", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['ML'].tail(1).values[0],
number = {'font': {'size': 60}},))
fig.add_trace(go.Scatter(x=df.date,y=df.ML,line=dict(color='red', width=3),visible=True,opacity=0.5))
fig.add_trace(go.Scatter(x=df.date,y=df.Low,
fill=None,
mode='lines',
line_color='grey',opacity=0.1
))
fig.add_trace(go.Scatter(x=df.date,y=df.High,
fill='tonexty',
mode='lines', line_color='grey',opacity=0.1))
fig.update_layout(
showlegend=False,
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'reference': df['ML'].tail(2).values[0],
'increasing': {'color':'red'},
'decreasing': {'color':'green'}}},
]
}})
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"R<sub>t</sub> value</span><br><span style='font-size:0.5em;color:gray'>Last Updated: {df.date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.add_shape(
type="line",
xref="paper",
yref="y",
x0=0,
y0=1,
x1=1,
y1=1,
line=dict(
color="white",
width=2,
),
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="rt analysis",phu=region.lower()).first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Mobility
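# Mobility trends for Ontario: the Apple driving index (drawn against its baseline of 100)
# and the Google retail/recreation and workplace categories.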
def apple_mobility_plot(region='ontario'):
df = vis.get_mobility_transportation()
df = df.loc[df.region=='Ontario']
fig = go.Figure()
df = df.loc[df.transportation_type == 'driving']
df['date'] = pd.to_datetime(df['date'])
df = df.sort_values(['date'])
fig.add_trace(go.Scatter(x=df.date,y=df['value'],line=dict(color='#FFF', dash='dot'),opacity=0.5,name="Value"))
fig.add_trace(go.Scatter(x=df.date,y=df['value'].rolling(7).mean(),line=dict(color='red', width=3),opacity=1,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'tickformat':'%d-%b'},
yaxis = {'showgrid': False},
title={'text':f"Driving Mobility<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.add_shape(
type="line",
xref="paper",
yref="y",
x0=0,
y0=100,
x1=1,
y1=100,
line=dict(
color="white",
width=2,
),
)
fig.update_layout(
margin=dict(l=0, r=20, t=40, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="transit mobility").first()
p.html = div
db.session.add(p)
db.session.commit()
return
def retail_mobility_plot(region='ontario'):
df = vis.get_mobility()
df = df.loc[df.region == 'Ontario']
df = df.loc[df.category == "Retail & recreation"]
df['date'] = pd.to_datetime(df['date'])
df['day'] = df['date'].dt.day_name()
df = df.sort_values(['date'])
date_include = datetime.strptime("2020-02-18","%Y-%m-%d")
df = df.loc[df['date'] > date_include]
fig = go.Figure()
df = df.loc[df.category == "Retail & recreation"]
fig.add_trace(go.Scatter(x=df.date,y=df['value'],line=dict(color='red', width=3),visible=True,opacity=0.5,name="Value"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Retail and Recreation<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.95,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=10, t=30, b=50),
plot_bgcolor='#343332',
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="retail mobility",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def work_mobility_plot(region='ontario'):
df = vis.get_mobility()
df = df.loc[df.region == 'Ontario']
df['date'] = pd.to_datetime(df['date'])
df['day'] = df['date'].dt.day_name()
df = df.loc[df.category == "Workplace"]
df = df.loc[~df.day.isin(["Saturday", "Sunday"])]
df = df.sort_values(['date'])
date_include = datetime.strptime("2020-02-18","%Y-%m-%d")
df = df.loc[df['date'] > date_include]
fig = go.Figure()
fig.add_trace(go.Scatter(x=df.date,y=df['value'],line=dict(color='red', width=3),visible=True,opacity=0.5,name="Value"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"Workplaces<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.95,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=10, t=30, b=50),
plot_bgcolor='#343332',
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="work mobility",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
## Capacity
def icu_ontario_plot(region='ontario'):
if region == 'ontario':
df = vis.get_icu_capacity_province()
df['Date'] = pd.to_datetime(df['date'])
else:
df = vis.get_icu_capacity_phu()
df = df.loc[df.PHU == PHU[region]]
df['Date'] = pd.to_datetime(df['date'])
if len(df) <= 0:
div = sql.null()
p = Viz.query.filter_by(header="residual beds", phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
fig = go.Figure()
fig.add_trace(go.Indicator(
mode = "number+delta",
value = df['residual_beds'].tail(1).values[0],
number = {'font': {'size': 60}}
),
)
fig.update_layout(
template = {'data' : {'indicator': [{
'mode' : "number+delta+gauge",
'delta' : {'valueformat':"d",'reference': df['residual_beds'].iloc[-2],
'increasing': {'color':'green'},
'decreasing': {'color':'red'}}},
]
}})
fig.add_trace(go.Scatter(x=df.date,y=df['residual_beds'],line=dict(color='red', width=3), visible=True, opacity=0.5, name="Value"))
# fig.add_trace(go.Scatter(x=df.Date,y=df['residual_beds'].rolling(7).mean(),line=dict(color='#FFF', dash='dot'), opacity=0.5,name="7 Day Average"))
fig.update_layout(
xaxis = {'showgrid': False,'visible':True, 'tickformat':'%d-%b'},
yaxis = {'showgrid': False,'visible':True},
title={'text':f"ICU Beds Left<br><span style='font-size:0.5em;color:gray'>Last Updated: {df.date.tail(1).values[0].astype('M8[D]')}</span><br>",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
font=dict(
family="Roboto",
color="#FFF"
)
)
fig.update_layout(
margin=dict(l=0, r=20, t=30, b=50),
plot_bgcolor="#343332",
paper_bgcolor="#343332",
legend_orientation="h",
)
div = fig.to_json()
p = Viz.query.filter_by(header="residual beds",phu=region).first()
p.html = div
db.session.add(p)
db.session.commit()
return
def ventilator_ontario_plot(region='ontario'):
if region == 'ontario':
df = vis.get_icu_capacity_province()
df['Date'] = | pd.to_datetime(df['date']) | pandas.to_datetime |
import pytest
from ..dataset import (
magnitude_and_scale,
get_type,
_try_import,
indent,
get_df_type,
cast_and_clean_df,
sql_dataset,
)
import pandas as pd
import numpy as np
import datetime
import pyodbc
import requests
CMD_DROP_TEST_TABLE_IF_EXISTS = "IF OBJECT_ID('test_table', 'U') IS NOT NULL DROP TABLE test_table;"
CMD_CREATE_TEST_TABLE = """
CREATE TABLE test_table (
[dt] datetime NULL,
[dt2] date NOT NULL,
[uid] nvarchar(10) NOT NULL,
[strcol] nvarchar(max) NOT NULL,
[name] nvarchar(10) NULL,
[empty_col] nvarchar(100) NULL,
[float] decimal(22,3) NULL,
[float_k] decimal(22,3) NULL,
[float_m] decimal(22,13) NULL,
[float_b] decimal(22,9) NULL,
[float_na] decimal(22,3) NULL,
[bit] bit NULL,
[bit_na] bit NULL,
[tinyint] tinyint NULL,
[tinyint_na] tinyint NULL,
[smallint] smallint NOT NULL,
[smallint_na] smallint NULL,
[int] int NOT NULL,
[int_na] int NULL,
[bigint] bigint NULL,
[bigint_na] bigint NULL,
[bool] bit NULL,
[bool_na] bit NULL,
[empty_str_col] nvarchar(100) NULL
);
"""
expected_schema = [
['dt', 'datetime', [], True, ''],
['dt2', 'date', [], False, ''],
['uid', 'nvarchar', [10], False, ''],
['strcol', 'nvarchar', ['max'], False, ''],
['name', 'nvarchar', [10], True, ''],
['empty_col', 'nvarchar', [100], True, ''],
['float', 'decimal', [22,3], True, ''],
['float_k', 'decimal', [22,3], True, ''],
['float_m', 'decimal', [22,13], True, ''],
['float_b', 'decimal', [22,9], True, ''],
['float_na', 'decimal', [22,3], True, ''],
['bit', 'bit', [], True, ''],
['bit_na', 'bit', [], True, ''],
['tinyint', 'tinyint', [], True, ''],
['tinyint_na', 'tinyint', [], True, ''],
['smallint', 'smallint', [], False, ''],
['smallint_na', 'smallint', [], True, ''],
['int', 'int', [], False, ''],
['int_na', 'int', [], True, ''],
['bigint', 'bigint', [], True, ''],
['bigint_na', 'bigint', [], True, ''],
['bool', 'bit', [], True, ''],
['bool_na', 'bit', [], True, ''],
['empty_str_col', 'nvarchar', [100], True, ''],
]
# dataset.magnitude_and_scale
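# magnitude_and_scale(series) should return the digit counts before (magnitude) and after
# (scale) the decimal point for a numeric Series, ignoring NaN/inf values, and raise
# ValueError when the Series contains no finite values.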
def test_magnitude_and_scale_int():
mag, scale = magnitude_and_scale(pd.Series([1, 2, 3]).astype(int))
assert mag == 1
assert scale == 0
def test_magnitude_and_scale_float_type_int():
mag, scale = magnitude_and_scale(pd.Series([123.0, 1.0, 1234.0, np.nan]))
assert mag == 4
assert scale == 0
def test_magnitude_and_scale_float_with_inf():
mag, scale = magnitude_and_scale(pd.Series([1.0, 2.0, np.inf, -np.inf]))
assert mag == 1
assert scale == 0
def test_magnitude_and_scale_zero():
mag, scale = magnitude_and_scale(pd.Series([0]))
assert mag == 1
assert scale == 0
def test_magnitude_and_scale_float():
mag, scale = magnitude_and_scale(pd.Series([123.1234, 12345.1234567, 12.1234567800]))
assert mag == 5
assert scale == 8
def test_magnitude_and_scale_only_frac_part():
mag, scale = magnitude_and_scale(pd.Series([0.12345, 0.123456, 0.123]))
assert mag == 1
assert scale == 6
def test_magnitude_and_scale_empty_raises_error():
with pytest.raises(ValueError) as e_info:
mag, scale = magnitude_and_scale(pd.Series([], dtype='float64'))
def test_magnitude_and_scale_nan_raises_error():
with pytest.raises(ValueError) as e_info:
mag, scale = magnitude_and_scale(pd.Series([np.nan]))
def test_magnitude_and_scale_inf_raises_error():
with pytest.raises(ValueError) as e_info:
mag, scale = magnitude_and_scale(pd.Series([np.inf]))
# dataset.get_type
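# get_type(series) should map a pandas Series to a SQL Server column type, returning
# (dtype, params, has_null, comment).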
def test_get_type_decimal():
dtype, params, has_null, comment = get_type(pd.Series([1.1, 2.1, 3.0]))
assert dtype == 'decimal'
assert params == [2, 1]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([123.1234, 12345.1234567, 12.1234567800]))
assert dtype == 'decimal'
assert params == [19, 12]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([0.12345, 0.123456, 0.123]))
assert dtype == 'decimal'
assert params == [10, 9]
assert has_null == False
assert comment == ''
def test_get_type_decimal_na_inf():
dtype, params, has_null, comment = get_type(pd.Series([1.1, 2.1, 3.0, np.nan]))
assert dtype == 'decimal'
assert params == [2, 1]
assert has_null == True
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([1.1, 2.1, 3.0, np.nan, np.inf]))
assert dtype == 'decimal'
assert params == [2, 1]
assert has_null == True
assert comment == ''
def test_get_type_str():
dtype, params, has_null, comment = get_type(pd.Series(['123']))
assert dtype == 'nvarchar'
assert params == [6]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series(['a' * 1000]))
assert dtype == 'nvarchar'
assert params == [2000]
assert has_null == False
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series(['a' * 2001]))
assert dtype == 'nvarchar'
assert params == [4000]
assert has_null == False
assert comment == ''
def test_get_type_str_max():
with pytest.warns(None):
dtype, params, has_null, comment = get_type(pd.Series(['a' * 4001]))
assert dtype == 'nvarchar'
assert params == ['max']
assert has_null == False
assert comment == 'Maximum string length is 4001. Using nvarchar(max).'
def test_get_type_str_na():
dtype, params, has_null, comment = get_type(pd.Series(['a', 'b', 'c', 'def', np.nan]))
assert dtype == 'nvarchar'
assert params == [6]
assert has_null == True
assert comment == ''
def test_get_type_str_empty():
dtype, params, has_null, comment = get_type(pd.Series(['', '', '', '', '']))
assert dtype == 'nvarchar'
assert params == [255]
assert has_null == False
assert comment == 'zero-length string column, defaulting to nvarchar(255)'
def test_get_type_bool():
dtype, params, has_null, comment = get_type(pd.Series([True, False]))
assert dtype == 'bit'
assert params == []
assert has_null == False
assert comment == ''
def test_get_type_bool_na():
dtype, params, has_null, comment = get_type(pd.Series([True, False, np.nan]))
assert dtype == 'bit'
assert params == []
assert has_null == True
assert comment == ''
def test_get_type_bit():
dtype, params, has_null, comment = get_type(pd.Series([0, 1, 0.0, 1.00]))
assert dtype == 'bit'
assert params == []
assert has_null == False
assert comment == ''
def test_get_type_tinyint():
dtype, params, has_null, comment = get_type(pd.Series([0, 1, 2, 3, 3.0, 4.0]))
assert dtype == 'tinyint'
assert params == []
assert has_null == False
assert comment == ''
def test_get_type_smallint():
dtype, params, has_null, comment = get_type(pd.Series([-2.0, -1, 0.000, 1, 2.0]))
assert dtype == 'smallint'
assert params == []
assert has_null == False
assert comment == ''
def test_get_type_int():
dtype, params, has_null, comment = get_type(pd.Series([-60000, 0.000, 60000]))
assert dtype == 'int'
assert params == []
assert has_null == False
assert comment == ''
def test_get_type_int_zeros():
# if the entire column is 0, default to int
dtype, params, has_null, comment = get_type(pd.Series([0, 0, 0]))
assert dtype == 'int'
assert params == []
assert has_null == False
assert comment == 'column contains only zeros; defaulting to int'
def test_get_type_int_zeros_na():
# if the entire column is 0 and null, default to int
dtype, params, has_null, comment = get_type(pd.Series([0, 0, 0, np.nan]))
assert dtype == 'int'
assert params == []
assert has_null == True
assert comment == 'column contains only zeros; defaulting to int'
def test_get_type_bigint():
dtype, params, has_null, comment = get_type(pd.Series([-2147490000, 0.000, 2147490000]))
assert dtype == 'bigint'
assert params == []
assert has_null == False
assert comment == ''
def test_get_type_mixed():
# test different orders of the same mixed values; these should all return nvarchar.
# this is to guard against naively detecting types by the first non-empty value in object dtype columns
dtype, params, has_null, comment = get_type(pd.Series([1, 2.0, 3.1, 'abc', pd.Timestamp('2020-01-01 00:00:00'), np.nan]))
assert dtype == 'nvarchar'
assert params == [38]
assert has_null == True
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([pd.Timestamp('2020-01-01 00:00:00'), 1, 2.0, 3.1, 'abc', np.nan]))
assert dtype == 'nvarchar'
assert params == [38]
assert has_null == True
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([datetime.date(2020, 1, 1), 1, 2.0, 3.1, 'abc', np.nan]))
assert dtype == 'nvarchar'
assert params == [20]
assert has_null == True
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([datetime.datetime(2020, 1, 1, 0, 0, 0), 1, 2.0, 3.1, 'abc', np.nan]))
assert dtype == 'nvarchar'
assert params == [38]
assert has_null == True
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([1, 2.0, 3.1, pd.Timestamp('2020-01-01 00:00:00'), 'abc', np.nan]))
assert dtype == 'nvarchar'
assert params == [38]
assert has_null == True
assert comment == ''
dtype, params, has_null, comment = get_type(pd.Series([2.0, 1, 3.1, pd.Timestamp('2020-01-01 00:00:00'), 'abc', np.nan]))
assert dtype == 'nvarchar'
assert params == [38]
assert has_null == True
assert comment == ''
def test_get_type_datetime():
dtype, params, has_null, comment = get_type(pd.to_datetime(['2020-01-01', '2020-01-02']))
assert dtype == 'datetime'
assert params == []
assert has_null == False
assert comment == ''
def test_get_type_date():
dtype, params, has_null, comment = get_type(pd.to_datetime(['2020-01-01', '2020-01-02']).date)
assert dtype == 'date'
assert params == []
assert has_null == False
assert comment == ''
def test_get_type_empty():
dtype, params, has_null, comment = get_type(pd.Series([], dtype=object))
assert dtype == 'nvarchar'
assert params == [255]
assert has_null == True
assert comment == 'empty column, defaulting to nvarchar(255)'
def test_get_type_empty_only_na():
dtype, params, has_null, comment = get_type(pd.Series([np.nan]))
assert dtype == 'nvarchar'
assert params == [255]
assert has_null == True
assert comment == 'empty column, defaulting to nvarchar(255)'
def test_try_import():
package = _try_import('numpy')
assert package is np
module = _try_import('numpy.abs')
assert module is np.abs
from pandas.tseries import offsets
module = _try_import('pandas.tseries.offsets')
assert module is offsets
from pandas.tseries.offsets import DateOffset
method = _try_import('pandas.tseries.offsets.DateOffset')
assert method is DateOffset
def test_indent():
assert indent(['blah']) == [' blah']
df = pd.DataFrame({
'intcol': pd.Series([1,2,3]),
'intcol2': pd.Series([1,2,np.nan]),
'strcol': pd.Series(['a', 'b', 'c']),
'strcol2': pd.Series(['a'*10, 'b'*10, 'c'*10]),
'strcol3': | pd.Series(['a'*4001, 'b'*4001, 'c'*4001]) | pandas.Series |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so the previously defined values should
        # remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
        ds = Series([slice(None, randint(10), randint(10, 20))
                     for _ in range(4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
| tm.assert_series_equal(result, exp) | pandas.util.testing.assert_series_equal |
import argparse
import numpy as np
import os
import pandas as pd
import pdb
def add_dataset_info(results_raw: pd.DataFrame, task_metadata: pd.DataFrame):
results_raw['tid'] = [int(x.split('/')[-1]) for x in results_raw['id']]
task_metadata['ClassRatio'] = task_metadata['MinorityClassSize'] / task_metadata['NumberOfInstances']
results_raw = results_raw.merge(task_metadata, on=['tid'])
return results_raw
def mean_score(df: pd.DataFrame, column: str = 'result'):
return round(df[column].mean(), 4)
def filter_type(df: pd.DataFrame):
    # keep only binary and multiclass tasks (regression rows are excluded here)
    return pd.concat([df[df['type'] == 'binary'], df[df['type'] == 'multiclass']])
def filter_samples(df, samples=100000, lower=True):
return df[df['NumberOfInstances'] < samples] if lower else df[df['NumberOfInstances'] >= samples]
def filter_features(df, features=100, lower=True):
return df[df['NumberOfFeatures'] < features] if lower else df[df['NumberOfFeatures'] >= features]
def filter_duration(df, duration=40000):
return df[df['duration'] < duration]
def compute_win_lose(base, tie, pseudo_label):
total = max(len(base) + len(pseudo_label) + len(tie), 1)
return round((len(pseudo_label) + 0.5 * len(tie)) / total, 4), round((len(base) + 0.5 * len(tie)) / total, 4)
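# Worked example for compute_win_lose: with 3 base wins, 1 tie and 6 pseudo_label wins,
# total = 10, so the pseudo_label rate is (6 + 0.5*1)/10 = 0.65 and the base rate is
# (3 + 0.5*1)/10 = 0.35 (ties count half for each side).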
def print_inference_speedup(df1, df2):
relative_speedups = []
absolute_speedups = []
for task in df1['task'].unique():
df1_rows = df1[df1["task"] == task]
df2_rows = df2[df2["task"] == task]
for fold in df1_rows["fold"].unique():
row1, row2 = df1_rows[df1_rows["fold"] == fold], df2_rows[df2_rows["fold"] == fold]
if len(row1) == 0 or len(row2) == 0 or row1['predict_duration'].isna().item() or row2['predict_duration'].isna().item():
continue
df1_time, df2_time = row1['predict_duration'].item(), row2['predict_duration'].item()
if df1_time == 0 or df2_time == 0:
continue
relative_speedups.append((df1_time - df2_time)/min(df1_time, df2_time))
absolute_speedups.append(df1_time - df2_time)
print(f"Average Relative Speedup: {round(np.mean(relative_speedups), 4)}, Average Absolute Speedup: {round(np.mean(absolute_speedups), 4)}")
def compare_dfs_improvement(df1, df2):
metric = "acc"
binary, multiclass, regression = [], [], []
for task in df1['task'].unique():
df1_rows = df1[df1["task"] == task]
df2_rows = df2[df2["task"] == task]
for fold in df1_rows["fold"].unique():
row1, row2 = df1_rows[df1_rows["fold"] == fold], df2_rows[df2_rows["fold"] == fold]
if len(row1) == 0 or len(row2) == 0 or row1[metric].isna().item() or row2[metric].isna().item():
continue
df1_score, df2_score = row1[metric].item(), row2[metric].item()
problem_type = df1_rows.iloc[0]['type']
try:
if problem_type == "binary":
score = (df2_score - df1_score) / df1_score if df1_score > df2_score else (df2_score - df1_score) / df2_score
binary.append(score)
elif problem_type == "multiclass":
score = (df1_score - df2_score) / df1_score if df1_score < df2_score else (df1_score - df2_score) / df2_score
multiclass.append(score)
else:
score = (df1_score - df2_score) / df1_score if df1_score < df2_score else (df1_score - df2_score) / df2_score
regression.append(score)
except:
pass
binary_improvement = round(np.mean(binary), 4)
multiclass_improvement = round(np.mean(multiclass), 4)
regression_improvement = round(np.mean(regression), 4)
total_improvement = round(np.mean(binary + multiclass + regression), 4)
return total_improvement, binary_improvement, multiclass_improvement, regression_improvement
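# Worked example for the binary branch of compare_dfs_improvement above: the score gap is
# divided by the larger accuracy, so base acc 0.90 vs pseudo_label acc 0.95 yields
# (0.95 - 0.90) / 0.95 ≈ 0.0526, i.e. roughly a 5.3% relative improvement.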
def compare_dfs(df1, df2, grouped=False):
df1_better, equal_performance, df2_better = [], [], []
metric = "acc"
for task in df1['task'].unique():
df1_rows = df1[df1["task"] == task]
df2_rows = df2[df2["task"] == task]
if grouped:
if len(df1_rows) > 0:
df1_score = df1_rows[metric].dropna().mean()
if df1_score != df1_score:
continue
else:
continue
if len(df2_rows) > 0:
df2_score = df2_rows[metric].dropna().mean()
if df2_score != df2_score:
continue
else:
continue
if df1_score > df2_score:
df1_better.append(task)
elif df1_score < df2_score:
df2_better.append(task)
else:
equal_performance.append(task)
else:
for fold in df1_rows["fold"].unique():
row1, row2 = df1_rows[df1_rows["fold"] == fold], df2_rows[df2_rows["fold"] == fold]
if len(row1) == 0 or len(row2) == 0 or row1[metric].isna().item() or row2[metric].isna().item():
continue
score1, score2 = row1[metric].item(), row2[metric].item()
if score1 > score2:
df1_better.append(task+f"_{fold}")
elif score1 < score2:
df2_better.append(task+f"_{fold}")
else:
equal_performance.append(task+f"_{fold}")
return df1_better, equal_performance, df2_better
def print_miscellaneous(df1, df2):
metric = "acc"
score_diffs = []
for task in df1['task'].unique():
df1_rows = df1[df1["task"] == task]
df2_rows = df2[df2["task"] == task]
if len(df1_rows) > 0:
df1_score = df1_rows[metric].dropna().mean()
if df1_score != df1_score:
continue
else:
continue
if len(df2_rows) > 0:
df2_score = df2_rows[metric].dropna().mean()
if df2_score != df2_score:
continue
else:
continue
problem_type = df1_rows.iloc[0]['type']
if problem_type == "binary":
score = (df2_score - df1_score) / df1_score if df1_score > df2_score else (df2_score - df1_score) / df2_score
elif problem_type == "multiclass":
score = (df1_score - df2_score) / df1_score if df1_score < df2_score else (df1_score - df2_score) / df2_score
else:
score = (df1_score - df2_score) / df1_score if df1_score < df2_score else (df1_score - df2_score) / df2_score
score_diffs.append((task, score))
score_diffs = sorted(score_diffs, key=lambda info: info[1])
score_diffs = [diff[1] for diff in score_diffs]
print(f"Relative Error Reduction Info: {round(np.mean(score_diffs), 4)} ± {round(np.std(score_diffs), 4)}, ({round(score_diffs[0], 4)}, {round(score_diffs[-1], 4)})")
lower_quantile, upper_quantile = np.quantile(score_diffs, 0.025), np.quantile(score_diffs, 0.975)
score_diffs = [diff for diff in score_diffs if lower_quantile < diff < upper_quantile]
print(f"Relative Error Reduction Info (mean ± 2 * sigma): {round(np.mean(score_diffs), 4)} ± {round(np.std(score_diffs), 4)}, ({round(score_diffs[0], 4)}, {round(score_diffs[-1], 4)})")
print(f"Number of Errored Runs (Base/pseudo_label): {len(base[~base['info'].isna()])}/{len(pseudo_label[~pseudo_label['info'].isna()])}")
def print_automl_comparisons(base: pd.DataFrame, pseudo_label: pd.DataFrame, others: pd.DataFrame):
print("==============================================================================")
rows = []
for framework in others['framework'].unique():
other = others[others['framework'] == framework]
first_better, equal_performance, second_better = compare_dfs(other, base)
base_win, base_lose = compute_win_lose(first_better, equal_performance, second_better)
first_better, equal_performance, second_better = compare_dfs(other, pseudo_label)
pseudo_label_win, pseudo_label_lose = compute_win_lose(first_better, equal_performance, second_better)
base_improvement, _, _, _ = compare_dfs_improvement(other, base)
pseudo_label_improvement, _, _, _ = compare_dfs_improvement(other, pseudo_label)
rows.append({'Framework': framework, 'Base Win Rate': base_win, 'pseudo_label Win Rate': pseudo_label_win,
'Base Error Reduction': base_improvement, 'pseudo_label Error Reduction': pseudo_label_improvement,
'Win Rate Improvement': pseudo_label_win - base_win, 'Error Reduction Improvement': pseudo_label_improvement - base_improvement})
df = pd.DataFrame(rows)
print(df)
print("==============================================================================")
def print_suite_result(base: pd.DataFrame, pseudo_label: pd.DataFrame, indepth=True, grouped=False):
baselow = filter_samples(base, samples=args.sample_low)
pseudo_labellow = filter_samples(pseudo_label, samples=args.sample_low)
basemed = filter_samples(filter_samples(base, samples=args.sample_low, lower=False), samples=args.sample_med)
pseudo_labelmed = filter_samples(filter_samples(pseudo_label, samples=args.sample_low, lower=False), samples=args.sample_med)
basehigh = filter_samples(base, samples=args.sample_med, lower=False)
pseudo_labelhigh = filter_samples(pseudo_label, samples=args.sample_med, lower=False)
first_better, equal_performance, second_better = compare_dfs(base, pseudo_label, grouped=grouped)
num_total = len(first_better) + len(second_better) + len(equal_performance)
print("==============================================================================")
print(f"Mean Improvement Ratio: {compare_dfs_improvement(base, pseudo_label)[0]}")
print(f"Win Rate: {round((len(second_better) + 0.5 * len(equal_performance))/ num_total, 4)}, Lose Rate: {round((len(first_better) + 0.5 * len(equal_performance)) / num_total, 4)}")
print(f"All Run Base Win: {len(first_better)}, pseudo_label Win: {len(second_better)}, Tie: {len(equal_performance)}")
rows = []
win, lose = compute_win_lose(first_better, equal_performance, second_better)
rows.append({'Sample Size': 'ALL', 'Feature Size': 'ALL', 'Base Win': len(first_better), 'pseudo_label Win': len(second_better), 'Tie': len(equal_performance), 'Win Rate': win, 'Lose Rate': lose})
ss_base = filter_features(baselow, features=args.feature_low)
ss_pseudo_label = filter_features(pseudo_labellow, features=args.feature_low)
first_better, equal_performance, second_better = compare_dfs(ss_base, ss_pseudo_label, grouped=grouped)
win, lose = compute_win_lose(first_better, equal_performance, second_better)
rows.append({'Sample Size': 'S', 'Feature Size': 'S', 'Base Win': len(first_better), 'pseudo_label Win': len(second_better), 'Tie': len(equal_performance), 'Win Rate': win, 'Lose Rate': lose})
sm_base = filter_features(filter_features(baselow, features=args.feature_low, lower=False), features=args.feature_med)
sm_pseudo_label = filter_features(filter_features(pseudo_labellow, features=args.feature_low, lower=False), features=args.feature_med)
first_better, equal_performance, second_better = compare_dfs(sm_base, sm_pseudo_label, grouped=grouped)
win, lose = compute_win_lose(first_better, equal_performance, second_better)
rows.append({'Sample Size': 'S', 'Feature Size': 'M', 'Base Win': len(first_better), 'pseudo_label Win': len(second_better), 'Tie': len(equal_performance), 'Win Rate': win, 'Lose Rate': lose})
sl_base = filter_features(baselow, features=args.feature_med, lower=False)
sl_pseudo_label = filter_features(pseudo_labellow, features=args.feature_med, lower=False)
first_better, equal_performance, second_better = compare_dfs(sl_base, sl_pseudo_label, grouped=grouped)
win, lose = compute_win_lose(first_better, equal_performance, second_better)
rows.append({'Sample Size': 'S', 'Feature Size': 'L', 'Base Win': len(first_better), 'pseudo_label Win': len(second_better), 'Tie': len(equal_performance), 'Win Rate': win, 'Lose Rate': lose})
ms_base = filter_features(basemed, features=args.feature_low)
ms_pseudo_label = filter_features(pseudo_labelmed, features=args.feature_low)
first_better, equal_performance, second_better = compare_dfs(ms_base, ms_pseudo_label, grouped=grouped)
win, lose = compute_win_lose(first_better, equal_performance, second_better)
rows.append({'Sample Size': 'M', 'Feature Size': 'S', 'Base Win': len(first_better), 'pseudo_label Win': len(second_better), 'Tie': len(equal_performance), 'Win Rate': win, 'Lose Rate': lose})
mm_base = filter_features(filter_features(basemed, features=args.feature_low, lower=False), features=args.feature_med)
mm_pseudo_label = filter_features(filter_features(pseudo_labelmed, features=args.feature_low, lower=False), features=args.feature_med)
first_better, equal_performance, second_better = compare_dfs(mm_base, mm_pseudo_label, grouped=grouped)
win, lose = compute_win_lose(first_better, equal_performance, second_better)
rows.append({'Sample Size': 'M', 'Feature Size': 'M', 'Base Win': len(first_better), 'pseudo_label Win': len(second_better), 'Tie': len(equal_performance), 'Win Rate': win, 'Lose Rate': lose})
ml_base = filter_features(basemed, features=args.feature_med, lower=False)
ml_pseudo_label = filter_features(pseudo_labelmed, features=args.feature_med, lower=False)
first_better, equal_performance, second_better = compare_dfs(ml_base, ml_pseudo_label, grouped=grouped)
win, lose = compute_win_lose(first_better, equal_performance, second_better)
rows.append({'Sample Size': 'M', 'Feature Size': 'L', 'Base Win': len(first_better), 'pseudo_label Win': len(second_better), 'Tie': len(equal_performance), 'Win Rate': win, 'Lose Rate': lose})
ls_base = filter_features(basehigh, features=args.feature_low)
ls_pseudo_label = filter_features(pseudo_labelhigh, features=args.feature_low)
first_better, equal_performance, second_better = compare_dfs(ls_base, ls_pseudo_label, grouped=grouped)
win, lose = compute_win_lose(first_better, equal_performance, second_better)
rows.append({'Sample Size': 'L', 'Feature Size': 'S', 'Base Win': len(first_better), 'pseudo_label Win': len(second_better), 'Tie': len(equal_performance), 'Win Rate': win, 'Lose Rate': lose})
lm_base = filter_features(filter_features(basehigh, features=args.feature_low, lower=False), features=args.feature_med)
lm_pseudo_label = filter_features(filter_features(pseudo_labelhigh, features=args.feature_low, lower=False), features=args.feature_med)
first_better, equal_performance, second_better = compare_dfs(lm_base, lm_pseudo_label, grouped=grouped)
win, lose = compute_win_lose(first_better, equal_performance, second_better)
rows.append({'Sample Size': 'L', 'Feature Size': 'M', 'Base Win': len(first_better), 'pseudo_label Win': len(second_better), 'Tie': len(equal_performance), 'Win Rate': win, 'Lose Rate': lose})
ll_base = filter_features(basehigh, features=args.feature_med, lower=False)
ll_pseudo_label = filter_features(pseudo_labelhigh, features=args.feature_med, lower=False)
first_better, equal_performance, second_better = compare_dfs(ll_base, ll_pseudo_label, grouped=grouped)
win, lose = compute_win_lose(first_better, equal_performance, second_better)
rows.append({'Sample Size': 'L', 'Feature Size': 'L', 'Base Win': len(first_better), 'pseudo_label Win': len(second_better), 'Tie': len(equal_performance), 'Win Rate': win, 'Lose Rate': lose})
df = pd.DataFrame(rows)
print(df)
print("==============================================================================")
# ======= NEW ======= #
# 1h
# base = "result/baseline/1hmed/results_automlbenchmark_1h8c_autogluon.ag.1h8c.aws.20210827T163031.csv"
# pseudo_label = "result/best/1hmed/results_automlbenchmark_1h8c_pseudo_label_med.ag.1h8c.aws.20210828T182000.csv"
# base = "result/baseline/1hhigh/results_automlbenchmark_1h8c_autogluon_high.ag.1h8c.aws.20210829T224457.csv"
# pseudo_label = "result/best/1hhigh/results_automlbenchmark_1h8c_pseudo_label_high.ag.1h8c.aws.20210829T224459.csv"
# base = "result/baseline/1hnorepeat/results_automlbenchmark_1h8c_autogluon_norepeat.ag.1h8c.aws.20210827T202558.csv"
# pseudo_label = "result/best/1hnorepeat/results_automlbenchmark_1h8c_pseudo_label_norepeat.ag.1h8c.aws.20210829T224516.csv"
# base = "result/baseline/1hbest/results_automlbenchmark_1h8c_autogluon_bestquality.ag.1h8c.aws.20210830T230714.csv"
# base = "~/Downloads/results_automlbenchmark_1h8c_2021_08_29_knn.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_autogluon_bestquality.ag.1h8c.aws.20210902T175142.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210902T175139.csv"
# norepeat
# base = "~/Downloads/results_automlbenchmark_1h8c_autogluon_norepeat.ag.1h8c.aws.20210908T151458.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label_norepeat.ag.1h8c.aws.20210905T230349.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label_norepeat.ag.1h8c.aws.20210906T213059.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label_norepeat.ag.1h8c.aws.20210908T035241.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label_norepeat.ag.1h8c.aws.20210909T090238.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label_norepeat.ag.1h8c.aws.20210909T202902.csv"
# full
base = "~/Downloads/4h8c_best_temp/results_automlbenchmark_10_19_AG_best_quality.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210904T011959(1).csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210905T192540.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210906T202118.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label_minimprovement.ag.1h8c.aws.20210906T213112.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210907T084943.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label_stoppinground.ag.1h8c.aws.20210907T104958.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210907T175902.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label_replacebag.ag.1h8c.aws.20210907T175858.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210907T221927.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label_replacebag.ag.1h8c.aws.20210907T222005.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210908T012205.csv" # slide result
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label_minimprovement.ag.1h8c.aws.20210908T094645.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210908T182233.csv" # no 300 cap
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210908T235902.csv" # removed feature metadata bug
# pseudo_label = "~/Downloads/results_automlbenchmark_1h8c_pseudo_label.ag.1h8c.aws.20210909T070228.csv" # experimental
pseudo_label = "~/Downloads/4h8c_best_temp/results_automlbenchmark_10_25_temp_best_4h8c.csv" # final
# 4h
# base = "result/baseline/4hmed/results_automlbenchmark_4h8c_autogluon.ag.4h8c.aws.20210827T163032.csv"
# pseudo_label = "result/best/4hmed/results_automlbenchmark_4h8c_pseudo_label_med.ag.4h8c.aws.20210828T210007.csv"
# base = "result/baseline/4hhigh/results_automlbenchmark_4h8c_autogluon_high.ag.4h8c.aws.20210830T073353.csv"
# pseudo_label = "result/best/4hhigh/results_automlbenchmark_4h8c_pseudo_label_high.ag.4h8c.aws.20210830T073352.csv"
# base = "result/baseline/4hnorepeat/results_automlbenchmark_4h8c_autogluon_norepeat.ag.4h8c.aws.20210827T062721.csv"
# pseudo_label = "result/best/4hnorepeat/results_automlbenchmark_4h8c_pseudo_label_norepeat.ag.4h8c.aws.20210828T210006.csv"
# pseudo_label = "result/best/4hnorepeat/results_automlbenchmark_4h8c_pseudo_label_norepeat.ag.4h8c.aws.20210829T060616.csv"
# base = "result/baseline/4hbest/results_automlbenchmark_4h8c_autogluon_bestquality.ag.4h8c.aws.20210827T062731.csv"
# base = "result/best/4hbest/results_automlbenchmark_4h8c_pseudo_label.ag.4h8c.aws.20210828T210005.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label_improvementthreshold.ag.4h8c.aws.20210829T060617.csv"
# # norepeat
# base = "~/Downloads/results_automlbenchmark_4h8c_autogluon_norepeat.ag.4h8c.aws.20210908T145253.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label_norepeat.ag.4h8c.aws.20210905T230350.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label_norepeat.ag.4h8c.aws.20210907T001718.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label_norepeat.ag.4h8c.aws.20210908T062913.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label_norepeat.ag.4h8c.aws.20210909T090240.csv"
# # full
# base = "~/Downloads/results_automlbenchmark_4h8c_2021_09_02.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label.ag.4h8c.aws.20210902T062359(1).csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label.ag.4h8c.aws.20210905T192543.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label.ag.4h8c.aws.20210906T095323.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label.ag.4h8c.aws.20210906T202121.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label.ag.4h8c.aws.20210907T131305.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label.ag.4h8c.aws.20210908T012206.csv" # slide result
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label_minimprovement.ag.4h8c.aws.20210908T074624.csv"
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label.ag.4h8c.aws.20210908T182235.csv" # no 300 cap
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label.ag.4h8c.aws.20210908T235905.csv" # no feature metadata bag
# pseudo_label = "~/Downloads/results_automlbenchmark_4h8c_pseudo_label.ag.4h8c.aws.20210909T070232.csv" # experimental
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--sample_low', help='upper boundary that determines whether a dataset is small sized', default=5000, type=int)
parser.add_argument('-b', '--sample_med', help='upper boundary that determines whether a dataset is medium sized', default=50000, type=int)
parser.add_argument('-c', '--feature_low', help='upper boundary that determines whether a dataset feature space is small sized', default=20, type=int)
parser.add_argument('-d', '--feature_med', help='upper boundary that determines whether a dataset feature space is medium sized', default=50, type=int)
parser.add_argument('-e', '--duration', help='determines before what hour the job must have finished', default=4, type=float)
parser.add_argument('-f', '--framework', help='whether to compare against other frameworks', default=False, type=bool)
parser.add_argument('-g', '--done_only', help='whether to display results for only datasets that finished', default=False, type=bool)
args = parser.parse_args()
print(f"{os.path.basename(base)} vs {os.path.basename(pseudo_label)}")
base = filter_type(pd.read_csv(base))
pseudo_label = filter_type(pd.read_csv(pseudo_label))
# base = base[base['framework'] == 'AutoGluon_bestquality'] # FIXME !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# pseudo_label = pseudo_label[pseudo_label['framework'] == 'AutoGluon_bestquality'] # FIXME !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# pseudo_label = pseudo_label[pseudo_label['framework'] == 'AutoGluon_bestquality_pseudo_label'] # FIXME !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# pseudo_label TRIGGERED ONLY
# # import pdb; pdb.set_trace()
# debug = pd.read_csv('~/Downloads/debug_info(166).csv')
# untriggered_rows = debug[(~debug['pseudo_labeld']) & (debug['total_pseudo_label_time'] == 0)]
# untriggered_task_fold = untriggered_rows['name'] + untriggered_rows['fold'].astype(str)
# base['taskfold'] = base['task'] + base['fold'].astype(str)
# pseudo_label['taskfold'] = pseudo_label['task'] + pseudo_label['fold'].astype(str)
# base = base[~base['taskfold'].isin(untriggered_task_fold)]
# pseudo_label = pseudo_label[~pseudo_label['taskfold'].isin(untriggered_task_fold)]
# others = pd.read_csv(f"result/baseline/{int(args.duration)}hbest/other_systems.csv")
task_metadata = | pd.read_csv('~/Downloads/task_metadata.csv') | pandas.read_csv |
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
2017-01-01|2017-01-01 |True
2017-01-02 |2017-01-02|True
2017-10-01 |2017-10-10 |False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2, ignore_spaces=True)
assert_series_equal(expect_out, actual_out_rev, check_names=False)
def test_date_columns_unequal():
"""I want datetime fields to match with dates stored as strings
"""
df = pd.DataFrame([{"a": "2017-01-01", "b": "2017-01-02"}, {"a": "2017-01-01"}])
df["a_dt"] = pd.to_datetime(df["a"])
df["b_dt"] = pd.to_datetime(df["b"])
assert datacompy.columns_equal(df.a, df.a_dt).all()
assert datacompy.columns_equal(df.b, df.b_dt).all()
assert datacompy.columns_equal(df.a_dt, df.a).all()
assert datacompy.columns_equal(df.b_dt, df.b).all()
assert not datacompy.columns_equal(df.b_dt, df.a).any()
assert not datacompy.columns_equal(df.a_dt, df.b).any()
assert not datacompy.columns_equal(df.a, df.b_dt).any()
assert not datacompy.columns_equal(df.b, df.a_dt).any()
def test_bad_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[{"a": "2017-01-01", "b": "2017-01-01"}, {"a": "2017-01-01", "b": "217-01-01"}]
)
df["a_dt"] = pd.to_datetime(df["a"])
assert not datacompy.columns_equal(df.a_dt, df.b).any()
def test_rounded_date_columns():
"""If strings can't be coerced into dates then it should be false for the
whole column.
"""
df = pd.DataFrame(
[
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.000000", "exp": True},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00.123456", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:01.000000", "exp": False},
{"a": "2017-01-01", "b": "2017-01-01 00:00:00", "exp": True},
]
)
df["a_dt"] = pd.to_datetime(df["a"])
actual = datacompy.columns_equal(df.a_dt, df.b)
expected = df["exp"]
assert_series_equal(actual, expected, check_names=False)
def test_decimal_float_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": False},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_float_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": 1, "expected": True},
{"a": Decimal("1.3"), "b": 1.3, "expected": True},
{"a": Decimal("1.000003"), "b": 1.000003, "expected": True},
{"a": Decimal("1.000000004"), "b": 1.000000003, "expected": True},
{"a": Decimal("1.3"), "b": 1.2, "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": 1, "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": False},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_decimal_columns_equal_rel():
df = pd.DataFrame(
[
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.3"), "expected": True},
{"a": Decimal("1.000003"), "b": Decimal("1.000003"), "expected": True},
{"a": Decimal("1.000000004"), "b": Decimal("1.000000003"), "expected": True},
{"a": Decimal("1.3"), "b": Decimal("1.2"), "expected": False},
{"a": np.nan, "b": np.nan, "expected": True},
{"a": np.nan, "b": Decimal("1"), "expected": False},
{"a": Decimal("1"), "b": np.nan, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.001)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_infinity_and_beyond():
df = pd.DataFrame(
[
{"a": np.inf, "b": np.inf, "expected": True},
{"a": -np.inf, "b": -np.inf, "expected": True},
{"a": -np.inf, "b": np.inf, "expected": False},
{"a": np.inf, "b": -np.inf, "expected": False},
{"a": 1, "b": 1, "expected": True},
{"a": 1, "b": 0, "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1", "expected": False},
{"a": 1, "b": "yo", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_mixed_column_with_ignore_spaces_and_case():
df = pd.DataFrame(
[
{"a": "hi", "b": "hi ", "expected": True},
{"a": 1, "b": 1, "expected": True},
{"a": np.inf, "b": np.inf, "expected": True},
{"a": Decimal("1"), "b": Decimal("1"), "expected": True},
{"a": 1, "b": "1 ", "expected": False},
{"a": 1, "b": "yo ", "expected": False},
{"a": "Hi", "b": "hI ", "expected": True},
{"a": "HI", "b": "HI ", "expected": True},
{"a": "hi", "b": "hi ", "expected": True},
]
)
actual_out = datacompy.columns_equal(df.a, df.b, ignore_spaces=True, ignore_case=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_compare_df_setter_bad():
df = pd.DataFrame([{"a": 1, "A": 2}, {"a": 2, "A": 2}])
with raises(TypeError, match="df1 must be a pandas DataFrame"):
compare = datacompy.Compare("a", "a", ["a"])
with raises(ValueError, match="df1 must have all columns from join_columns"):
compare = datacompy.Compare(df, df.copy(), ["b"])
with raises(ValueError, match="df1 must have unique column names"):
compare = datacompy.Compare(df, df.copy(), ["a"])
df_dupe = pd.DataFrame([{"a": 1, "b": 2}, {"a": 1, "b": 3}])
assert datacompy.Compare(df_dupe, df_dupe.copy(), ["a", "b"]).df1.equals(df_dupe)
def test_compare_df_setter_good():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = pd.DataFrame([{"A": 1, "B": 2}, {"A": 2, "B": 3}])
compare = datacompy.Compare(df1, df2, ["a"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a"]
compare = datacompy.Compare(df1, df2, ["A", "b"])
assert compare.df1.equals(df1)
assert compare.df2.equals(df2)
assert compare.join_columns == ["a", "b"]
def test_compare_df_setter_different_cases():
df1 = pd.DataFrame([{"a": 1, "b": 2}, {"a": 2, "b": 2}])
df2 = | pd.DataFrame([{"A": 1, "b": 2}, {"A": 2, "b": 3}]) | pandas.DataFrame |
#%%
import numpy as np
import pandas as pd
import altair as alt
from altair_saver import save
import futileprot as fp
colors, palette = fp.viz.altair_style()
data = pd.read_csv('../../../../data/spectrophotometry/2021-06-24_growth_curves/2021-06-24_growth_curves.csv')
tidy, params, opts = fp.growth.infer_growth_rate(data,
groupby=['growth_medium', 'strain'],
od_bounds=[0.04, 0.4],
print_params=False)
# %%
# Rename the strains in the data and parameter dataframes
tidy['strain'] = [s.replace('delta_', 'Δ') for s in tidy['strain'].values]
params['strain'] = [s.replace('delta_', 'Δ') for s in params['strain'].values]
points = alt.Chart(tidy,
width=200,
height=200
).mark_line(
point=True,
opacity=0.75
).encode(
x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),
y=alt.Y('od_600nm:Q', title='optical density', scale=alt.Scale(type='log')),
color=alt.Color('strain:N',
title='strain',
scale=alt.Scale(scheme='tableau10'))
).facet(column=alt.Column('growth_medium:N',
header=alt.Header(labelFontSize=15)),
row=alt.Row('strain:N',
header=alt.Header(labelFontSize=15))
).resolve_scale(x='independent')
save(points, '2021-06-24_KO_growth_curves.pdf')
# %%
# Make a plot of the growth rates for each medium
growth_rates = params[params['parameter']=='growth_rate']
plots = []
for g, d in growth_rates.groupby(['growth_medium']):
min_val = 0.9 * d['map_val'].min()
max_val = 1.1 * d['map_val'].max()
lam_base= alt.Chart(d,
width=400,
height=200,
).transform_calculate(
ymin='datum.map_val-datum.cred_int',
ymax='datum.map_val+datum.cred_int'
).encode(
x=alt.X(
'strain:N',
title='strain'),
color=alt.Color(
'strain:N',
title='strain',
scale=alt.Scale(scheme='tableau10')))
lam_points = lam_base.mark_point(size=80,
opacity=0.75
).encode(
y=alt.Y('map_val:Q',
title='growth rate [inv. hr]',
scale=alt.Scale(domain=[min_val, max_val])),
)
lam_lines = lam_base.mark_errorbar(
).encode(
y='ymin:Q',
y2='ymax:Q'
)
_plot = (lam_points + lam_lines).properties(title=g)
plots.append(_plot)
plot = plots[0] & plots[1]
save(plot, '2021-06-24_KO_growth_rates.pdf')
#%%
# Plot the fits
plots = 0
for g, d in params.groupby(['strain']):
_plots = 0
for _g, _d in d.groupby(['growth_medium']):
# Get the data
_data = tidy[(tidy['growth_medium'] == _g) &
(tidy['strain'] == g)]
points = alt.Chart(
data=_data,
width=200,
height=200
).mark_point(
size=80,
opacity=0.75
).encode(
x=alt.X('elapsed_time_hr:Q',
title='elapsed time [hr]'),
y=alt.Y('od_600nm:Q',
title='optical density [a.u.]',
scale=alt.Scale(type='log')),
)
# Compute the fit
od_init = _d[_d['parameter']=='od_init']['map_val'].values[0]
lam = _d[_d['parameter']=='growth_rate']['map_val'].values[0]
time_range = np.linspace(0, _data['elapsed_time_hr'].max(), 200)
fit = od_init * np.exp(lam * time_range)
__df = | pd.DataFrame([]) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: jupyter_scripts//ipynb,scripts//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # series_tools:
#
# set of tools that work with streamflow records.
# - Identify events.
# # - Identify baseflow and runoff.
#
import pandas as pd
import numpy as np
# ## Digital filters
#
# Collection of functions to separate runoff from baseflow.
# +
def DigitalFilters(Q, tipo='Eckhart', a=0.98, BFI=0.8):
    '''Digital filters to separate baseflow from runoff in a continuous time series.
Parameters:
- tipo: type of filter to be used.
- Eckhart o 1.
- Nathan o 2.
- Chapman o 3.
- Q: pandas series with the streamflow records.
- a: paramter for the filter.
- Eckhart: 0.98.
- Nathan: 0.8.
- Chapman: 0.8.
- BFI: 0.8 only applies for Eckhart filter.
Returns:
- Pandas DataFrame with the Runoff, Baseflow.'''
#Functions definitions.
def Nathan1990(Q, a = 0.8):
'''One parameter digital filter of Nathan and McMahon (1990)'''
R = np.zeros(Q.size)
c = 1
for q1,q2 in zip(Q[:-1], Q[1:]):
R[c] = a*R[c-1] + ((1+a)/2.)*(q2-q1)
if R[c]<0:
R[c] = 0
elif R[c]>q2:
R[c] = q2
c += 1
B = Q - R
return R, B
def Eckhart2005(Q, BFI=0.8, a = 0.98):
'''Two parameter Eckhart digital filter
Parameters:
- Q: np.ndarray with the streamflow records.
- BFI: The maximum amount of baseflow (%).
- a: parameter alpha (0.98)
Output:
- R: total runoff.
- B: total baseflow.'''
        #Separation
B = np.zeros(Q.size)
B[0] = Q[0]
c = 1
for q in Q[1:]:
            #Separation equation (two-parameter Eckhart filter recursion)
B[c] = ((1.0-BFI)*a*B[c-1]+(1.0-a)*BFI*q)/(1.0-a*BFI)
            #Constraint: baseflow cannot exceed the total streamflow
if B[c] > q:
B[c] = q
c+=1
R = Q - B
return R, B
def ChapmanMaxwell1996(Q, a = 0.98):
'''Digital filter proposed by chapman and maxwell (1996)'''
B = np.zeros(Q.size)
c = 1
for q in Q[1:]:
B[c] = (a / (2.-a))*B[c-1] + ((1.-a)/(2.-a))*q
c+=1
R = Q-B
return R,B
    #Call the selected filter
if tipo == 'Eckhart' or tipo == 1:
R,B = Eckhart2005(Q.values, a, BFI)
elif tipo =='Nathan' or tipo == 2:
R,B = Nathan1990(Q.values, a,)
elif tipo == 'Chapman' or tipo ==3:
R,B = ChapmanMaxwell1996(Q.values, a)
    #Return the resulting series
return pd.DataFrame(np.vstack([R,B]).T, index = Q.index, columns = ['Runoff','Baseflow'])
# -
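# A quick usage sketch (synthetic data, not part of the original analysis): build a random
# streamflow-like series and separate it with the Eckhart filter defined above.
# +
Q_demo = pd.Series(np.random.lognormal(mean=1.0, sigma=0.5, size=365),
                   index=pd.date_range('2020-01-01', periods=365, freq='D'))
demo_sep = DigitalFilters(Q_demo, tipo='Eckhart', a=0.98, BFI=0.8)
demo_sep.head()
# -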
# ## Event selection functions
#
# Collection of functions to identify peaks in a series and the end of each peak recession.
# +
def Events_Get_Peaks(Q, Qmin = None, tw = pd.Timedelta('12h')):
    '''Find the peak values of the hydrographs of a series
    Params:
    - Q: Pandas series with the records.
    - Qmin: The minimum value of Q to be considered a peak.
        if None, takes the 99th percentile of the series as the minimum.
    - tw: size of the time window used to eliminate surrounding maximum values'''
if Qmin is None:
Qmin = np.percentile(Q.values[np.isfinite(Q.values)], 99)
#Find the maximum
Qmax = Q[Q>Qmin]
QmaxCopy = Qmax.copy()
    #Search for the maximum maximorum values
Flag = True
PosMax = []
while Flag:
MaxIdx = Qmax.idxmax()
PosMax.append(MaxIdx)
Qmax[MaxIdx-tw:MaxIdx+tw] = -9
if Qmax.max() < Qmin: Flag = False
#Return the result
return QmaxCopy[PosMax].sort_index()
def Events_Get_End(Q, Qmax, minDif = 0.04, minDistance = None,maxSearch = 10, Window = '1h'):
    '''Find the end of each selected event in order to know the
    length of each recession event.
Parameters:
- Q: Pandas series with the records.
- Qmax: Pandas series with the peak streamflows.
- minDif: The minimum difference to consider that a recession is over.
Optional:
- minDistance: minimum temporal distance between the peak and the end.
- maxSearch: maximum number of iterations to search for the end.
    - Window: Size of the temporal window used to smooth the streamflow
records before the difference estimation (pandas format).
Returns:
    - Qend: The point indicating the end of the recession.'''
#Obtains the difference
X = Q.resample('1h').mean()
dX = X.values[1:] - X.values[:-1]
dX = pd.Series(dX, index=X.index[:-1])
#Obtains the points.
DatesEnds = []
Correct = []
for peakIndex in Qmax.index:
try:
a = dX[dX.index > peakIndex]
if minDistance is None:
DatesEnds.append(a[a>minDif].index[0])
else:
Dates = a[a>minDif].index
flag = True
c = 0
while flag:
distancia = Dates[c] - peakIndex
if distancia > minDistance:
DatesEnds.append(Dates[c])
flag= False
c += 1
if c>maxSearch: flag = False
Correct.append(0)
except:
DatesEnds.append(peakIndex)
Correct.append(1)
#Returns the pandas series with the values and end dates
Correct = np.array(Correct)
return pd.Series(Q[DatesEnds], index=DatesEnds), Qmax[Correct == 0]
# -
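# Usage sketch (synthetic hourly record, hypothetical values): select peaks above the 99th
# percentile and then locate the end of each recession.
# +
Q_hourly = pd.Series(np.random.lognormal(mean=1.0, sigma=0.7, size=24 * 90),
                     index=pd.date_range('2020-01-01', periods=24 * 90, freq='h'))
Q_peaks = Events_Get_Peaks(Q_hourly, tw=pd.Timedelta('12h'))
Q_ends, Q_peaks_ok = Events_Get_End(Q_hourly, Q_peaks, minDif=0.04, Window='1h')
# -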
# ## Runoff analysis
# +
def Runoff_SeparateBaseflow(Qobs, Qsim):
'''From observed records obtain the baseflow and runoff streamflow records.
Parameters:
- Qobs: Observed record dt < 1h.
- Qsim: Simulated records dt < 1h.
Returns:
- Qh: Observed records at hourly scale.
- Qsh: Simulated records at a hourly scale.
- Qsep: Observed separated records at hourly scale'''
#Observed series to hourly scale.
Qh = Qobs.resample('1h').mean()
Qh[np.isnan(Qh)] = Qh.mean()
Qh[Qh<0] = Qh.mean()
Qsep = DigitalFilters(Qh, tipo = 'Nathan', a = 0.998)
#Pre-process of simulated series to hourly scale.
Qsh = Qsim.resample('1h').mean()
Qsh[np.isnan(Qsh)] = 0.0
#Return results
return Qh, Qsh, Qsep
def Runoff_FindEvents(Qobs, Qsim, minTime = 1, minConcav = None, minPeak = None):
'''Separates runoff from baseflow and finds the events.
Parameters:
    - Qobs: Hourly observed streamflow.
- Qsim: Hourly simulated streamflow.
- minTime: minimum duration of the event.
- minConcav: minimum concavity of the event.
- minPeak: minimum value of the peakflows.
Returns:
- pos1: pandas index lists with the initial positions.
- pos2: pandas index lists with the end positions.'''
#Obtain the positions of the start and
pos1, pos2 = __Runoff_Get_Events__(Qsim, np.percentile(Qobs, 20))
pos1, pos2 = __Runoff_Del_Events__(Qobs, pos1, pos2, minTime=1, minConcav=minConcav, minPeak = minPeak)
#Returns results
return pos1, pos2
def Runoff_CompleteAnalysis(Area, Qobs, Rain, Qsep, pos1, pos2, N=None, Nant = None):
    '''Obtains the DataFrame with the summary of the RC analysis.
Parameters:
- Area: the area of the basin in km2.
- Qobs: Hourly observed streamflow.
- Rain: Hourly rainfall.
- Qsep: Hourly dataFrame with the separated flows.
- pos1: pandas index lists with the initial positions.
- pos2: pandas index lists with the end positions.
    - N: Number of days to evaluate the rainfall between p1-N : p2.
    - Nant: Number of antecedent days to evaluate the rainfall between p1-Nant : p1-N.
Results:
- DataFrame with the columns: RC, RainEvent, RainBefore, RainInt, Qmax'''
#Search for N
if N is None:
#Time window based on the basin area.
N = Area**0.2
N = np.floor(N) // 2 * 2 + 1
if N<3: N = 3
if N>11: N = 11
Ndays = pd.Timedelta(str(N)+'d')
if Nant is None:
Nant = pd.Timedelta(str(N+3)+'d')
else:
Ndays = N
if Nant is None:
Nant = N + | pd.Timedelta('3d') | pandas.Timedelta |
import numpy as np
import argparse
import os
import abc
from os import path
import pandas as pd
from torch.utils.data import Dataset
import torch
import torch.optim
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
from torchvision.transforms import ToTensor, Normalize, Compose, Lambda
FILENAME_TYPE = {'full': '_T1w_space-MNI152NLin2009cSym_res-1x1x1_T1w',
'cropped': '_T1w_space-MNI152NLin2009cSym_desc-Crop_res-1x1x1_T1w',
'skull_stripped': '_space-Ixi549Space_desc-skullstripped_T1w'}
class ToTensor(object):
"""Convert image type to Tensor and diagnosis to diagnosis code"""
def __call__(self, image):
np.nan_to_num(image, copy=False)
image = image.astype(float)
return torch.from_numpy(image[np.newaxis, :]).float()
class MinMaxNormalization(object):
"""Normalizes a tensor between 0 and 1"""
def __call__(self, image):
return (image - image.min()) / (image.max() - image.min())
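# Note: the ToTensor class above shadows the torchvision ToTensor imported at the top of this
# file; it converts raw numpy image arrays (with NaNs zeroed out) rather than PIL images.
# A typical pipeline (sketch) chains the two custom transforms before handing them to a Dataset:
#   transform = Compose([ToTensor(), MinMaxNormalization()])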
def load_data(train_val_path, diagnoses_list, baseline=True):
train_df = pd.DataFrame()
valid_df = pd.DataFrame()
test_df = pd.DataFrame()
train_path = path.join(train_val_path, 'train')
valid_path = path.join(train_val_path, 'validation')
test_path = path.join(train_val_path, 'test')
for diagnosis in diagnoses_list:
if baseline:
train_diag_path = path.join(
train_path, diagnosis + '_baseline.tsv')
else:
train_diag_path = path.join(train_path, diagnosis + '.tsv')
valid_diag_path = path.join(valid_path, diagnosis + '_baseline.tsv')
test_diag_path = path.join(test_path, diagnosis + '_baseline.tsv')
train_diag_df = pd.read_csv(train_diag_path, sep='\t')
valid_diag_df = | pd.read_csv(valid_diag_path, sep='\t') | pandas.read_csv |
__author__ = 'jlu96'
import numpy as np
from sklearn.model_selection import LeaveOneOut
import matplotlib as mpl
import importlib
mpl.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import collections
import fit_methods as fm
import geneTSmunging as gtm
import copy
global test2fit_method
test2fit_method = {'e': fm.fit_enet, "l": fm.fit_lasso, "r": fm.fit_ridge}
global args2stratify_by
args2stratify_by = {"e": "effect", "n": "none"}
def cross_validate(X_matr, lag, fit_method, hyperlist, rows=None, has_reps=False, **kwargs):
"""
X_matr: n x T matrix of genes
lag
fit_method
hyperlist: list of settings of "hyper"
rows: optional restriction to a certain list of response variables
For each hyperparam setting, use that hyperparam in fitting the whole data and evaluating prediction error
Take the hyperparam with least avg mse.
Return results from each hyperparam setting and the best hyperparam
Return: best_hyper, best, hyper_df
"""
n = X_matr.shape[0]
T = X_matr.shape[1]
    if rows is None:
rows = list(range(n))
hyper_fit_dfs = []
for hyper in hyperlist:
fit_results = []
loo = LeaveOneOut().split(X_matr)
# Iterate over all the genes
for train_index, test_index in loo:
# Only test the responses in the appropriate row
if test_index in rows:
_, X_test = X_matr[train_index], X_matr[test_index]
# there is a 3rd axis
if has_reps:
Y_test = np.reshape(X_test, (1, T, X_test.shape[2]))
else:
Y_test = np.reshape(X_test, (1, T))
fit_result = collections.OrderedDict()
fit_result["hyper"] = hyper
fit_result["row"] = test_index[0]
# Since Y_test is not in X_train, replace_rows is None
fit_result.update(fm.perform_loto_cv(X_matr=X_matr, Y_matr=Y_test, lag=lag, fit_method=fit_method,
hyper=hyper, replace_row=test_index,
has_reps=has_reps, **kwargs))
fit_results.append(fit_result)
fit_result_df = pd.DataFrame(fit_results)
hyper_fit_dfs.append(fit_result_df)
print("Hyper: ", hyper)
print(fit_result_df.head(n=20))
return hyper_fit_dfs
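# Example (illustrative, not part of the original module): a minimal call on a random
# expression matrix; the shapes and the hyperparameter grid are assumptions.
#   X = np.random.rand(10, 8)   # 10 genes, 8 timepoints
#   dfs = cross_validate(X, lag=2, fit_method=fm.fit_lasso, hyperlist=[0.1, 0.01])
#   summary = summarize_hyper_fit_dfs(dfs, [0.1, 0.01])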
def summarize_hyper_fit_dfs(hyper_fit_dfs, hyperlist):
"""
:param hyper_fit_dfs: Result of fit per ttimepoint
:param hyperlist: Hyperparameters
:return: hyper_df: Summary of overall performance for each hyper_df
"""
total_sselist = []
avg_dflist = []
std_dflist =[]
avg_r2list = []
std_r2list = []
avg_nlist = []
avg_mselist = []
std_mselist = []
for fit_result_df in hyper_fit_dfs:
stats_df = fit_result_df[["sse", "avg_df", "r2", "n", "mse"]]
total_df = stats_df.sum()
avg_df = total_df / stats_df.shape[0]
std_df = stats_df.std()
total_sselist.append(total_df["sse"])
avg_dflist.append(avg_df["avg_df"])
std_dflist.append(std_df["avg_df"])
avg_r2list.append(avg_df["r2"])
std_r2list.append(std_df["r2"])
avg_nlist.append(avg_df["n"])
avg_mselist.append(avg_df["mse"])
std_mselist.append(std_df["mse"])
summary_dict = collections.OrderedDict()
summary_dict["hyper"] = hyperlist
summary_dict["n_avg"] = avg_nlist
summary_dict["mse_avg"] = avg_mselist
summary_dict["mse_std"] = std_mselist
summary_dict["df_avg"] = avg_dflist
summary_dict["df_std"] = std_dflist
summary_dict["r2_avg"] = avg_r2list
summary_dict["r2_std"] = std_r2list
summary_dict["sse_total"] = total_sselist
hyper_df = pd.DataFrame(summary_dict)
return hyper_df
def get_best_hyper(hyper_df, sort_by="mse_avg", ascending=True):
"""
:param hyper_df:
:param sort_by:
:param ascending:
:return: the best hyper params
"""
hyper_df.sort_values(sort_by, inplace=True, ascending=ascending)
best = hyper_df.head(n=1)
best_hyper = best["hyper"].values[0]
return best_hyper, best, hyper_df
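# Example (illustrative): selecting the best setting from the summary table built above.
#   best_hyper, best_row, sorted_df = get_best_hyper(summary, sort_by="mse_avg")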
def run_cross_validate(geneTS, fit_method=fm.fit_lasso,
hyperlist=10**(-1 * np.arange(0, 4, 1.0)),
lag=2,
rows=None,
sort_by="mse_avg",
save_prefix=None,
has_reps=False):
"""
    Runs cross-validation over the given hyper-parameter list and returns the best setting and summary results.
"""
print("Hyper-parameters for cross-validation")
print(hyperlist)
# Cross-validate
hyper_fit_dfs = cross_validate(geneTS, lag, fit_method, hyperlist,
rows=rows,
has_reps=has_reps)
hyper_df = summarize_hyper_fit_dfs(hyper_fit_dfs, hyperlist)
best_hyper, best, hyper_df = get_best_hyper(hyper_df, sort_by=sort_by)
print("Hypers results:")
print(hyper_df)
return best_hyper, best, hyper_df, hyper_fit_dfs
def float_to_label(value):
assert isinstance(value, float) or isinstance(value, int)
return "%.0E" % value
def tuple_to_label(value):
assert isinstance(value, tuple)
return "(" + ", ".join([float_to_label(x) for x in value]) + ")"
def hyper_to_label(hyper):
"""
:param hyper: hyperparameter
:return: the corresponding label
"""
if isinstance(hyper, float) or isinstance(hyper, int):
return float_to_label(hyper)
elif isinstance(hyper, tuple):
return tuple_to_label(hyper)
def hyperlist_to_labellist(hyperlist):
"""
:param hyperlist:
:return: labellist, labels to use for plotting
"""
return [hyper_to_label(hyper) for hyper in hyperlist]
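# Examples (illustrative) of the "%.0E" formatting used above:
#   float_to_label(0.001)                     -> '1E-03'
#   tuple_to_label((0.1, 1))                  -> '(1E-01, 1E+00)'
#   hyperlist_to_labellist([0.1, (0.1, 1)])   -> ['1E-01', '(1E-01, 1E+00)']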
def run_to_label(run):
"""
:param run: string containing parameters
:return: string designed for labels
"""
label = run[:]
replacedict = collections.OrderedDict()
replacedict["original"] = "orig"
replacedict["integration"] = "int"
replacedict["0mean-unnormalized"] = "ZU"
replacedict["0mean-1var"] = "ZS"
for key in replacedict:
label = label.replace(key, replacedict[key])
return label
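# Example (illustrative): run_to_label("original_0mean-1var_integration") -> "orig_ZS_int"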
#
# def hyperlist_to_namelist(hyperlist):
# """
# :param hyperlist:
# :return: labellist, labels to use for plotting
# """
# hyper_value = hyperlist[0]
#
# if isinstance(hyper_value, float) or isinstance(hyper_value, int):
# return hyperlist_to_labellist(hyperlist)
#
# elif isinstance(hyper_value, tuple):
# return [hyper.replace(" ", "") for hyper in hyperlist_to_labellist(hyperlist)]
def plot_hyper_boxplot(hyperlist, fit_result_dfs, fit_result_key, xlabel="Hyperparameters", ylabel="Output parameter",
title="Hyperparameters VS Output parameters",filename=None, horizontal_line_color_labels=None, hyper_color_labels=None,
hyper_color_label_margin=0.4):
"""
hyperlist: the list of hyperparameters
fit_result_dfs: each a dataframe, assume ordered as hyperlist
fit_result_key: the column name of fit_result_df to plot
Plots the boxplot with 1.5 IQR
"""
assert len(hyperlist) == len(fit_result_dfs)
hyper_value = hyperlist[0]
try:
# Get length of the hyperparam
label_length = len(hyper_value)
plot_height = 5 + label_length * 0.2
except TypeError:
plot_height = 5
if len(hyperlist) > 10:
figsize = (len(hyperlist), plot_height)
else:
figsize = (8, plot_height)
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=figsize)
data = [fit_result_df[fit_result_key].values for fit_result_df in fit_result_dfs]
pos = list(range(1, len(hyperlist) + 1))
plt.boxplot(data, positions=pos, showmeans=True)
axes.yaxis.grid(True)
# avg = np.average(data, axis=1)
# std = np.std(data, axis=1)
# df = np.array([len(fit_result_df) - 1 for fit_result_df in fit_result_dfs ])
# min_val = np.min(data)
# plt.errorbar(pos, avg, yerr=stats.t.ppf(0.95, df) * std, color='r')
plt.xlabel(xlabel, fontsize=20)
plt.ylabel(ylabel,fontsize=20)
plt.title(title, fontsize=20)
plt.xticks(pos, hyperlist)
plt.yticks(fontsize=15)
labels = axes.get_xticklabels()
plt.setp(labels, rotation=90, fontsize=15)
    if hyper_color_labels is not None:
for hyper, color, label in hyper_color_labels:
assert hyper in hyperlist
loc = pos[hyperlist.index(hyper)]
left_loc = loc - hyper_color_label_margin
right_loc = loc + hyper_color_label_margin
plt.axvline(left_loc, color=color, label=label)
plt.axvline(right_loc, color=color)
# plt.axvline(left_loc, color=color, label=label, linestyle='dashed')
# plt.axvline(right_loc, color=color, linestyle='dashed')
    if horizontal_line_color_labels is not None:
for line, color, label in horizontal_line_color_labels:
plt.axhline(line, color=color, label=label)
# plt.axhline(line, color=color,label=label, linestyle='dashed')
plt.legend(loc='best')
plt.tight_layout()
if filename:
fig.savefig(filename)
print("Plot saved to ", filename)
plt.show()
plt.close()
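# Example (illustrative, names are assumptions): plotting the per-gene MSE distribution for
# each hyperparameter setting returned by cross_validate.
#   plot_hyper_boxplot(hyperlist_to_labellist([0.1, 0.01]), dfs, "mse",
#                      ylabel="Per-gene MSE", filename="hyper_boxplot.pdf")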
def plot_corr_matrix(corr_matr, labels, ylabels=None, title='Correlation matrix', cmap=copy.deepcopy(plt.cm.Blues),
xlabel="Hyperparameters", ylabel="Hyperparameters", filename=None,
min_thresh=0, max_thresh=1, figsize=None,
change_bad_col=True,
bad_col=(1, 0.7, 0.4, 1),
change_over_col=False,
over_col='k',
xticksize=12,
yticksize=12,
titlesize=20,
xlabelsize=20,
ylabelsize=20,
rotate_x=True,
rotate_x_by=90,
colorbar_label="",
colorbar_ticksize=12,
colorbar_labelsize=12,
change_x_pad=False,
x_pad=10,
change_y_pad=False,
y_pad=10,
do_special_marker=False,
special_marker_rows=[],
special_marker_columns=[],
special_marker_color='k',
special_marker_type='*',
special_marker_size=80,
):
"""
:param corr_matr:
:param labels: For the x-axis. Can also be for y-axis
:param ylabels: For y-axis
:param title:
:param cmap:
:param xlabel:
:param ylabel:
:param filename:
:param min_thresh:
:param max_thresh:
:return:
"""
if len(labels) != corr_matr.shape[1]:
raise ValueError("X Labels must have same shape as # corr matrix cols")
    # Fall back to the x-axis labels when no y-axis labels are given
    if ylabels is None:
        ylabels = labels
    if len(ylabels) != corr_matr.shape[0]:
        raise ValueError("Y labels must have same shape as # corr matrix rows")
    if figsize is None:
if corr_matr.shape[0] < 16:
figsize = ((8,8))
else:
figsize = ((corr_matr.shape[1]/2), (corr_matr.shape[0]/2))
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=figsize)
if change_bad_col:
cmap.set_bad(bad_col)
if change_over_col:
cmap.set_over(over_col)
if do_special_marker:
print("Doing special marker. Be wary of wrong turns.")
assert len(special_marker_rows) == len(special_marker_columns)
plt.scatter(special_marker_columns, special_marker_rows, color=special_marker_color,
s=special_marker_size, marker=special_marker_type)
plt.xlim((-0.5, len(labels) - 0.5))
plt.ylim((-0.5, len(ylabels) -0.5))
plt.gca().invert_yaxis()
plt.imshow(corr_matr, interpolation='nearest', cmap=cmap)
cb = plt.colorbar(extend='both')
plt.clim(min_thresh, max_thresh)
cb.ax.tick_params(labelsize=colorbar_ticksize)
cb.set_label(colorbar_label, rotation=90, fontsize=colorbar_labelsize)
#
# axes.set_yticklabels(ylabels, ha='right')
# axes.set_xticklabels(labels, va='top')
# if do_special_marker:
# print "Doing special marker. Be wary of wrong turns."
# assert len(special_marker_rows) == len(special_marker_columns)
# plt.scatter(special_marker_columns, special_marker_rows, color=special_marker_color,
# s=special_marker_size, marker=special_marker_type)
# plt.xlim((-0.5, len(labels) - 0.5))
# plt.ylim((-0.5, len(ylabels)-0.5))
# plt.imshow(corr_matr, interpolation='nearest', cmap=cmap)
xtick_marks = np.arange(len(labels))
plt.xticks(xtick_marks, labels, fontsize=xticksize, va='top')
if rotate_x:
plabels = axes.get_xticklabels()
plt.setp(plabels, rotation=rotate_x_by, fontsize=xticksize, va='top', ha='center')
ytick_marks = np.arange(len(ylabels))
plt.yticks(ytick_marks, ylabels, fontsize=yticksize, ha='right', va='center')
plt.title(title, fontsize=titlesize)
plt.xlabel(xlabel, fontsize=xlabelsize)
plt.ylabel(ylabel, fontsize=ylabelsize)
# position = fig.add_axes([1, 0.01, 0.02, 0.85])
# # cax, kw = mpl.colorbar.make_axes(fig_ax)
# mpl.colorbar.colorbar_factory(position, im)
plt.tight_layout()
if change_x_pad:
axes.tick_params(axis='x', which='major', pad=x_pad)
if change_y_pad:
axes.tick_params(axis='y', which='major', pad=y_pad)
if filename:
fig.savefig(filename, bbox_inches="tight", pad_inches=0.5)
print("Plot saved to ", filename)
plt.show()
plt.close()
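# Example (illustrative): plotting a small cause-by-effect coefficient matrix; the matrix
# and gene names below are made up for demonstration.
#   corr = np.random.rand(3, 3)
#   plot_corr_matrix(corr, labels=["g1", "g2", "g3"], title="Coefficients",
#                    xlabel="Effect", ylabel="Cause", filename="coef_matrix.pdf")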
def plot_coef(df, cause_gene, effect_gene, lag, coef, savefile=True, file_prefix="", title=None,
cause_label=None, effect_label=None, legend_fontsize=None, ylabel=None, is_ensg=False,
file_type=".pdf", **kwargs):
    if title is None:
        title = "Cause-Effect pair: (" + cause_gene + ", " + effect_gene + ")"
    if savefile:
filename = file_prefix + cause_gene.replace(".", ",") + "-" + effect_gene.replace(".", ",") + "-lag-" + str(int(lag)) + "-coef-" + float_to_label(coef) + file_type
print("Filename: ", filename)
else:
filename = None
# if is_ensg:
# label_spacing = 20
# else:
# label_spacing = 10
pco = np.round(coef, 2)
    if cause_label is None:
cause_label = "Cause: " + cause_gene+ " (Coef = " + str(pco) + ", Lag = " + str(lag) + ")"
# cause_label = "Cause: " + ("{0:>" + str(label_spacing) + "}").format(cause_gene) + " (Coef = " + str(pco) + ", Lag = " + str(lag) + ")"
    if effect_label is None:
effect_label = "Effect: " + effect_gene
# effect_label = "Effect: " + ("{0:>" + str(label_spacing) + "}").format( effect_gene)
gtm.plot_genes(df, [cause_gene, effect_gene], title=title, filename=filename, gene_labels=[cause_label, effect_label], plot_outside=False,
legend_fontsize=legend_fontsize, ylabel=ylabel, **kwargs)
def plot_all_coef(acoefs, df, genes, min_coef=0.01, file_prefix=None, savefile=True, verbose=False, **kwargs):
"""
acoefs: lag x n x n matrix, causes are rows, effets are columns
df: original gene df to plot
genes: list of genes
min_coef: min entry size to be plotted
"""
if verbose:
print("Min Coef to plot: ", min_coef)
# iterate down the coefs of each effect gene. This is one effect gene per column.
for j in range(acoefs.shape[2]):
out_gene = genes[j]
# acoef is a lag x n causes matrix
acoef = acoefs[:, :, j]
# preds is a lag x n index matrix of the good coefficients
preds = np.where(np.absolute(acoef) > min_coef)
pcoefs = acoef[preds]
cause_num = len(preds[0])
if verbose:
if cause_num > 0:
print("Out gene: ", out_gene)
print("Causes: ", cause_num)
for i in range(cause_num):
# the lag
lag = preds[0][i] + 1
cause_gene = genes[preds[1][i]]
effect_gene = out_gene
coef = pcoefs[i]
plot_coef(df, cause_gene, effect_gene, lag, coef, savefile=savefile, file_prefix=file_prefix, **kwargs)
# # the lags are encoded in the first index of acoefs
# lags = preds[0] + 1
# cgenes = genes[preds[1]]
# cos = np.round(acoef, 2)[preds]
#
# if verbose:
# print "Out gene: ", out_gene
#
# for i in range(len(preds)):
#
# plag = lags[i]
# cause_gene = cgenes[i]
# effect_gene = out_gene
# coef = cos[i]
#
# plot_coef(df, cause_gene, effect_gene, plag, coef, savefile=savefile, file_prefix=file_prefix, **kwargs)
# def plot_all_coefs(acoefs, df, genes, lag, min_coef=0.01, num_per_plot=3, file_prefix=None, title_prefix="Causal genes for ", verbose=False,
# **kwargs):
# """
# acoefs: aligned coefs, of form (lag x n x n), acoefs[i] is lag_i+1
# df: Original TS
# genes: list of all genes
# """
#
# # iterate down the coefs of each effect gene. This is one effect gene per column.
# for j in range(acoefs.shape[2]):
# out_gene = genes[j]
#
# acoef = acoefs[:, :, j]
# preds = np.where(np.absolute(acoef) > min_coef)
#
# lags = preds[0] + 1
# cgenes = genes[preds[1]]
# cos = np.round(acoef, 2)[preds]
#
# num_plots = int(np.ceil(len(cgenes) * 1.0 / num_per_plot))
#
# if verbose:
# print "Out gene: ", out_gene
#
# for i in range(num_plots):
# plags = lags[i * len(cgenes)/num_plots: (i+ 1) * len(cgenes)/num_plots]
# pgenes = cgenes[i * len(cgenes)/num_plots: (i+ 1) * len(cgenes)/num_plots]
# pcos = cos[i * len(cgenes)/num_plots: (i+ 1) * len(cgenes)/num_plots]
# labels = ["{0:>20}".format(out_gene + ":") + " Coef, Lag" ] + ["{0:>20}".format(pgene + ":") + " " + str(pco) + ", " + str(plag) for pgene, pco, plag in zip(pgenes, pcos, plags)]
#
# if verbose:
# print "Part: ", i + 1
# print "Lag points: ", plags
# print "Pred genes:", pgenes
# print "Pred coefs: ", pcos
# print "Labels are ", labels
#
# plot_genes(df, [out_gene] + list(pgenes), title=title_prefix + out_gene + " , Part " + str(i+1),
# filename=None if file_prefix == None else file_prefix + out_gene.replace(".", ",") + "_lag-" + str(lag) + \
# "-" + "-".join([x.replace(".", ",") for x in list(pgenes)]),
# gene_labels=labels, plot_outside=False,
# **kwargs)
def fit_all(X_matr, Y_matr, rows, lag, fit_method, save_prefix=None, save_XY=True, verbose=False,
has_reps=False, bootstrap=False, seed=None, only_array=False, **kwargs):
"""
X_matr: m x T of X's
Y_matr: n x T of Y's
rows: rows of X to replace in Y. There should be as many rows to replace in X_matr as there are in Y_matr
lag:
fit_method:
bootstrap:
seed:
only_array: If True, return coefs as a matrix instead
Perform the fit for each Y_matr
Save each X_t and Y_t?
Return: coefs: (m*(lag), m) with the columns n (corresponding w/ rows in X_matr) filled in, intercepts: (1, m), fit_result_df: (n, num_results)
"""
assert len(rows) == Y_matr.shape[0]
assert X_matr.shape[1] == Y_matr.shape[1]
if has_reps:
assert X_matr.shape[2] == Y_matr.shape[2]
# m: number of predictor genes
# n: number of response gene
m = X_matr.shape[0]
n = Y_matr.shape[0]
T = X_matr.shape[1]
coefs = np.zeros((m*lag, m))
intercepts = np.zeros((1, m))
fit_results = []
for i, row in zip(list(range(Y_matr.shape[0])), rows):
if has_reps:
Y = np.reshape(Y_matr[i,], (1, T, Y_matr.shape[2]))
else:
Y = np.reshape(Y_matr[i,], (1, T))
fit_result_dict = collections.OrderedDict()
fit_result_dict["row"] = row
if "hyper" in kwargs:
fit_result_dict["hyper"] = kwargs["hyper"]
X_t, Y_t, Y_pred, coef, intercept, fit_result = fm.perform_test(X_matr=X_matr, Y_matr=Y,
lag=lag, fit_method=fit_method,
replace_row=row,
has_reps=has_reps,
bootstrap=bootstrap,
seed=seed,
**kwargs)
fit_result_dict.update(fit_result)
# These are cause by effect
coefs[:, row] = coef.flatten()
intercepts[:, row] = intercept
fit_results.append(fit_result_dict)
if verbose:
print(i, row)
print("X: ", X_matr)
print("Y: ", Y)
print("X_t: ", X_t)
print("Y_t: ", Y_t)
print("Y_pred: ", Y_pred)
print("Checking Y_pred: ", fm.compute_fit(X_t, Y_t, coef, intercept))
print("coef: ", coef)
print("intercept: ", intercept)
print("fit result: ", fit_result)
fit_result_df = pd.DataFrame(fit_results)
print("Finished fitting all")
# remember coefs are cause-by-effect.
if only_array:
return coefs[:, rows], intercepts[:, rows], fit_result_df
else:
return coefs, intercepts, fit_result_df
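# Example (illustrative, shapes are assumptions; the `hyper` keyword is assumed to be
# forwarded to the underlying fit method): fitting every gene against all others.
#   X = np.random.rand(10, 8)
#   coefs, intercepts, fit_df = fit_all(X, X, rows=list(range(10)), lag=2,
#                                       fit_method=fm.fit_lasso, hyper=0.1)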
def fit_all_random(X_matr, rand_X_matr, Y_matr, rows, lag, fit_method, save_prefix=None, save_XY=True, verbose=False,
has_reps=False, bootstrap=False, seed=None, only_array=False, **kwargs):
"""
X_matr: m x T (x r) of X's
rand_X_matr: m x T ( x r) of X's. Same as X_matr except each row (within each replicate) is permuted independently and randomly
Y_matr: n x T (x r) of Y's
rows: rows of X to replace in Y. There should be as many rows to replace in X_matr as there are in Y_matr
lag:
fit_method:
only_array: just return the relevant rows of the array
Perform the fit for each Y_matr
Save each X_t and Y_t?
Return: coefs: (m*(lag), m) with the columns n (corresponding w/ rows in X_matr) filled in, intercepts: (1, m), fit_result_df: (n, num_results)
"""
assert len(rows) == Y_matr.shape[0]
assert X_matr.shape[1] == Y_matr.shape[1]
assert rand_X_matr.shape[1] == X_matr.shape[1]
if has_reps:
assert X_matr.shape[2] == Y_matr.shape[2]
assert rand_X_matr.shape[2] == X_matr.shape[2]
m = X_matr.shape[0]
n = Y_matr.shape[0]
T = X_matr.shape[1]
coefs = np.zeros((m*lag, m))
# We keep no intercepts
# intercepts = np.zeros((1, m, m))
fit_results = []
for i, row in zip(list(range(n)), rows):
if has_reps:
Y = np.reshape(Y_matr[i,], (1, T, Y_matr.shape[2]))
else:
Y = np.reshape(Y_matr[i,], (1, T))
fit_result_dict = collections.OrderedDict()
fit_result_dict["row"] = row
if "hyper" in kwargs:
fit_result_dict["hyper"] = kwargs["hyper"]
coef, fit_result, coef_temp, coef_temps, intercept_temps = fm.perform_test_random(X_matr=X_matr, rand_X_matr=rand_X_matr,
Y_matr=Y,
lag=lag, fit_method=fit_method,
replace_row=row,
has_reps=has_reps,
bootstrap=bootstrap, seed=seed,
**kwargs)
fit_result_dict.update(fit_result)
# These are cause by effect
coefs[:, row] = coef.flatten()
fit_results.append(fit_result_dict)
if verbose:
print(i, row)
print("X: ", X_matr)
print("Y: ", Y)
# print "X_t: ", X_t
# print "Y_t: ", Y_t
# print "Y_pred: ", Y_pred
# print "Checking Y_pred: ", fm.compute_fit(X_t, Y_t, coef, intercept)
print("coef: ", coef)
print("fit result: ", fit_result)
fit_result_df = | pd.DataFrame(fit_results) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 11:51:21 2019
@author: mahparsa
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 6 13:57:17 2019
@author: mahparsa
This aims to calculate the similarity between the sentences of an interview.
It includes all of the features.
"""
import nltk
import numpy as np
from nltk.corpus import PlaintextCorpusReader
from nltk.corpus import gutenberg
import pandas as pd
import nltk
import gensim
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from gensim import corpora, models
from gensim.models.coherencemodel import CoherenceModel
from gensim.models.ldamodel import LdaModel
import nltk
nltk.download('wordnet')
import nltk
from gensim import utils
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from gensim.models import Word2Vec
from nltk.tokenize import word_tokenize
from gensim.parsing.preprocessing import stem_text
from nltk.stem import PorterStemmer
import collections
from collections import Counter
from stemming.porter2 import stem
from gensim import corpora, models
from gensim.models.coherencemodel import CoherenceModel
from gensim.models.ldamodel import LdaModel
from numpy import array
nltk.download('averaged_perceptron_tagger')
stop_words = set(stopwords.words('english'))
#from nltk.stem import LancasterStemmer
from nltk.probability import FreqDist
from nltk.tokenize import word_tokenize
from difflib import SequenceMatcher
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
def percentage(count, total):
return 100 * count / total
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
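# Example (illustrative): similar("abcd", "abce") -> 0.75 (three of the four characters match)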
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
def lexical_diversity(text):
return len(set(text)) / len(text)
def READ_INT( parameters ):
"use the root and read files and make a list of that"
corpus_root = parameters # Mac users should leave out C:
corpus = PlaintextCorpusReader(corpus_root, '.*txt') #
doc = pd.DataFrame(columns=['string_values'])
for filename in corpus.fileids():
value1=corpus.raw(filename)
doc = doc.append({'string_values': value1}, ignore_index=True)
docs=doc.values.tolist()
return [docs]
def Pre_Word( doc ):
#provide a list of words.
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
m=str(doc)
mm=m.lower()
mmm=lemmatizer.lemmatize(mm)
return [mmm]
from nltk import sent_tokenize
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def get_cosine_sim(*strs):
vectors = [t for t in get_vectors(*strs)]
return cosine_similarity(vectors)
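# Note: get_vectors is referenced above but not defined in this excerpt. A minimal sketch
# using the imported CountVectorizer (an assumption about the original helper's intent):
def get_vectors(*strs):
    # Build a bag-of-words count matrix with one row per input string
    text = [t for t in strs]
    vectorizer = CountVectorizer()
    vectorizer.fit(text)
    return vectorizer.transform(text).toarray()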
docs_A=READ_INT('SZ_A_7')
def Coherence_M_2(docs_A):
#ambigous pronouns
#We can count how often a pronouns occurs in a text, and compute what percentage of the text is taken up by a specific pronouns
#Measure the percentage of the third pronouns to all words.
Pronunce_words = set('he him his himself she her hers herself it its itself they them their theirs themselves'.split())
Coh_M = np.array([])
MyDoc = docs_A
Sim_Sent_Doc = []
for k in range(len(MyDoc)):
doc=[]
doc=str(MyDoc[k])
Sent_doc=sent_tokenize(doc)
tokenized_word=word_tokenize(doc)
word_tokens = [w for w in tokenized_word if w.isalpha()]
Third_Pronouns = [w for w in word_tokens if w in Pronunce_words]
U_Third_Pronouns=list(set(Third_Pronouns))
U_word_tokens=list(set(word_tokens))
Co_3=len(U_Third_Pronouns)/len(U_word_tokens)
Coh_M=np.append( Coh_M, Co_3)
return[Coh_M]
#
def Coherence_M_3(docs_A):
#ratio of the first pronouns to all pronouns.
import pandas as pd
A_Pronunce_words = set('I my myself mine we us ourselves ours'.split())
Coh_M = np.array([])
MyDoc = docs_A
Sim_Sent_Doc = []
for k in range(len(MyDoc)):
doc=[]
doc=str(MyDoc[k])
tokenized_word=[]
tokenized_word=word_tokenize(doc)
word_tokens=[]
word_tokens = [w for w in tokenized_word if w.isalpha()]
tagg = pd.DataFrame()
tagg=pd.DataFrame(nltk.pos_tag(word_tokens))
        # np.where returns a tuple; take the index array so len() counts pronouns rather than 1
        Index_NP = np.where(tagg[1] == "PRP")[0]
First_Pronouns = [w for w in word_tokens if w in A_Pronunce_words]
N_P=len( First_Pronouns)
N_N = len(Index_NP)
Co_5=N_P/N_N
Coh_M=np.append( Coh_M, Co_5)
return[Coh_M]
def Coherence_M_4(docs_A):
#ratio of the third pronouns to names of persons.
import pandas as pd
A_Pronunce_words = set('he him his himself she her hers herself they them their theirs themselves'.split())
Coh_M = np.array([])
MyDoc = docs_A
Sim_Sent_Doc = []
for k in range(len(MyDoc)):
doc=[]
doc=str(MyDoc[k])
tokenized_word=[]
tokenized_word=word_tokenize(doc)
word_tokens=[]
word_tokens = [w for w in tokenized_word if w.isalpha()]
tagg = | pd.DataFrame() | pandas.DataFrame |
import math
from tqdm import tqdm
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder, QuantileTransformer
from sklearn.neighbors import NearestNeighbors
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
'''
In this file we have all the preprocessing methods
1- Intrinsic dimensionality
2- define feature and target
3- identify numeric and categorical features
'''
"""
Python Implementation of 'Maximum Likelihood Estimation of Intrinsic Dimension' <NAME> and <NAME>, Advances in neural information processing systems, 2005
----------
The goal is to estimate the intrinsic dimensionality of the data. The estimate is scale dependent
(depending on how much you zoom into the data distribution you can find a different dimensionality), so the authors
propose averaging it over different scales; the scale interval [k1, k2] supplies the only parameters of the algorithm.
This code also provides a way to repeat the estimation with bootstrapping in order to estimate uncertainty.
"""
def load_data(data, filetype):
"""
loads data from CSV file into dataframe.
Parameters
----------
data : String
path to input dataset
filetype : String
type of file
Returns
----------
df_Data : dataframe
dataframe with data
"""
if filetype == "csv":
df_Data = pd.read_csv(data)
else:
df_Data = pd.read_excel(data)
df_Data = df_Data.sample(1000)
df_Data.reset_index(inplace = True)
return df_Data
def intrinsic_dim_sample_wise(X, k=5):
"""
Computes intrinsic dimensionality based on sample.
Parameters
----------
X : dataframe, dataset
k : integer, number of neighbors
Returns
----------
    intdim_sample : numpy array, per-sample intrinsic dimensionality estimates
"""
neighb = NearestNeighbors(n_neighbors=k + 1).fit(X)
dist, ind = neighb.kneighbors(X)
dist = dist[:, 1:]
dist = dist[:, 0:k]
assert dist.shape == (X.shape[0], k)
assert np.all(dist > 0)
d = np.log(dist[:, k - 1: k] / dist[:, 0:k-1])
d = d.sum(axis=1) / (k - 2)
d = 1. / d
intdim_sample = d
return intdim_sample
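# For reference, the estimator implemented above is the MLE from the paper cited at the top
# of this file:
#   m_hat(x) = [ (1 / (k - 2)) * sum_{j=1..k-1} log( T_k(x) / T_j(x) ) ]^(-1)
# where T_j(x) is the distance from x to its j-th nearest neighbour.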
def intrinsic_dim_scale_interval(X, k1=10, k2=20):
"""
Computes intrinsic dimensionality for a given scale interval.
Parameters
----------
X : dataframe, dataset
k1 : integer, number of neighbors (start of range)
k2 : integer, number of neighbors (end of range)
Returns
----------
intdim_k : integer, intrinsic dimensionality for a given scale interval
"""
X = | pd.DataFrame(X) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# author:zhengk
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
from matplotlib.font_manager import FontProperties
def timeline_plot():
df_ori = | pd.read_csv('articles.csv', sep=';', header=None) | pandas.read_csv |
import numpy as __np
import pandas as __pd
import ruptures as __rpt
import matplotlib.pyplot as __plt
from sklearn.preprocessing import LabelEncoder as __LabelEncoder
def create_roll(ser_, nb_roll=100):
ser_ = ser_.rolling(nb_roll, min_periods=1).mean()
min_ser = min(ser_)
max_ser = max(ser_ * 1.3)
return ser_, min_ser, max_ser
def iqr_detect_outlier(data):
"""
Detect outliers in data given Boxplot rule:
X is an outlier <=> X>q3+1.5*IQR or X<q1-1.5*IQR
Args:
data (Array): list of values to check outliers
"""
q1, q3 = __np.percentile(data, [25, 75])
iqr = q3 - q1
cut_off = iqr * 1.5
lower_bound, upper_bound = q1 - cut_off, q3 + cut_off
return data[(data <= lower_bound) | (data >= upper_bound)]
def zscore_detect_outlier(data):
"""
Detect outliers in data given Zscore rule:
X is an outlier <=> |X|>E(X)+3*S(X)
Args:
data (Array): list of values to check outliers
"""
threshold = 3
mean = __np.mean(data)
std = __np.std(data)
z_score = __np.abs((data - mean) / std)
    outliers_idx = z_score[z_score > threshold].index
    # Use label-based indexing: .index returns labels, not integer positions
    return data.loc[outliers_idx]
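# Example (illustrative): both detectors expect a pandas Series.
#   s = __pd.Series([1, 2, 3, 2, 1, 250])
#   iqr_detect_outlier(s)     # flags the 250 entry
#   zscore_detect_outlier(s)  # returns nothing here, since no |z| exceeds 3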
def shift_detect(data, model="l2", width=40, noise_std=4, debug=False):
"""
Shift detection using window based method (see gitlab wiki for more info)
Args:
data (Array) : list of values to check outliers
model (String) : which distance to use
width (int) : Window width
noise_std(float): std for estimated noise
debug (Bool) : to display shift in data
Returns:
List: shift starting points
"""
n = len(data)
pen = __np.log(n) * noise_std ** 2
algo = __rpt.Window(width=width, model=model).fit(data)
shifts = algo.predict(pen=pen)
if debug:
__rpt.show.display(data, shifts, figsize=(10, 6))
__plt.show()
return shifts[:-1]
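# Example (illustrative): a synthetic signal with a level shift at index 200.
#   sig = __np.concatenate([__np.random.normal(0, 1, 200), __np.random.normal(6, 1, 200)])
#   shift_detect(sig, width=40)   # expected to return a breakpoint near 200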
def get_normalized_serie(df, col):
normalized = ((df[col] - df[col].min()) / (df[col].max() - df[col].min()) + 1)
return normalized
def encode_serie(serie):
lbl = __LabelEncoder()
lbl.fit(serie.apply(str).values)
serie = lbl.transform(list(serie.apply(str).values)) # Ajout du str (sinon fonctionne pas sur nombre)
return serie, lbl
# /Users/thibaud/Documents/Python_scripts/02_Projects/SNCF/open_data/sncf_utils.py
def transform_category_to_color(df, col_name, colors=None):
if colors is None:
colors = ['red', 'blue', 'green', 'yellow', 'orange'] * 50
ser = df[col_name]
ser, _ = encode_serie(ser)
ser = __pd.Series(ser).apply(lambda x: colors[int(x)])
return ser
def min_max_date(df):
min_date = df['Year'].min()
max_date = df['Year'].max()
if min_date > max_date:
tmp = min_date
min_date = max_date
max_date = tmp
return min_date, max_date
def get_dummy(df, col):
"""
Transform a pandas Series into dummies col
"""
ser_ = df[col]
dummies_data = __pd.get_dummies(ser_, drop_first=False)
dummies_data.columns = ['{}_{}'.format(col, i) for i in dummies_data.columns]
df = | __pd.concat([df, dummies_data], axis=1) | pandas.concat |
######
# Author: <NAME>
# this file loads and organizes
# Foundation data for further use
######
import numpy as np
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
from datetime import datetime,timedelta
from tqdm import tqdm
import matplotlib.gridspec as gridspec
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
import copy
# function to load the data
def load_data():
print("loading data...")
art_metadata_df = pd.read_csv("data/nft_metadata.csv")
bidding_df = pd.read_csv("data/bid_data.csv")
bidding_df['bidding_dt'] = bidding_df.bid_date.apply(lambda x: datetime.utcfromtimestamp(int(x)))
bidding_df['bidding_d'] = bidding_df.bidding_dt.apply(lambda x: x.date())
listing_df = pd.read_csv("data/list_data.csv")
listing_df['list_dt'] = listing_df.listing_date.apply(lambda x: datetime.utcfromtimestamp(int(x)))
listing_df['listing_d'] = listing_df.list_dt.apply(lambda x: x.date())
minting_df = pd.read_csv("data/mint_data.csv")
minting_df['mint_dt'] = minting_df.mint_date.apply(lambda x: datetime.utcfromtimestamp(int(x)))
minting_df['minting_date'] = minting_df.mint_dt.apply(lambda x: x.date())
daily_ether_price = pd.read_csv("data/daily-usd-ether-data.csv")
daily_ether_price['d'] = daily_ether_price['Date(UTC)'].apply(lambda x: datetime.strptime(x, '%m/%d/%Y'))
daily_ether_price['d'] = daily_ether_price.d.apply(lambda x: x.date())
# map ether price
bidding_df = pd.merge(bidding_df, daily_ether_price[['Value','d']], how='left', left_on='bidding_d',
right_on='d')
bidding_df['cost_at_time'] = bidding_df.bidding_amt*bidding_df.Value
bidding_df = bidding_df[['token_id','bidding_amt','bid_date','bidding_dt','bidding_d',
'creator','bidder_id','cost_at_time']]
listing_df = pd.merge(listing_df, daily_ether_price[['Value','d']], how='left', left_on='listing_d',
right_on='d')
listing_df['cost_at_time'] = listing_df.listing_amt*listing_df.Value
listing_df = listing_df[['token_id','listing_amt','creator',
'listing_date','list_dt','listing_d','cost_at_time']]
## reselling
second_list_dt = []
token_id_list = []
for i in tqdm(listing_df.token_id.unique()):
if len(listing_df[listing_df.token_id == i].list_dt.tolist()) >1:
t = listing_df[listing_df.token_id == i].sort_values('list_dt')
second_list_dt.append(t.list_dt.tolist()[1])
token_id_list.append(i)
second_list_df = pd.DataFrame({'token_id':token_id_list,'second_list_dt':second_list_dt})
print("N art re-listed:", second_list_df.token_id.nunique())
tmp = pd.merge(bidding_df, second_list_df, how='left',on='token_id')
tmp['second_list_dt'] = tmp.second_list_dt.fillna(datetime.strptime('2021-9-28','%Y-%m-%d'))
print("N art resold:", tmp[tmp.bidding_dt >tmp.second_list_dt].token_id.nunique())
tmp = tmp[tmp.bidding_dt < tmp.second_list_dt]
# retain the primary art bids
bidding_df = copy.deepcopy(tmp)
####
# note the listing also shows re-selling dates
# for this we only filter the primary market
####
first_list_df = listing_df.groupby('token_id').list_dt.min().to_frame()
first_list_df['token_id'] = first_list_df.index
first_list_df.index = range(len(first_list_df))
first_list_df = pd.merge(first_list_df,listing_df, how='left', on=['token_id','list_dt'])
# retain the first listing date
listing_df = copy.deepcopy(first_list_df)
final_bid_df = bidding_df.groupby('token_id').bidding_dt.max().to_frame()
final_bid_df['token_id'] = final_bid_df.index
final_bid_df.index = range(len(final_bid_df))
final_bid_df['final_d_bidding'] = final_bid_df.bidding_dt.apply(lambda x: x.date())
final_bid_df['final_t_bidding'] = final_bid_df.bidding_dt.apply(lambda x: datetime.strftime(x, '%H:%M'))
final_bid_df.columns = ['final_dt_bidding','token_id','final_d_bidding','final_t_bidding']
max_bid_df = bidding_df.groupby('token_id').cost_at_time.max().to_frame()
max_bid_df['art'] = max_bid_df.index
max_bid_df.index = range(len(max_bid_df))
max_bid_df.columns = ['selling_price_usd','token_id']
max_bid_df_2 = bidding_df.groupby('token_id').bidding_amt.max().to_frame()
max_bid_df_2['art'] = max_bid_df_2.index
max_bid_df_2.index = range(len(max_bid_df_2))
max_bid_df_2.columns = ['selling_price_eth','token_id']
max_bid_df = pd.merge(max_bid_df, max_bid_df_2, how='inner')
max_bid_df = pd.merge(max_bid_df, listing_df[['token_id','list_dt','creator']])
max_bid_df = | pd.merge(max_bid_df, final_bid_df[['token_id','final_t_bidding','final_d_bidding','final_dt_bidding']]) | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
'''
s0_copyAttr.py
Copies attributes from Wei's request, e.g. ACCTYPE (accident type)
Sorts dataframe attributes in the pre-defined order
'''
import os
import pandas as pd
from collections import defaultdict
from s0_xlsx2csv import convert_xlsx2csv
def match(first, second):
"""
Tell if two strings match, regardless of letter capitalization
input
-----
first : string
first string
second : string
second string
output
-----
flag : bool
if the two strings are approximately the same
"""
if len(first) != len(second):
return False
for s in range(len(first)):
fir = first[s]
sec = second[s]
if fir.lower() != sec and fir.upper() != sec:
return False
return True
def find_match(col, ref_list):
"""
Tell if a string col has an approximate match in ref_list
input
-----
col : string
the string to be searched for
ref_list : array_like
the list where the search takes place
output
-----
If there is a match in the list for the col
"""
for ref_col in ref_list:
if match(col, ref_col):
if col == 'ACCTYPE':
print(ref_col)
return True
return False
def rename_col(col, ref_list):
"""
find the match and rename the col to the matched in the ref_list
input
-----
col : string
the string to be searched for
ref_list : array_like
the list where the search takes place
output
-----
remapped name
"""
for ref_col in ref_list:
if match(col, ref_col):
return ref_col
return None
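# Examples (illustrative) of the capitalization-insensitive matching above:
#   match("acctype", "ACCTYPE")              -> True
#   find_match("acctype", ["ACCTYPE", "X"])  -> True
#   rename_col("acctype", ["ACCTYPE", "X"])  -> "ACCTYPE"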
def read_defined_order(request_form='../yin_hsis_data_request.xlsx'):
"""
Read the request form and the pre-defined ordered list of attributes
input
-----
request_form : string
the data request form made by Shuyi
output
-----
attributes : dic of array_like
dictionary of ordered attribute lists for each file
"""
attributes = {}
for tab in ['acc', 'curv', 'grad', 'occ', 'peds', 'road', 'veh']:
curr = | pd.read_excel(request_form, tab) | pandas.read_excel |
from datetime import datetime
from functools import reduce
import logging
from multiprocessing import cpu_count
from multiprocessing import Pool
import os
from bs4 import BeautifulSoup
import urllib
import pandas as pd
import numpy as np
NCORES = cpu_count()
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.DEBUG)
def retrieve_general_data(url_page):
"""Scraping items from search page.
:param url_page: url of webpage to make the scraping.
:type url_page: str
:return: Dataframe with information of each item
:rtype: pandas.DataFrame
:Example:
>>> from scraping_details import retrieve_general_data
>>> url_page = 'https://www.infocasas.com.uy/venta/inmuebles/montevideo/pagina3'
>>> retrieve_general_data(url_page)
"""
logging.debug('%s', url_page)
url_base = '/'.join(url_page.split('/')[:3])
try:
page = urllib.request.urlopen(url_page)
except urllib.error.HTTPError as e:
print('HTTPError: {}'.format(e.code))
return pd.DataFrame([])
except urllib.error.URLError as e:
print('URLError: {}'.format(e.reason))
return pd.DataFrame([])
soup = BeautifulSoup(page, 'html.parser')
next_page = (soup.find('div', attrs={'id': 'paginado'})
.find('a', attrs={'class': 'next'}))
if next_page and (url_page < next_page.attrs['href']):
result = pd.DataFrame({})
else:
table = soup.find_all('div', attrs={'class': 'propiedades-slider'})
neighborhood = [
[k.text for k in p.find_all('p')] for t in table
for p in t.find_all('div')
if 'singleLineDots' in p['class']
]
price = [p.text.split()[-1] for t in table
for p in t.find_all('div') if 'precio' in p['class']]
desc = [[k.text for k in p.find_all('p')] for t in table
for p in t.find_all('div') if
'inDescription' in p['class']]
desc = [k[0] for k in desc]
details = [[d.find_all('span')[0].text for d in p.find_all('div')]
for t in table for p in t.find_all('div')
if 'contentIcons' in p['class']]
details = pd.DataFrame(details,
columns=['rooms', 'bathrooms', 'area_m2'])
data_id = [k.get('data-id', '') for k in table]
data_idproject = [k.get('data-idproyecto', '') for k in table]
link = [url_base + k.find('a')['href'] for k in table]
proyecto_label = [
k.find(class_='proyectoLabel').get_text() if k.find(
class_='proyectoLabel') else None for k in table]
df = pd.DataFrame(neighborhood, columns=['neighborhood', 'type'])
df['price'] = price
df['desc'] = desc
df['url'] = link
df['id'] = data_id
df['idproject'] = data_idproject
df['project_label'] = proyecto_label
df['page'] = url_page
result = pd.concat([details, df], axis=1)
return result
def retrieve_property_details(url_page):
"""Scraping details of a item from its web page.
:param url_page: url of webpage to make the scraping.
:type url_page: str
:return: Dataframe with information of each item
:rtype: pandas.DataFrame
:Example:
>>> from scraping_details import retrieve_property_details
>>> url_page = 'https://www.infocasas.com.uy/venta-edificio-o-local-jacinto-vera-ideal-colegios-o-empresa-1554m/185865494?v'
>>> retrieve_property_details(url_page)
"""
logging.debug('%s', url_page)
try:
page = urllib.request.urlopen(url_page)
except urllib.error.HTTPError as e:
print('HTTPError: {}'.format(e.code))
        return pd.Series({'url': url_page})  # keep the key consistent with the success path
except urllib.error.URLError as e:
print('URLError: {}'.format(e.reason))
        return pd.Series({'url': url_page})  # keep the key consistent with the success path
soup = BeautifulSoup(page, 'html.parser')
ficha_tecnica = soup.find_all(class_='ficha-tecnica')
amenities = soup.find_all(id='amenities')
description = soup.find(id='descripcion')
agency = soup.find('p', class_='titulo-inmobiliaria')
price = soup.find('p', class_='precio-final')
title = soup.find('h1', class_='likeh2 titulo one-line-txt')
kind = soup.find('p', class_='venta')
# visitation = soup.find_all(class_='allContentVisitation')
details = {item.find('p').get_text()[:-1].replace(' ', '_'): item.find('div').get_text()
for item in ficha_tecnica[0].find_all(class_='lista')} if ficha_tecnica else {}
details['extra'] = ','.join(
[key.find('p').get_text() for key in amenities[0].find_all(class_='lista active')]) if amenities else ''
details['descripcion'] = '. '.join([p.get_text() for p in description.find_all('p')]) if description else ''
details['url'] = url_page
details['inmobiliaria'] = agency.get_text() if agency else ''
details['precio'] = price.get_text() if price else ''
details['titulo_publicacion'] = title.get_text() if title else ''
details['tipo_propiedad'] = kind.get_text() if kind else ''
return | pd.Series(details) | pandas.Series |
import thunderbolt
import unittest
from unittest.mock import patch
from mock import MagicMock
from contextlib import ExitStack
import pandas as pd
from thunderbolt.client.local_directory_client import LocalDirectoryClient
from thunderbolt.client.gcs_client import GCSClient
from thunderbolt.client.s3_client import S3Client
class TestThunderbolt(unittest.TestCase):
def setUp(self):
def get_tasks():
return []
module_path = 'thunderbolt.client'
with ExitStack() as stack:
for module in ['local_directory_client.LocalDirectoryClient', 'gcs_client.GCSClient', 's3_client.S3Client']:
stack.enter_context(patch('.'.join([module_path, module, 'get_tasks']), side_effect=get_tasks))
self.tb = thunderbolt.Thunderbolt(None, use_cache=False)
def test_get_client(self):
source_workspace_directory = ['s3://', 'gs://', 'gcs://', './local', 'hoge']
source_filters = []
source_tqdm_disable = False
target = [S3Client, GCSClient, GCSClient, LocalDirectoryClient, LocalDirectoryClient]
for s, t in zip(source_workspace_directory, target):
output = self.tb._get_client(s, source_filters, source_tqdm_disable, False)
self.assertEqual(type(output), t)
def test_get_tasks_dic(self):
tasks_list = [{
'task_name': 'task',
'last_modified': 'last_modified_2',
'task_params': 'task_params_1',
'task_hash': 'task_hash_1',
'task_log': 'task_log_1'
}, {
'task_name': 'task',
'last_modified': 'last_modified_1',
'task_params': 'task_params_1',
'task_hash': 'task_hash_1',
'task_log': 'task_log_1'
}]
target = {
0: {
'task_name': 'task',
'last_modified': 'last_modified_1',
'task_params': 'task_params_1',
'task_hash': 'task_hash_1',
'task_log': 'task_log_1'
},
1: {
'task_name': 'task',
'last_modified': 'last_modified_2',
'task_params': 'task_params_1',
'task_hash': 'task_hash_1',
'task_log': 'task_log_1'
}
}
output = self.tb._get_tasks_dic(tasks_list=tasks_list)
self.assertDictEqual(output, target)
def test_get_task_df(self):
self.tb.tasks = {
'Task1': {
'task_name': 'task_name_1',
'last_modified': 'last_modified_1',
'task_params': 'task_params_1',
'task_hash': 'task_hash_1',
'task_log': 'task_log_1'
}
}
target = pd.DataFrame({
'task_id': ['Task1'],
'task_name': ['task_name_1'],
'last_modified': ['last_modified_1'],
'task_params': ['task_params_1'],
'task_hash': ['task_hash_1'],
'task_log': ['task_log_1']
})
output = self.tb.get_task_df(all_data=True)
pd.testing.assert_frame_equal(output, target)
target = pd.DataFrame({
'task_id': ['Task1'],
'task_name': ['task_name_1'],
'last_modified': ['last_modified_1'],
'task_params': ['task_params_1'],
})
output = self.tb.get_task_df(all_data=False)
| pd.testing.assert_frame_equal(output, target) | pandas.testing.assert_frame_equal |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: | pd.Timestamp("2013-05-25 00:00:00") | pandas.Timestamp |
import numpy as np
import pandas as pd
import seaborn as sns; sns.set(style="white", color_codes=True)
import matplotlib.pyplot as plt
import os
import sys
if not os.path.exists('input_file'):
print('*'*30)
print('Run this Python code in the outputs directory \n E.g. python ../make_joint_distribution_plots.py')
print('*'*30)
sys.exit(0)
batch_col = 0
for line in open('input_file'):
if not (line[0] == '#' or line == '\n'):
if line.split()[0].upper() == 'Data_file'.upper():
Data_filename = line.split()[1].rstrip();
if line.split()[0].upper() == 'File_format'.upper():
id, age_col, d_age_col, F_col, dF_col, Strat_col = int(line.split()[1]),int(line.split()[2]),int(line.split()[3]),int(line.split()[4]), int(line.split()[5]), int(line.split()[6])
if line.split()[0].upper() == 'Intensity_prior'.upper():
I_min,I_max = float(line.split()[1]),float(line.split()[2])
if line.split()[0].upper() == 'True_data'.upper():
true_behaviour_file = line.split()[1]
if line.split()[0].upper() == 'Plotting_intensity_range'.upper():
I_min,I_max = float(line.split()[1]),float(line.split()[2])
if line.split()[0].upper() == 'Batch_generate_joint_distributions'.upper():
batch_col = int(line.split()[1])
if batch_col == 0:
print('Batch column not specified in inputfile')
sys.exit(0)
# Here is the filename of the (noisy) data file
filename = os.path.join(os.pardir,Data_filename)
# Number of bins for plotting
num_bins=30
# load the data
data = np.loadtxt(filename,usecols=(age_col, d_age_col, F_col, dF_col, batch_col), unpack=False, comments='#')
label = np.loadtxt(filename, usecols=(id,), unpack=False, comments='#', dtype=str)
for index in range(0,data.shape[0]):
if data[index,4] == 1: #make a plot
print("Making joint plot for sample " + str(index))
noisy_pt = [data[index-1,0], data[index-1,2]]
errs = [data[index-1,1], data[index-1,3]]
# load the (true) data file if available
if 'true_behaviour_file' in locals():
            # Use a distinct name so the noisy-data array `data` driving the loop is not overwritten
            true_data = np.loadtxt(os.path.join(os.pardir, true_behaviour_file), usecols=(0, 2))
            true_pt = [true_data[index-1, 0], true_data[index-1, 1]]
# load the relevant joint distribution file
dist = np.loadtxt('Joint_distribution_data/Sample_'+str('{:04}'.format(index))+'.dat')
joint_data = | pd.DataFrame(data=dist,columns=["age","intensity"]) | pandas.DataFrame |
import datetime
import logging
import pathlib
import typing
import xml.parsers.expat
from dataclasses import dataclass
from multiprocessing.dummy import Pool as ThreadPool
import pandas as pd
import pyetrade
import pytz
import requests.exceptions
from tenacity import (
retry,
stop_after_attempt,
wait_exponential,
retry_if_exception_type,
)
log = logging.getLogger(__name__)
VALID_INCREMENTS = [1, 2.5, 5, 10, 50, 100]
PUT_INFO_TO_INCLUDE = [
"bid",
"ask",
"lastPrice",
"volume",
"openInterest",
"OptionGreeks",
"strikePrice",
"symbol",
"optionType",
"netChange",
]
@dataclass
class MarketData:
ticker: str
company_name: str
market_price: float
high_52: float
low_52: float
percentile_52: float
beta: float
next_earnings_date: str
class OptionsManager:
def __init__(
self,
consumer_key: str,
consumer_secret: str,
oauth_token: str,
oauth_secret: str,
):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.oauth_token = oauth_token
self.oauth_secret = oauth_secret
self.market = pyetrade.ETradeMarket(
self.consumer_key,
self.consumer_secret,
self.oauth_token,
self.oauth_secret,
dev=False,
)
self.accounts = pyetrade.ETradeAccounts(
self.consumer_key,
self.consumer_secret,
self.oauth_token,
self.oauth_secret,
dev=False,
)
def get_csv_df(self):
sector_path = pathlib.Path(__file__).parent / "data" / "sectors.csv"
        csv_df = pd.read_csv(sector_path)
# Load libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
# Init settings
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVR, LinearSVR
from sklearn.tree import DecisionTreeRegressor
seed = 309
random.seed(seed)
np.random.seed(seed)
train_test_split_test_size = 0.3
# load data
def load_part_one():
df = pd.read_csv("../../data/Part 1 - regression/diamonds.csv")
return df
def data_preprocess(data):
"""
Data preprocess:
1. Split the entire dataset into train and test
2. Split outputs and inputs
3. Standardize train and test
4. Add intercept dummy for computation convenience
:param data: the given dataset (format: panda DataFrame)
:return: train_data train data contains only inputs
train_labels train data contains only labels
test_data test data contains only inputs
test_labels test data contains only labels
train_data_full train data (full) contains both inputs and labels
test_data_full test data (full) contains both inputs and labels
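    Example (illustrative sketch only; assumes the function returns the six
    values listed above, in that order):
        df = load_part_one()
        train_x, train_y, test_x, test_y, train_full, test_full = data_preprocess(df)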
"""
# Categorical conversion
lb_make = LabelEncoder()
data['cut'] = lb_make.fit_transform(data['cut'])
data['color'] = lb_make.fit_transform(data['color'])
data['clarity'] = lb_make.fit_transform(data['clarity'])
# Split the data into train and test
train_data, test_data = train_test_split(data, test_size=train_test_split_test_size)
# Pre-process data (both train and test)
train_data_full = train_data.copy()
train_data = train_data.drop(["price"], axis=1)
train_data = train_data.drop(["depth"], axis=1)
train_data = train_data.drop(["table"], axis=1)
train_data = train_data.drop(["Unnamed: 0"], axis=1)
train_labels = train_data_full["price"]
test_data_full = test_data.copy()
test_data = test_data.drop(["price"], axis=1)
test_data = test_data.drop(["depth"], axis=1)
test_data = test_data.drop(["table"], axis=1)
test_data = test_data.drop(["Unnamed: 0"], axis=1)
test_labels = test_data_full["price"]
# Standardize the inputs
train_mean = train_data.mean()
train_std = train_data.std()
train_data = (train_data - train_mean) / train_std
test_data = (test_data - train_mean) / train_std
# Tricks: add dummy intercept to both train and test
    train_data['intercept_dummy'] = pd.Series(1.0, index=train_data.index)
import pandas as pd
import matplotlib.pyplot as plt
import kmertools as kt
transitions_path = '/Users/simonelongo/Documents/QuinlanLabFiles/kmer_data/results/kp_21oct2019/bp_counts_per3mer.csv'
counts_path = '/Users/simonelongo/Documents/QuinlanLabFiles/kmer_data/data/ref_genome_kmer_freq.csv'
counts = pd.read_csv(counts_path, index_col=0)
counts.columns = ['count']
transitions = pd.read_csv(transitions_path, index_col=0)
merged = transitions.join(counts, how='inner')
freq = merged.iloc[:, 0:4].div(merged['count'], axis=0)
flank_count_AG = pd.DataFrame()
flank_count_CT = pd.DataFrame()
import glob
import pandas as pd
import numpy as np
files = glob.glob("*.csv")
print(len(files))
results =[]
for file in files:
df = pd.read_csv(file,index_col=None,header=None,dtype=float)
results.append([file.replace("Gridsearch_","").replace(".csv","")] +df.iloc[int(df[5].idxmax()),:].values.tolist())
df1 = pd.DataFrame(results)
len(df)
files = glob.glob("results_full/*.csv")
results =[]
for file in files:
df = pd.read_csv(file,index_col=None,header=None,dtype=float)
results.append([file.replace("Gridsearch_","").replace(".csv","")] +df.iloc[int(df[5].idxmax()),:].values.tolist())
df2 = pd.DataFrame(results)
results = []
for one,two in zip(df1.values,df2.values):
if one[-1] < two[-1]:
results.append(two)
else:
results.append(one)
df = pd.DataFrame(results)
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays.numpy_ import PandasDtype
from .base import BaseExtensionTests
class BaseSetitemTests(BaseExtensionTests):
def test_setitem_scalar_series(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
data[0] = data[1]
assert data[0] == data[1]
def test_setitem_sequence(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
original = data.copy()
data[[0, 1]] = [data[1], data[0]]
assert data[0] == original[1]
assert data[1] == original[0]
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
ser = pd.Series(data)
original = ser.copy()
value = [data[0]]
if as_array:
value = data._from_sequence(value)
xpr = "cannot set using a {} indexer with a different length"
with pytest.raises(ValueError, match=xpr.format("list-like")):
ser[[0, 1]] = value
# Ensure no modifications made before the exception
self.assert_series_equal(ser, original)
with pytest.raises(ValueError, match=xpr.format("slice")):
ser[slice(3)] = value
self.assert_series_equal(ser, original)
def test_setitem_empty_indxer(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
original = data.copy()
data[np.array([], dtype=int)] = []
self.assert_equal(data, original)
def test_setitem_sequence_broadcasts(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
data[[0, 1]] = data[2]
assert data[0] == data[2]
assert data[1] == data[2]
@pytest.mark.parametrize("setter", ["loc", "iloc"])
def test_setitem_scalar(self, data, setter):
arr = pd.Series(data)
setter = getattr(arr, setter)
operator.setitem(setter, 0, data[1])
assert arr[0] == data[1]
def test_setitem_loc_scalar_mixed(self, data):
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
df.loc[0, "B"] = data[1]
assert df.loc[0, "B"] == data[1]
def test_setitem_loc_scalar_single(self, data):
df = pd.DataFrame({"B": data})
df.loc[10, "B"] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
df = pd.DataFrame({"A": data, "B": data})
df.loc[10, "B"] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_iloc_scalar_mixed(self, data):
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
df.iloc[0, 1] = data[1]
assert df.loc[0, "B"] == data[1]
def test_setitem_iloc_scalar_single(self, data):
df = pd.DataFrame({"B": data})
df.iloc[10, 0] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
df = pd.DataFrame({"A": data, "B": data})
df.iloc[10, 1] = data[1]
assert df.loc[10, "B"] == data[1]
@pytest.mark.parametrize(
"mask",
[
np.array([True, True, True, False, False]),
pd.array([True, True, True, False, False], dtype="boolean"),
pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
],
ids=["numpy-array", "boolean-array", "boolean-array-na"],
)
def test_setitem_mask(self, data, mask, box_in_series):
arr = data[:5].copy()
expected = arr.take([0, 0, 0, 3, 4])
if box_in_series:
arr = pd.Series(arr)
expected = pd.Series(expected)
arr[mask] = data[0]
self.assert_equal(expected, arr)
def test_setitem_mask_raises(self, data, box_in_series):
# wrong length
mask = np.array([True, False])
if box_in_series:
data = pd.Series(data)
with pytest.raises(IndexError, match="wrong length"):
data[mask] = data[0]
mask = pd.array(mask, dtype="boolean")
with pytest.raises(IndexError, match="wrong length"):
data[mask] = data[0]
def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
mask[:3] = True
mask[3:5] = pd.NA
if box_in_series:
data = pd.Series(data)
data[mask] = data[0]
assert (data[:3] == data[0]).all()
@pytest.mark.parametrize(
"idx",
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
ids=["list", "integer-array", "numpy-array"],
)
def test_setitem_integer_array(self, data, idx, box_in_series):
arr = data[:5].copy()
expected = data.take([0, 0, 0, 3, 4])
if box_in_series:
            arr = pd.Series(arr)
import random
from remi import App, start
import os
import remi.gui as tk
from creators import C
import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from getbankdata import GetBankData
import glob
import pandas as pd
from user import User
from threading import Thread
from run_saved_model import load_test_data
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
pd.options.display.max_rows = 500
pd.options.display.max_columns = 40
'''
MAIN_V4.PY
Main_v2 was the last stable build.
Main_v3 was a copy of v2 but was not being tracked on git, so v4 was created.
- Swap DR with CR in Kotak
- Check Dropdown of Banklist feasibility and add Kotak
- Modify code to add new frame for filtering options
- Use the choosefile to choose file instead of hardcoded path
- Add Axis Bank
- Add Kotak Bank
- Add HDFC
- Clean out Directory
- Retrain Model
- Add Filters to view graphs on transaction TYPES and PRED_CAT
+ Do the hashlib for password or pickling
+ Revert to not having the entire code in main but modularize it.
'''
class BankStatementAnalyzer(App):
def __init__(self, *args):
super(BankStatementAnalyzer, self).__init__(*args, static_file_path={'path': './resx/'})
self.bank_list = ['Select Bank', 'Axis Bank', 'HDFC Bank', 'Kotak Mahindra Bank', 'ICICI Bank']
self.date = datetime.date.today().strftime('%d-%m-%Y')
self.time = datetime.datetime.now().time()
self.model_name = 'model_ann_98.h5'
self.cv_name = 'vectorizer.sav'
self.le_name = 'target_label_encoder.sav'
def idle(self):
pass
def main(self):
        '''All the inits happen in this main function if the entire GUI
        is being rendered from the BankStatementAnalyzer class; otherwise
        the init of the new class can be created as a separate py file and
        its class must be initialized in this main, with self as its argument.'''
self.date = datetime.date.today().strftime('%d-%m-%Y')
self.time = datetime.datetime.now().time()
self.listview = tk.ListView()
self.frame_left = tk.Container()
self.frame_filter = tk.Container()
self.frame_right = tk.Container()
self.frame_header = tk.Container()
self.frame_right_2 = tk.Container()
self.master_user = pd.DataFrame()
self.window = tk.Container()
self.window.css_background_color = "azure"
self.window.css_height = "100%"
self.window.css_width = "100%"
self.window.css_left = "0.0px"
self.window.css_top = "0.0px"
self.window.css_position = "absolute"
self.frame_header_color = 'cornflowerblue'
self.frame_left_color = 'ivory'
self.frame_filter_color = 'whitesmoke'
self.frame_footer_left_color = 'honeydew'
self.frame_right_color = 'whitesmoke'
self.frame_right_2_color = 'seashell'
self.frame_login_register_color = 'azure'
self.selected_bank = []
self.registry_info = {}
self.login_info = {}
self.dt = pd.DataFrame()
self.dx = pd.DataFrame()
        self.dr = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Utility functions for the RBM
Created on Fri May 10 2019
@author: <NAME>, IST
version: 1.0
"""
import numpy as np
import pandas as pd
import subprocess
import torch
from torch.utils.data import SubsetRandomSampler
import argparse
def parse_general_file(filepath, n_features, n_time_points, task, input_feat_values=0, ref_col_name=0, sep_symbol=',', label_column=-1):
'''Parse a general file to comply to the default format to be used in the
RBM-tDBN model.
Parameters
----------
filepath : String
Path of the file to be parsed.
n_features: int
Number of features of the dataset.
n_time_points: int
Number of time points in the dataset.
task: char
Task to be performed by the model, learning, classification or
prediction. Both classification and prediction require the existence of
labels.
input_feat_values: list, default 0
Values of the different features present in the dataset.
ref_col_name : string, default 0
Name of the reference column, 'subject_id' if possible.
sep_symbol: char, default ','
Separation symbol on the file to be parsed.
label_column: int, default -1
Column referring to the labels.
Returns
-------
df : dataframe
Dataframe of the parsed file.
labels: list
Different labels present on the dataset.
feat_values: list
Set of values taken by the features present in the dataset.
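    Example
    -------
    Illustrative sketch only; the file name 'toy_data' (read as
    'toy_data.csv') and the argument values are assumptions:
        df, labels, feat_values = parse_general_file(
            'toy_data', n_features=3, n_time_points=5, task='c',
            ref_col_name='subject_id', label_column=-1)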
'''
if input_feat_values == 0:
feat_values = []
label_values = []
for i in range(n_features):
feat_values.append([])
else:
feat_values=input_feat_values
if ref_col_name == 0:
df = pd.read_csv(filepath+'.csv', index_col=False, sep=sep_symbol, header=0)
else:
df = pd.read_csv(filepath+'.csv', index_col=ref_col_name, sep=sep_symbol,header=0)
df.index.name = 'subject_id'
labels = pd.DataFrame(data=df.values[:,label_column], # values
index=df.index, # 1st column as index
columns=['label'])
labels.index.name = 'subject_id'
if task == 'c':
df.rename(columns={df.columns[label_column]: 'label'}, inplace=True)
labels.index.name = 'subject_id'
df.drop(columns=['label'], inplace=True)
i=1
time=0
for y in range(len(df.columns)):
df.rename(columns={df.columns[y]: 'X'+str(i)+'__'+str(time)}, inplace=True)
i+=1
if i >= n_features+1:
i=0
time+=1
i=0
for x in df:
for y in range(len(df[x])):
if input_feat_values == 0:
if df[x][y] not in feat_values[i]:
feat_values[i].append(df[x][y])
df[x][y]=feat_values[i].index(df[x][y])
i+=1
if i >= n_features:
i=0
if task == 'c':
for y in range(len(labels)):
if labels['label'][y] not in label_values:
label_values.append(labels['label'][y])
labels['label'][y] = label_values.index(labels['label'][y])
labels.to_csv(filepath+'_target.csv',quoting=1)
df.to_csv(filepath+'_parsed.csv',quoting=1)
outF = open(filepath+'_dic.txt', "w")
for i in range(1, n_features+1):
outF.write('Feature ' + str(i) + ' has ' + str(len(feat_values[i])) + ' different values\n')
for j in range(len(feat_values[i])):
outF.write(str(j) + ': ' + str(feat_values[i][j]) + '\n')
if task=='c':
outF.write('Labels have ' + str(len(label_values)) + ' different values\n')
for j in range(len(label_values)):
outF.write(str(j) + ': ' + str(label_values[j]) + '\n')
outF.close()
return df, labels, feat_values
def create_parser(*args):
''' Creates a parser to analyze information given by the user when running
the program from terminal.
Returns
----------
parser: argparse.ArgumentParser
        Parser which will be used to extract the information.
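    Example
    ----------
    Minimal sketch; the argument values are illustrative only:
        parser = create_parser()
        args = parser.parse_args(['c', 'my_dataset', '-e', '200', '-lr', '0.01'])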
'''
parser = argparse.ArgumentParser()
parser.add_argument('task')
parser.add_argument('filepath')
parser.add_argument('-tdbnp','--tdbn_parents', type=int, default=1, help='set the number of parent nodes to be considered by tDBN')
parser.add_argument('-hu','--hidden_units', type=int, default=3, help='set the number of hidden units')
parser.add_argument('-bs','--batch_size_ratio', type=float, default = 0.1, help='set the batch size ratio')
parser.add_argument('-cd','--contrastive_divergence', type=int, default=1, help='set k in cd-k')
parser.add_argument('-e','--epochs', type=int, default = 100, help='set the number of epochs')
parser.add_argument('-lr','--learning_rate', type=float, default = 0.05, help='set the learning rate')
parser.add_argument('-wd','--weight_decay', type=float, default = 1e-4, help='set the weight decay')
parser.add_argument('-tsr','--test_set_ratio', type=float, default = 0.2, help='set the ratio for the test set')
parser.add_argument('-vsr','--validation_set_ratio', type=float, default = 0.1, help='set the ratio for the validation set')
parser.add_argument('-pcd','--persistent_cd', type=bool, default = False, help='activate persistent contrastive divergence')
parser.add_argument('-v','--version',action='version', version='%(prog)s 2.0')
parser.add_argument('-vb','--verbose',dest='verbose', default = False, action='store_true',help='enables additional printing')
parser.add_argument('-nr','--number_runs', type=int, default = 10, help='number of repetitions')
parser.add_argument('-vr','--validation_runs', type=int, default = 5, help='number of repetitions on the validation cycle')
parser.add_argument('-er','--extraction_runs', type=int, default = 5, help='number of extractions in each validation cycle')
parser.add_argument('-nrbm','--no_rbm', type=bool, default = False, help='if True, RBM is not used')
return parser
def count_labels(filepath):
''' Reads the file containing the labels and returns some information.
Parameters
----------
filepath : string
Path to the label file.
Returns
----------
labels: dataframe
Dataframe containing all the label information.
label_values: list
List with the different label values.
label_indices:
Different subject indices corresponding to each label.
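    Example
    ----------
    Sketch; assumes a file 'toy_data_target.csv' with 'subject_id' and
    'label' columns exists:
        labels, label_values, label_indices = count_labels('toy_data')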
'''
labels = pd.read_csv(filepath + '_target.csv', index_col= 'subject_id', header=0)
label_values = []
label_indices = []
for y in labels.index:
if labels['label'][y] not in label_values:
label_values.append(labels['label'][y])
label_indices.append([])
label_indices[label_values.index(labels['label'][y])].append(y)
else:
label_indices[label_values.index(labels['label'][y])].append(y)
return labels, label_values, label_indices
def parse_file_one_hot(df, n_features, n_time_points, labels=None, n_diff = None):
''' Performs one-hot encoding on a dataset.
Parameters
----------
df : dataframe
Dataframe with the dataset to be encoded.
n_features: int
Number of features.
n_time_points: int
Number of time points.
labels: list, default None
Labels corresponding to each subject.
n_diff: list, default None
Different values for each feature.
Returns
----------
    encoded_data: numpy.ndarray
        One-hot encoded data, with one row per subject and time point.
    n_diff: list
        Number of different values taken by each feature.
    ext_labels: numpy.ndarray
        Label of each encoded row (each subject's label repeated across its
        time points); only meaningful when labels are provided.
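    Example
    ----------
    Worked layout sketch: with n_features=2 and feature values {0,1} and
    {0,1,2}, n_diff is [2, 3] and each encoded row has 5 columns, so a
    time point with X1=1 and X2=0 is encoded as [0, 1, 1, 0, 0].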
'''
if n_diff is None:
v_max=np.zeros(n_features)
v_min=np.zeros(n_features)
i=0
for x in df:
if max(df[x]) > v_max[i]:
v_max[i] = max(df[x])
i+=1
if i >= n_features:
i=0
v_max = v_max.astype(int)
v_min = v_min.astype(int)
v_diff = v_max-v_min #different values for the features
v_diff = v_diff.astype(int)
n_diff = (v_diff + 1)
subjects = df.shape[0]
encoded_data = np.zeros((subjects*n_time_points,sum(n_diff)))
ext_labels = np.zeros((subjects*n_time_points))
col_aux=0
time=0
for x in df: #iterate on the features and time
for y in df.index: #iterate on the subjects
encoded_data[subjects*time+y-df.index[0]][sum(p for p in n_diff[0:col_aux])+df[x][y].astype(int)]=1
if labels is not None:
ext_labels[subjects*time+y-df.index[0]] = labels[y-df.index[0]]
#training_data[subjects*time_step+y][sum(p for p in n_diff[0:col_aux])+df[x][y]]=1
col_aux+=1
if col_aux >= n_features:
col_aux = 0
time +=1
return encoded_data, n_diff, ext_labels
def create_train_sets(dataset, label_indices=0, test_train_ratio=0.2, validation_ratio=0.1, batch_size=32, get_indices=True,
random_seed=42, shuffle_dataset=True, label_ratio = False):
'''Distributes the data into train, validation and test sets and returns the respective data loaders.
Parameters
----------
dataset : torch.utils.data.Dataset
Dataset object which will be used to train, validate and test the model.
test_train_ratio : float, default 0.2
Number from 0 to 1 which indicates the percentage of the data
which will be used as a test set. The remaining percentage
is used in the training and validation sets.
validation_ratio : float, default 0.1
Number from 0 to 1 which indicates the percentage of the data
from the training set which is used for validation purposes.
A value of 0.0 corresponds to not using validation.
batch_size : integer, default 32
Defines the batch size, i.e. the number of samples used in each
training iteration to update the model's weights.
get_indices : bool, default True
If set to True, the function returns the dataloader objects of
the train, validation and test sets and also the indices of the
sets' data. Otherwise, it only returns the data loaders.
random_seed : integer, default 42
Seed used when shuffling the data.
shuffle_dataset : bool, default True
If set to True, the data of which set is shuffled.
label_indices: array, default 0
Data indices for the different labels.
label_ratio: bool, default False
        Whether or not to maintain each label's ratio when creating the
        sets.
Returns
-------
    train_dataloader : torch.utils.data.DataLoader
        Data loader used during training.
    val_dataloader : torch.utils.data.DataLoader
        Data loader used to evaluate the model's performance on the
        validation set during training.
    test_dataloader : torch.utils.data.DataLoader
        Data loader used to evaluate the model's performance on the test
        set after training. When get_indices is True, the train, validation
        and test indices are also returned.
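    Example
    -------
    Illustrative sketch with a toy tensor dataset (names are assumptions):
        toy = torch.utils.data.TensorDataset(torch.randn(100, 8))
        train_dl, val_dl, test_dl = create_train_sets(
            toy, test_train_ratio=0.2, validation_ratio=0.1,
            batch_size=16, get_indices=False)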
'''
# Create data indices for training and test splits
if label_ratio:
test_split = []
val_split = []
for label in range(len(label_indices)):
test_split.append([])
val_split.append([])
test_split[label] = int(np.floor(test_train_ratio * len(label_indices[label])))
val_split[label] = int(np.floor(validation_ratio * (1-test_train_ratio) * len(label_indices[label])))
if shuffle_dataset:
#np.random.seed(random_seed)
for label in range(len(label_indices)):
np.random.shuffle(label_indices[label])
for label in range(len(label_indices)):
if label == 0:
train_indices = label_indices[label][test_split[label]+val_split[label]:]
val_indices = label_indices[label][test_split[label]:test_split[label]+val_split[label]]
test_indices = label_indices[label][:test_split[label]]
else:
train_indices.extend(label_indices[label][test_split[label]:])
val_indices.extend(label_indices[label][test_split[label]:test_split[label]+val_split[label]])
test_indices.extend(label_indices[label][:test_split[label]])
if shuffle_dataset:
np.random.shuffle(test_indices)
np.random.shuffle(train_indices)
np.random.shuffle(val_indices)
else:
dataset_size = len(dataset)
indices = list(range(dataset_size))
test_split = int(np.floor(test_train_ratio * dataset_size))
if shuffle_dataset:
#np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, test_indices = indices[test_split:], indices[:test_split]
# Create data indices for training and validation splits
train_dataset_size = len(train_indices)
val_split = int(np.floor(validation_ratio * train_dataset_size))
if shuffle_dataset:
#np.random.seed(random_seed)
np.random.shuffle(train_indices)
np.random.shuffle(test_indices)
train_indices, val_indices = train_indices[val_split:], train_indices[:val_split]
# Create data samplers
train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)
test_sampler = SubsetRandomSampler(test_indices)
# Create dataloaders for each set, which will allow loading batches
train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)
val_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=val_sampler)
test_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=test_sampler)
if get_indices:
# Return the data loaders and the indices of the sets
return train_dataloader, val_dataloader, test_dataloader, train_indices, val_indices, test_indices
else:
# Just return the data loaders of each set
return train_dataloader, val_dataloader, test_dataloader
def weight_analyzer(weights, feat_values, threshold):
    ''' Analyze the weights of the restricted Boltzmann machine
Parameters
----------
weights : list
Weights learned for the RBM
feat_values : list
Values of each feature, used for representation and helping the user
interpretation.
threshold: float
Percentage of the maximum in order to consider that a feature is
important for that hidden unit.
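    Example
    ----------
    Sketch only; 'rbm_weights' stands for whatever weight matrix the
    trained RBM produced and is not defined in this module:
        weight_analyzer(np.asarray(rbm_weights), feat_values, threshold=0.5)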
'''
n_features = len(feat_values)
    print('Units in the same group have higher probability of being active together, '+
          'while units in different groups have lower probability of being active together \n')
max_weight = float(np.absolute(weights).max())
for j in range(0,weights.shape[1]):
pos_result = []
neg_result = []
#max_weight = max(np.absolute(weights[:,j]))
for i in range(0,weights.shape[0]):
if np.absolute(weights[i,j]) > max_weight*threshold:
if weights[i,j] > 0:
pos_result.append(i)
else:
neg_result.append(i)
print('\nH' + str(j))
print('+')
for i in pos_result:
print(str(i) + ': X' + str(i%n_features) + '=' + str(feat_values[i%n_features][int(np.floor(i/n_features))]))
print('-')
for i in neg_result:
print(str(i) + ': X' + str(i%n_features) + '=' + str(feat_values[i%n_features][int(np.floor(i/n_features))]))
def jarWrapper(*args):
''' Method used to run a Java program from a Python script and get its
results.
Returns
----------
ret: String
Results given by the Java program executed.
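    Example
    ----------
    Sketch; the jar name and flags are assumptions, not a documented CLI:
        output_lines = jarWrapper('tDBN.jar', '-i', 'data_parsed.csv')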
'''
process = subprocess.Popen(['java', '-jar']+list(args), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = []
while process.poll() is None:
line = process.stdout.readline()
line = line.decode("utf-8")
if line != '' and line.endswith('\n'):
ret.append(line[:-1])
stdout, stderr = process.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
ret += stdout.split('\n')
if stderr != '':
ret += stderr.split('\n')
ret.remove('')
return ret
def check_int(c):
try:
int(c)
return True
except ValueError:
return False
def parse_dic(filepath):
dic = open(filepath+'_dic.txt','r')
feat_values = []
for line in dic:
if line[0] == '0':
line_feat=[]
feat_values.append(line_feat)
value = line.split(': ')[1][:-1]
line_feat.append(value)
elif check_int(line[0]):
value = line.split(': ')[1][:-1]
line_feat.append(value)
elif line[0] == 'L':
break
return feat_values
def reverse_one_hot(data, feat_values):
if len(data.shape)==2:
ret=[[] for x in range(data.shape[0])]
n_instances= data.shape[0]
else:
ret=[]
n_instances= 1
i=0
for feature in feat_values:
j=0
for value in feature:
if n_instances > 1:
for k in range(n_instances):
if data[k][i*len(feature)+j] == 1:
ret[k].append(value)
else:
if data[i*len(feature)+j] == 1:
ret.append(value)
j+=1
i+=1
return ret
def parse_labels(filepath):
    df = pd.read_csv(filepath+'_class.csv', index_col="subject_id", sep=',', header=0)
import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (make could in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestTimedeltas(tm.TestCase):
_multiprocess_can_split_ = True
def test_ops(self):
td = Timedelta(10, unit='d')
self.assertEqual(-td, Timedelta(-10, unit='d'))
self.assertEqual(+td, Timedelta(10, unit='d'))
self.assertEqual(td - td, Timedelta(0, unit='ns'))
self.assertTrue((td - pd.NaT) is pd.NaT)
self.assertEqual(td + td, Timedelta(20, unit='d'))
self.assertTrue((td + pd.NaT) is pd.NaT)
self.assertEqual(td * 2, Timedelta(20, unit='d'))
self.assertTrue((td * pd.NaT) is pd.NaT)
self.assertEqual(td / 2, Timedelta(5, unit='d'))
self.assertEqual(abs(td), td)
self.assertEqual(abs(-td), td)
self.assertEqual(td / td, 1)
self.assertTrue((td / pd.NaT) is np.nan)
# invert
self.assertEqual(-td, Timedelta('-10d'))
self.assertEqual(td * -1, Timedelta('-10d'))
self.assertEqual(-1 * td, Timedelta('-10d'))
self.assertEqual(abs(-td), Timedelta('10d'))
# invalid
self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
# invalid multiply with another timedelta
self.assertRaises(TypeError, lambda: td * td)
# can't operate with integers
self.assertRaises(TypeError, lambda: td + 2)
self.assertRaises(TypeError, lambda: td - 2)
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
self.assertEqual(240, td / pd.offsets.Hour(1))
self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)
def test_ops_ndarray(self):
td = Timedelta('1 day')
# timedelta, timedelta
other = pd.to_timedelta(['1 day']).values
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
self.assertRaises(TypeError, lambda: td + np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) + td)
expected = pd.to_timedelta(['0 days']).values
self.assert_numpy_array_equal(td - other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(-other + td, expected)
self.assertRaises(TypeError, lambda: td - np.array([1]))
self.assertRaises(TypeError, lambda: np.array([1]) - td)
expected = pd.to_timedelta(['2 days']).values
self.assert_numpy_array_equal(td * np.array([2]), expected)
self.assert_numpy_array_equal(np.array([2]) * td, expected)
self.assertRaises(TypeError, lambda: td * other)
self.assertRaises(TypeError, lambda: other * td)
self.assert_numpy_array_equal(td / other,
np.array([1], dtype=np.float64))
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other / td,
np.array([1], dtype=np.float64))
# timedelta, datetime
other = pd.to_datetime(['2000-01-01']).values
expected = pd.to_datetime(['2000-01-02']).values
self.assert_numpy_array_equal(td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other + td, expected)
expected = pd.to_datetime(['1999-12-31']).values
self.assert_numpy_array_equal(-td + other, expected)
if LooseVersion(np.__version__) >= '1.8':
self.assert_numpy_array_equal(other - td, expected)
def test_ops_series(self):
# regression test for GH8813
td = Timedelta('1 day')
other = pd.Series([1, 2])
expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
        tm.assert_series_equal(expected, td * other)
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = | concat([df, df], axis=0) | pandas.concat |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annual frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with weekly frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = | Period(freq='M', year=2007, month=1) | pandas.tseries.period.Period |
# Provides code to perform multiclustering
#TODO was just messing around, nothing crazy here
from sklearn.cluster import KMeans
import data_processing.dataUtils as dataUtils
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import pandas as pd
pd.set_option('display.max_columns', 500)
| pd.set_option('display.width', 1000) | pandas.set_option |
"""
Functions and classes used to identify barcodes
"""
from typing import *
import pandas as pd
import numpy as np
import pickle
import logging
from sklearn.neighbors import NearestNeighbors
# from pynndescent import NNDescent
from pathlib import Path
from itertools import groupby
from pysmFISH.logger_utils import selected_logger
from pysmFISH.data_models import Output_models
from pysmFISH.errors import Registration_errors
class simplify_barcodes_reference():
"""Utility Class use to convert excels files with codebook info
in smaller size pandas dataframe/parquet files to pass to dask
workers during the processing. This utility function must be
run before running the experiment analysis. The pipeline
require the output of this function.
"""
def __init__(self, barcode_fpath: str):
"""Class initialization
Args:
barcode_fpath (str): Path to the xlsx file with the codebook
"""
self.barcode_fpath = Path(barcode_fpath)
self.barcode_fname = self.barcode_fpath.stem
@staticmethod
def format_codeword(codeword: str):
"""Convert a codeword string into its byte representation.
Args:
codeword (str): codeword representing a gene
Returns:
bytes: codeword converted to its byte representation
"""
str_num = codeword.split('[')[-1].split(']')[0]
converted_codeword = np.array([int(el) for el in list(str_num)]).astype(np.int8)
converted_codeword = converted_codeword.tobytes()
return converted_codeword
def convert_codebook(self):
used_gene_codebook_df = pd.read_excel(self.barcode_fpath)
# used_gene_codebook_df = pd.read_parquet(self.barcode_fpath)
self.codebook_df = used_gene_codebook_df.loc[:,['Barcode','Gene']]
self.codebook_df.rename(columns = {'Barcode':'Code'}, inplace = True)
self.codebook_df.Code = self.codebook_df.Code.apply(lambda x: self.format_codeword(x))
self.codebook_df.to_parquet(self.barcode_fpath.parent / (self.barcode_fname + '.parquet'))
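# Illustrative usage sketch of the converter above (not part of the original module;
# the path is hypothetical): run once per experiment before launching the pipeline.
#
#   converter = simplify_barcodes_reference('/path/to/codebook.xlsx')
#   converter.convert_codebook()
#
# This writes a '<codebook name>.parquet' file next to the xlsx, with the 'Code'
# column stored as bytes so it can be shipped cheaply to the dask workers.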
def dots_hoods(coords: np.ndarray,pxl: int)->np.ndarray:
"""Function that calculates the coords of the neighborhood searched
around each peak when identifying the barcodes.
Args:
coords (np.ndarray): coords of the identified peaks
pxl (int): size of the neighborhood in pixel
Returns:
np.ndarray: coords that define the neighborhood (r_tl,r_br,c_tl,c_tr)
"""
r_tl = coords[:,0]-pxl
r_br = coords[:,0]+pxl
c_tl = coords[:,1]-pxl
c_tr = coords[:,1]+pxl
r_tl = r_tl[:,np.newaxis]
r_br = r_br[:,np.newaxis]
c_tl = c_tl[:,np.newaxis]
c_tr = c_tr[:,np.newaxis]
chunks_coords = np.hstack((r_tl,r_br,c_tl,c_tr))
chunks_coords = chunks_coords.astype(int)
return chunks_coords
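# Quick illustration of dots_hoods with made-up values (not part of the pipeline):
#
#   dots_hoods(np.array([[10, 20], [5, 7]]), pxl=2)
#   # -> array([[ 8, 12, 18, 22],
#   #           [ 3,  7,  5,  9]])
#
# i.e. each output row holds (r_tl, r_br, c_tl, c_tr) for one peak, a square
# neighbourhood of +/- pxl pixels around the peak coordinates.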
def extract_dots_images(barcoded_df: pd.DataFrame,registered_img_stack: np.ndarray,
experiment_fpath: str, metadata: dict):
"""Function used to extract the images corresponding to a barcode
after running the decoding identification. It can save the images,
but to avoid increasing too much the space occupied by a processed
experiment, an array with the maximum intensity value of the pixels in
each round is calculated and saved instead.
Args:
barcoded_df (pd.DataFrame): Dataframe with decoded barcodes
for a specific field of view.
registered_img_stack (np.ndarray): Preprocessed image stack of a single field of view;
the imaging round corresponds to the z-stack position
experiment_fpath (str): Path to the folder of the experiment to process
metadata (dict): Overall experiment info
"""
round_intensity_labels = ['bit_' + str(el) +'_intensity' for el in np.arange(1,int(metadata['total_rounds'])+1)]
if isinstance(registered_img_stack, np.ndarray) and (barcoded_df.shape[0] >1):
experiment_fpath = Path(experiment_fpath)
barcodes_names = barcoded_df['barcode_reference_dot_id'].values
coords = barcoded_df.loc[:, ['r_px_registered', 'c_px_registered']].to_numpy()
barcodes_extraction_resolution = barcoded_df['barcodes_extraction_resolution'].values[0]
chunks_coords = dots_hoods(coords,barcodes_extraction_resolution)
chunks_coords[chunks_coords<0]=0
chunks_coords[chunks_coords>registered_img_stack.shape[1]]= registered_img_stack.shape[1]
for idx in np.arange(chunks_coords.shape[0]):
selected_region = registered_img_stack[:,chunks_coords[idx,0]:chunks_coords[idx,1]+1,chunks_coords[idx,2]:chunks_coords[idx,3]+1]
if selected_region.size >0:
max_array = selected_region.max(axis=(1,2))
barcoded_df.loc[barcoded_df.dot_id == barcodes_names[idx],round_intensity_labels] = max_array
# for channel in channels:
# all_regions[channel] = {}
# all_max[channel] = {}
# img_stack = registered_img_stack[channel]
# trimmed_df_channel = trimmed_df.loc[trimmed_df.channel == channel]
# if trimmed_df_channel.shape[0] >0:
# barcodes_names = trimmed_df_channel['barcode_reference_dot_id'].values
# coords = trimmed_df_channel.loc[:, ['r_px_registered', 'c_px_registered']].to_numpy()
# barcodes_extraction_resolution = trimmed_df_channel['barcodes_extraction_resolution'].values[0]
# chunks_coords = dots_hoods(coords,barcodes_extraction_resolution)
# chunks_coords[chunks_coords<0]=0
# chunks_coords[chunks_coords>img_stack.shape[1]]= img_stack.shape[1]
# for idx in np.arange(chunks_coords.shape[0]):
# selected_region = img_stack[:,chunks_coords[idx,0]:chunks_coords[idx,1]+1,chunks_coords[idx,2]:chunks_coords[idx,3]+1]
# if selected_region.size >0:
# max_array = selected_region.max(axis=(1,2))
# # all_regions[channel][barcodes_names[idx]]= selected_region
# all_max[channel][barcodes_names[idx]]= max_array
# barcoded_df.loc[barcoded_df.dot_id == barcodes_names[idx],round_intensity_labels] = max_array
# fpath = experiment_fpath / 'tmp' / 'combined_rounds_images' / (experiment_name + '_' + channel + '_img_dict_fov_' + str(fov) + '.pkl')
# pickle.dump(all_regions,open(fpath,'wb'))
# fpath = experiment_fpath / 'results' / (experiment_name + '_barcodes_max_array_dict_fov_' + str(fov) + '.pkl')
# pickle.dump(all_max,open(fpath,'wb'))
else:
barcoded_df.loc[:,round_intensity_labels] = np.nan
return barcoded_df
def identify_flipped_bits(codebook: pd.DataFrame, gene: str,
raw_barcode: ByteString)-> Tuple[ByteString, ByteString]:
"""Utility function used to identify the positions of the bits that are
flipped for a single dot, after the nearest-neighbour matching and the
definition of the acceptable hamming distance.
Args:
codebook (pd.DataFrame): Codebook used for the decoding
gene (str): Name of the gene identified
raw_barcode (ByteString): identified barcode from the images
Returns:
Tuple[ByteString, ByteString]: (flipped_position, flipping_direction)
"""
gene_barcode_str =codebook.loc[codebook.Gene == gene, 'Code'].values[0]
gene_barcode = np.frombuffer(gene_barcode_str, np.int8)
raw_barcode = np.frombuffer(raw_barcode, np.int8)
flipped_positions = np.where(raw_barcode != gene_barcode)[0].astype(np.int8)
flipping_directions = (gene_barcode[flipped_positions] - raw_barcode[flipped_positions]).astype(np.int8)
# flipped_positions = flipped_positions.tobytes()
# flipping_directions = flipping_directions.tobytes()
return flipped_positions,flipping_directions
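# Toy example of identify_flipped_bits (illustrative values, assuming a 4-bit codebook):
# if the codebook entry for a gene is 0110 and the raw barcode read from the images
# is 0100, the function returns flipped_positions = [2] and flipping_directions = [1],
# meaning the gene barcode carries a 1 at bit 2 where the raw barcode carries a 0.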
def define_flip_direction(codebook_dict: dict,experiment_fpath: str,
output_df: pd.DataFrame):
"""Function used to determine the positions of the bits that are
flipped for a fov, after the nearest-neighbour matching and the
definition of the acceptable hamming distance.
Args:
codebook_dict (dict): Codebooks used for the decoding, keyed by channel
experiment_fpath (str): Path to the folder of the experiment to process
output_df (pd.DataFrame): Dataframe with the decoded results for
the specific fov.
"""
if output_df.shape[0] > 1:
correct_hamming_distance = 0
selected_hamming_distance = 3 / output_df.iloc[0].barcode_length
experiment_fpath = Path(experiment_fpath)
experiment_name = experiment_fpath.stem
channels = codebook_dict.keys()
all_evaluated = []
for channel in channels:
codebook = codebook_dict[channel]
fov = output_df.fov_num.values[0]
trimmed_df = output_df.loc[(output_df.dot_id == output_df.barcode_reference_dot_id) &
(output_df.channel == channel) &
(output_df['hamming_distance'] > correct_hamming_distance) &
(output_df['hamming_distance'] < selected_hamming_distance),
['barcode_reference_dot_id', 'decoded_genes', 'raw_barcodes','hamming_distance']]
trimmed_df = trimmed_df.dropna(subset=['decoded_genes'])
trimmed_df.loc[:,('flip_and_direction')] = trimmed_df.apply(lambda x: identify_flipped_bits(codebook,x.decoded_genes,x.raw_barcodes),axis=1)
trimmed_df['flip_position'] = trimmed_df['flip_and_direction'].apply(lambda x: x[0])
trimmed_df['flip_direction'] = trimmed_df['flip_and_direction'].apply(lambda x: x[1])
trimmed_df.drop(columns=['flip_and_direction'],inplace=True)
all_evaluated.append(trimmed_df)
all_evaluated = pd.concat(all_evaluated, axis=0, ignore_index=True)
fpath = experiment_fpath / 'results' / (experiment_name + '_' + channel + '_df_flip_direction_fov' + str(fov) + '.parquet')
all_evaluated.to_parquet(fpath)
# return trimmed_df
def chunk_dfs(dataframes_list: list, chunk_size: int):
"""
Function modified from
https://stackoverflow.com/questions/45217120/how-to-efficiently-join-merge-concatenate-large-data-frame-in-pandas
Yields n dataframes at a time, where n == chunk_size.
"""
dfs = []
for f in dataframes_list:
dfs.append(f)
if len(dfs) == chunk_size:
yield dfs
dfs = []
if dfs:
yield dfs
def merge_with_concat(dfs: list)->pd.DataFrame:
"""Utility function used to merge dataframes
Args:
dfs (list): List with the dataframes to merge
Returns:
pd.DataFrame: Merged dataframe
"""
# dfs = (df.set_index(col, drop=True) for df in dfs)
merged = pd.concat(dfs, axis=0, join='outer', copy=False)
return merged
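# Sketch of how chunk_dfs and merge_with_concat are meant to be combined (assumed
# usage, following the stackoverflow pattern referenced above): merge a long list of
# dataframes in fixed-size chunks to keep peak memory under control.
#
#   partial_merges = [merge_with_concat(chunk) for chunk in chunk_dfs(dataframes_list, 10)]
#   merged = merge_with_concat(partial_merges)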
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def extract_barcodes_NN_fast_multicolor(registered_counts_df: pd.DataFrame, analysis_parameters: Dict,
codebook_df: pd.DataFrame, metadata:dict)-> Tuple[pd.DataFrame,pd.DataFrame]:
"""Function used to extract the barcodes from the registered
counts using nearest neighbour. if there is a problem with the registration the barcode assigned
will be 0*barcode_length
Args:
registered_counts_df (pd.Dataframe): Fov counts after registration
analysis_parameters (Dict): Parameters for data processing
codebook_df (pd.DataFrame): codebook used to deconvolve the barcode
Returns:
Tuple[pd.DataFrame,pd.DataFrame]: (barcoded_round, all_decoded_dots_df)
"""
logger = selected_logger()
barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
barcode_length = metadata['barcode_length']
registration_errors = Registration_errors()
stitching_channel = metadata['stitching_channel']
registered_counts_df.dropna(subset=['dot_id'],inplace=True)
# Starting level for selection of dots
dropping_counts = registered_counts_df.copy(deep=True)
all_decoded_dots_list = []
barcoded_round = []
if registered_counts_df['r_px_registered'].isnull().values.any():
all_decoded_dots_df = pd.DataFrame(columns = registered_counts_df.columns)
all_decoded_dots_df['decoded_genes'] = np.nan
all_decoded_dots_df['hamming_distance'] = np.nan
all_decoded_dots_df['number_positive_bits'] = np.nan
all_decoded_dots_df['barcode_reference_dot_id'] = np.nan
all_decoded_dots_df['raw_barcodes'] = np.nan
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
# Save barcoded_round and all_decoded_dots_df
return registered_counts_df, all_decoded_dots_df
else:
for ref_round_number in np.arange(1,barcode_length+1):
#ref_round_number = 1
reference_round_df = dropping_counts.loc[dropping_counts.round_num == ref_round_number,:]
# Step one (all dots not in round 1)
compare_df = dropping_counts.loc[dropping_counts.round_num!=ref_round_number,:]
if (not reference_round_df.empty):
if not compare_df.empty:
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_round_df[['r_px_registered','c_px_registered']])
dists, indices = nn.kneighbors(compare_df[['r_px_registered','c_px_registered']], return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_distances_below_resolution = np.where(dists <= barcodes_extraction_resolution)[0]
comp_idx = idx_distances_below_resolution
ref_idx = indices[comp_idx].flatten()
# Subset the dataframe according to the selected points
# The reference selected will have repeated points
comp_selected_df = compare_df.iloc[comp_idx]
ref_selected_df = reference_round_df.iloc[ref_idx]
# The size of ref_selected_df w/o duplicates may be smaller than reference_round_df if
# some of the dots in reference_round_df have no neighbours
# Test approach where we get rid of the single dots
comp_selected_df.loc[:,'barcode_reference_dot_id'] = ref_selected_df['dot_id'].values
ref_selected_df_no_duplicates = ref_selected_df.drop_duplicates()
ref_selected_df_no_duplicates.loc[:,'barcode_reference_dot_id'] = ref_selected_df_no_duplicates['dot_id'].values
# Collect singletons
# Remember that this method works only because there are no duplicates inside the dataframes
# https://stackoverflow.com/questions/48647534/python-pandas-find-difference-between-two-data-frames
if reference_round_df.shape[0] > ref_selected_df_no_duplicates.shape[0]:
singletons_df = pd.concat([reference_round_df,ref_selected_df_no_duplicates]).drop_duplicates(keep=False)
singletons_df.loc[:,'barcode_reference_dot_id'] = singletons_df['dot_id'].values
barcoded_round = pd.concat([comp_selected_df, ref_selected_df_no_duplicates,singletons_df], axis=0,ignore_index=False)
else:
barcoded_round = pd.concat([comp_selected_df, ref_selected_df_no_duplicates], axis=0,ignore_index=False)
# barcoded_round = pd.concat([comp_selected_df, ref_selected_df_no_duplicates,singletons_df], axis=0,ignore_index=False)
barcoded_round_grouped = barcoded_round.groupby('barcode_reference_dot_id')
compare_df = compare_df.drop(comp_selected_df.index)
dropping_counts = compare_df
else:
# Collecting singleton of last bit
reference_round_df.loc[:,'barcode_reference_dot_id'] = reference_round_df['dot_id'].values
barcoded_round_grouped = reference_round_df.groupby('barcode_reference_dot_id')
ref_selected_df_no_duplicates = reference_round_df
for brdi, grp in barcoded_round_grouped:
barcode = np.zeros([barcode_length],dtype=np.int8)
barcode[grp.round_num.values.astype(np.int8)-1] = 1
#hamming_dist, index_gene = nn_sklearn.kneighbors(barcode.reshape(1, -1), return_distance=True)
#gene= codebook_df.loc[index_gene.reshape(index_gene.shape[0]),'Gene'].tolist()
barcode = barcode.tostring()
if len(ref_selected_df_no_duplicates) != 0:
ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'raw_barcodes'] = barcode
#ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'decoded_gene_name'] = gene
#ref_selected_df_no_duplicates.loc[ref_selected_df_no_duplicates.barcode_reference_dot_id == brdi,'hamming_distance'] = hamming_dist.flatten()[0]
#fish_counts.loc[grp.index,'barcode_reference_dot_id'] = brdi
#fish_counts.loc[grp.index,'raw_barcodes'] = barcode
#dists, index = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
all_decoded_dots_list.append(ref_selected_df_no_duplicates)
if all_decoded_dots_list:
all_decoded_dots_df = pd.concat(all_decoded_dots_list,ignore_index=False)
codebook_df = convert_str_codebook(codebook_df,'Code')
codebook_array = make_codebook_array(codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
all_barcodes = np.vstack(all_decoded_dots_df.raw_barcodes.map(lambda x: np.frombuffer(x, np.int8)).values)
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
all_decoded_dots_df.loc[:,'decoded_genes'] = genes
all_decoded_dots_df.loc[:,'hamming_distance'] = dists_arr
all_decoded_dots_df.loc[:,'number_positive_bits'] = all_barcodes.sum(axis=1)
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
else:
all_decoded_dots_df = pd.DataFrame(columns = registered_counts_df.columns)
all_decoded_dots_df['decoded_genes'] = np.nan
all_decoded_dots_df['hamming_distance'] = np.nan
all_decoded_dots_df['number_positive_bits'] = np.nan
all_decoded_dots_df['barcode_reference_dot_id'] = np.nan
all_decoded_dots_df['raw_barcodes'] = np.nan
all_decoded_dots_df['barcodes_extraction_resolution'] = barcodes_extraction_resolution
# Save barcoded_round and all_decoded_dots_df
return barcoded_round, all_decoded_dots_df
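# Minimal sketch of the decoding step used above (illustrative; 'codebook_array' and the
# 4-bit barcode are assumptions): each candidate barcode is a 0/1 vector over the rounds,
# and the closest codebook entry under the hamming metric gives the decoded gene.
#
#   raw_bits = np.frombuffer(np.array([1, 0, 1, 0], dtype=np.int8).tobytes(), np.int8)
#   nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming").fit(codebook_array)
#   dist, idx = nn_sklearn.kneighbors(raw_bits.reshape(1, -1), return_distance=True)
#   # dist[0, 0] is the fraction of mismatching bits; codebook_df.loc[idx[0, 0], 'Gene']
#   # is the decoded gene, accepted or rejected downstream based on the hamming distance.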
# TODO Remove all the functions below
######## -------------------------------------------------------------------
class extract_barcodes_NN():
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
counts: pandas.DataFrame
pandas file with the fov counts after
registration
analysis_parameters: dict
parameters for data processing
experiment_config: Dict
dictionary with the experimental data
codebook_df: pandas.DataFrame
pandas file with the codebook used to
deconvolve the barcode
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def __init__(self, counts, analysis_parameters:Dict,experiment_config:Dict,codebook_df,file_tags,status:str):
self.barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
self.RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
self.barcode_length = experiment_config['Barcode_length']
self.counts = counts
self.logger = selected_logger()
self.codebook_df = codebook_df
self.file_tags = file_tags
self.status = status
self.registration_errors = Registration_errors()
@staticmethod
def barcode_nn(counts_df, ref_round_number, barcodes_extraction_resolution):
column_names = list(counts_df.columns.values)
column_names.append('barcode_reference_dot_id')  # list.append mutates in place and returns None
barcoded_df = pd.DataFrame(columns=column_names)
reference_array = counts_df.loc[counts_df.round_num == ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
reference_round_df = counts_df.loc[counts_df.round_num == ref_round_number,:].reset_index(drop=True)
# Step one (all dots not in round 1)
coords_compare = counts_df.loc[counts_df.round_num != ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
compare_df = counts_df.loc[counts_df.round_num != ref_round_number,:].reset_index(drop=True)
if (reference_array.shape[0] >0) and (coords_compare.shape[0] >0):
# initialize network
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_array)
# Get the nn
dists, indices = nn.kneighbors(coords_compare, return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_selected_coords_compare = np.where(dists <= barcodes_extraction_resolution)[0]
compare_selected_df = compare_df.loc[idx_selected_coords_compare,:]
compare_selected_df['barcode_reference_dot_id'] = np.nan
# ref_idx = indices[idx_selected_coords_compare]
# compare_selected_df.loc[compare_selected_df.index.isin(idx_selected_coords_compare),'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
for idx in idx_selected_coords_compare:
ref_idx = indices[idx]
compare_selected_df.loc[idx,'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
reference_round_df['barcode_reference_dot_id'] = reference_round_df.dot_id
barcoded_df = barcoded_df.append([compare_selected_df, reference_round_df], ignore_index=True)
compare_df = compare_df.drop(compare_selected_df.index)
compare_df = compare_df.reset_index(drop=True)
return compare_df, barcoded_df
@staticmethod
def convert_str_codebook(codebook_df,column_name):
codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
return codebook_df
@staticmethod
def make_codebook_array(codebook_df,column_name):
codebook_array = np.zeros((len(codebook_df[column_name]),codebook_df[column_name][0].shape[0]))
for idx, el in enumerate(codebook_df[column_name]):
row = codebook_df[column_name][idx]
row = row[np.newaxis,:]
codebook_array[idx,:] = row
return codebook_array
def run_extraction(self):
data_models = Output_models()
registration_errors = Registration_errors()
fov = self.file_tags['fov']
channel = self.file_tags['channel']
self.barcoded_fov_df = data_models.barcode_analysis_df
self.barcoded_fov_df.attrs = self.counts.attrs
if self.status == 'FAILED':
error = self.counts['min_number_matching_dots_registration'].values[0]
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':error,
'fov_num':int(fov),'dot_channel':channel,'round_num': round_num },ignore_index=True)
elif self.status == 'SUCCESS':
if (min(self.counts.loc[:,'min_number_matching_dots_registration']) < self.RegistrationMinMatchingBeads):
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':registration_errors.registration_below_extraction_resolution,
'fov_num':int(fov),'dot_channel':channel,'round_num': round_num},ignore_index=True)
self.status = 'FAILED'
else:
hd_2 = 2 / self.barcode_length
hd_3 = 3 / self.barcode_length
# barcode_length = len(self.counts['round_num'].unique())
rounds = np.arange(1,self.barcode_length+1)
self.codebook_df = self.convert_str_codebook(self.codebook_df,'Code')
codebook_array = self.make_codebook_array(self.codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
# remove points with np.NAN
# self.counts = self.counts.dropna()
for round_num in rounds:
compare_df, barcoded_df = self.barcode_nn(self.counts, round_num, self.barcodes_extraction_resolution)
self.barcoded_fov_df = self.barcoded_fov_df.append(barcoded_df, ignore_index=True)
self.counts = compare_df
self.counts['barcode_reference_dot_id'] = self.counts.dot_id
self.barcoded_fov_df = self.barcoded_fov_df.append(self.counts, ignore_index=True)
self.barcoded_fov_df['barcodes_extraction_resolution'] = self.barcodes_extraction_resolution
self.grpd = self.barcoded_fov_df.groupby('barcode_reference_dot_id')
# self.all_barcodes = {}
# for name, group in self.grpd:
# rounds_num = group.round_num.values
# dot_ids = group.dot_id.values
# rounds_num = rounds_num.astype(int)
# barcode = np.zeros([self.barcode_length],dtype=np.int8)
# barcode[(rounds_num-1)] += 1
# dists_arr, index_arr = nn_sklearn.kneighbors(barcode.reshape(1, -1), return_distance=True)
# gene=self.codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()[0]
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'raw_barcodes'] = barcode.tostring()
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'all_Hdistance_genes'] = gene
# if dists_arr[0][0] == 0:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'0Hdistance_genes'] = gene
# elif dists_arr[0][0] < hd_2:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below2Hdistance_genes'] = gene
# elif dists_arr[0][0] < hd_3:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below3Hdistance_genes'] = gene
barcode_reference_dot_id_list = []
num_unique_dots = np.unique(self.barcoded_fov_df.loc[:,'barcode_reference_dot_id']).shape[0]
# There are no dots is the df
if num_unique_dots > 0:
all_barcodes = np.zeros([num_unique_dots,self.barcode_length],dtype=np.int8)
for idx, (name, group) in enumerate(self.grpd):
barcode_reference_dot_id_list.append(name)
barcode = np.zeros([self.barcode_length],dtype=np.int8)
rounds_num = group.round_num.values
rounds_num = rounds_num.astype(int)
barcode[(rounds_num-1)] += 1
all_barcodes[idx,:] = barcode
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=self.codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
for idx,name in enumerate(barcode_reference_dot_id_list):
barcode = all_barcodes[idx,:]
gene = genes[idx]
hd = dists_arr[idx][0]
cols = ['raw_barcodes','all_Hdistance_genes','number_positive_bits','hamming_distance'] # will add last column depending on hd
writing_data = [barcode.tostring(),gene,barcode.sum(),hd]
if hd == 0:
cols = cols + ['zeroHdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_2:
cols = cols + ['below2Hdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_3:
cols = cols + ['below3Hdistance_genes']
writing_data = writing_data + [gene]
self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,cols] = writing_data
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'raw_barcodes'] = barcode.tostring()
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'all_Hdistance_genes'] = gene
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'number_positive_bits'] = barcode.sum()
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'hamming_distance'] = hd
# if hd == 0:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'0Hdistance_genes'] = gene
# elif hd < hd_2:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below2Hdistance_genes'] = gene
# elif hd < hd_3:
# self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,'below3Hdistance_genes'] = gene
fname = self.file_tags['experiment_fpath'] / 'tmp' / 'registered_counts' / (self.file_tags['experiment_name'] + '_' + self.file_tags['channel'] + '_decoded_fov_' + self.file_tags['fov'] + '.parquet')
self.barcoded_fov_df.to_parquet(fname,index=False)
class extract_barcodes_NN_test():
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
    counts: pandas.DataFrame
        DataFrame with the fov counts after
        registration
    analysis_parameters: dict
        parameters for data processing
    experiment_config: Dict
        dictionary with the experimental data
    codebook_df: pandas.DataFrame
        DataFrame with the codebook used to
        deconvolve the barcodes
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def __init__(self, fov, channel, counts, analysis_parameters:Dict,experiment_config:Dict,codebook_df,status:str):
self.barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
self.RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
self.barcode_length = experiment_config['Barcode_length']
self.fov = fov
self.channel = channel
self.counts = counts
self.logger = selected_logger()
self.codebook_df = codebook_df
self.status = status
self.registration_errors = Registration_errors()
@staticmethod
def barcode_nn(counts_df, ref_round_number, barcodes_extraction_resolution):
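        # Match every dot outside ref_round_number to its nearest dot in the reference round.
        # Matches closer than barcodes_extraction_resolution inherit that reference dot_id and
        # go into barcoded_df; the remaining dots are returned in compare_df so a later round
        # can serve as their reference.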
column_names = list(counts_df.columns.values)
        column_names.append('barcode_reference_dot_id')  # append modifies the list in place; assigning its None return value would drop the column names
barcoded_df = pd.DataFrame(columns=column_names)
reference_array = counts_df.loc[counts_df.round_num == ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
reference_round_df = counts_df.loc[counts_df.round_num == ref_round_number,:].reset_index(drop=True)
# Step one (all dots not in round 1)
coords_compare = counts_df.loc[counts_df.round_num != ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
compare_df = counts_df.loc[counts_df.round_num != ref_round_number,:].reset_index(drop=True)
if (reference_array.shape[0] >0) and (coords_compare.shape[0] >0):
# initialize network
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_array)
# Get the nn
dists, indices = nn.kneighbors(coords_compare, return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_selected_coords_compare = np.where(dists <= barcodes_extraction_resolution)[0]
compare_selected_df = compare_df.loc[idx_selected_coords_compare,:]
compare_selected_df['barcode_reference_dot_id'] = np.nan
# ref_idx = indices[idx_selected_coords_compare]
# compare_selected_df.loc[compare_selected_df.index.isin(idx_selected_coords_compare),'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
for idx in idx_selected_coords_compare:
ref_idx = indices[idx]
compare_selected_df.loc[idx,'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
reference_round_df['barcode_reference_dot_id'] = reference_round_df.dot_id
barcoded_df = barcoded_df.append([compare_selected_df, reference_round_df], ignore_index=True)
compare_df = compare_df.drop(compare_selected_df.index)
compare_df = compare_df.reset_index(drop=True)
return compare_df, barcoded_df
@staticmethod
def convert_str_codebook(codebook_df,column_name):
codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
return codebook_df
@staticmethod
def make_codebook_array(codebook_df,column_name):
codebook_array = np.zeros((len(codebook_df[column_name]),codebook_df[column_name][0].shape[0]))
for idx, el in enumerate(codebook_df[column_name]):
row = codebook_df[column_name][idx]
row = row[np.newaxis,:]
codebook_array[idx,:] = row
return codebook_array
def run_extraction(self):
data_models = Output_models()
registration_errors = Registration_errors()
self.barcoded_fov_df = data_models.barcode_analysis_df
self.barcoded_fov_df.attrs = self.counts.attrs
if self.status == 'FAILED':
error = self.counts['min_number_matching_dots_registration'].values[0]
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':error,
'fov_num':int(self.fov),'dot_channel':self.channel,'round_num': round_num },ignore_index=True)
elif self.status == 'SUCCESS':
if (min(self.counts.loc[:,'min_number_matching_dots_registration']) < self.RegistrationMinMatchingBeads):
round_num = self.counts['round_num'].values[0]
self.barcoded_fov_df = self.barcoded_fov_df.append({'min_number_matching_dots_registration':registration_errors.registration_below_extraction_resolution,
'fov_num':int(self.fov),'dot_channel':self.channel,'round_num': round_num},ignore_index=True)
self.status = 'FAILED'
else:
hd_2 = 2 / self.barcode_length
hd_3 = 3 / self.barcode_length
# barcode_length = len(self.counts['round_num'].unique())
rounds = np.arange(1,self.barcode_length+1)
self.codebook_df = self.convert_str_codebook(self.codebook_df,'Code')
codebook_array = self.make_codebook_array(self.codebook_df,'Code')
nn_sklearn = NearestNeighbors(n_neighbors=1, metric="hamming")
nn_sklearn.fit(codebook_array)
# remove points with np.NAN
# self.counts = self.counts.dropna()
for round_num in rounds:
compare_df, barcoded_df = self.barcode_nn(self.counts, round_num, self.barcodes_extraction_resolution)
self.barcoded_fov_df = self.barcoded_fov_df.append(barcoded_df, ignore_index=True)
self.counts = compare_df
self.counts['barcode_reference_dot_id'] = self.counts.dot_id
self.barcoded_fov_df = self.barcoded_fov_df.append(self.counts, ignore_index=True)
self.barcoded_fov_df['barcodes_extraction_resolution'] = self.barcodes_extraction_resolution
self.grpd = self.barcoded_fov_df.groupby('barcode_reference_dot_id')
barcode_reference_dot_id_list = []
num_unique_dots = np.unique(self.barcoded_fov_df.loc[:,'barcode_reference_dot_id']).shape[0]
                # Decode barcodes only if there is at least one reference dot in the df
if num_unique_dots > 0:
all_barcodes = np.zeros([num_unique_dots,self.barcode_length],dtype=np.int8)
for idx, (name, group) in enumerate(self.grpd):
barcode_reference_dot_id_list.append(name)
barcode = np.zeros([self.barcode_length],dtype=np.int8)
rounds_num = group.round_num.values
rounds_num = rounds_num.astype(int)
barcode[(rounds_num-1)] += 1
all_barcodes[idx,:] = barcode
dists_arr, index_arr = nn_sklearn.kneighbors(all_barcodes, return_distance=True)
genes=self.codebook_df.loc[index_arr.reshape(index_arr.shape[0]),'Gene'].tolist()
for idx,name in enumerate(barcode_reference_dot_id_list):
barcode = all_barcodes[idx,:]
gene = genes[idx]
hd = dists_arr[idx][0]
cols = ['raw_barcodes','all_Hdistance_genes','number_positive_bits','hamming_distance'] # will add last column depending on hd
writing_data = [barcode.tostring(),gene,barcode.sum(),hd]
if hd == 0:
cols = cols + ['zeroHdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_2:
cols = cols + ['below2Hdistance_genes']
writing_data = writing_data + [gene]
if hd < hd_3:
cols = cols + ['below3Hdistance_genes']
writing_data = writing_data + [gene]
self.barcoded_fov_df.loc[self.barcoded_fov_df.barcode_reference_dot_id == name,cols] = writing_data
class extract_barcodes_NN_new():
"""
Class used to extract the barcodes from the registered
counts using nearest neighbour
Parameters:
-----------
    counts: pandas.DataFrame
        DataFrame with the fov counts after
        registration
    analysis_parameters: dict
        parameters for data processing
    experiment_config: Dict
        dictionary with the experimental data
    codebook_df: pandas.DataFrame
        DataFrame with the codebook used to
        deconvolve the barcodes
NB: if there is a problem with the registration the barcode assigned
will be 0*barcode_length
"""
def __init__(self, registered_counts, analysis_parameters:Dict,experiment_config:Dict,codebook_df):
self.counts_df = registered_counts
self.analysis_parameters = analysis_parameters
self.experiment_config = experiment_config
self.codebook_df = codebook_df
self.logger = selected_logger()
self.barcodes_extraction_resolution = analysis_parameters['BarcodesExtractionResolution']
self.RegistrationMinMatchingBeads = analysis_parameters['RegistrationMinMatchingBeads']
self.barcode_length = self.counts_df.loc[0]['barcode_length']
self.registration_errors = Registration_errors()
self.stitching_channel = self.counts_df['stitching_channel'].iloc[0]
@staticmethod
def convert_str_codebook(codebook_df,column_name):
codebook_df[column_name] = codebook_df[column_name].map(lambda x: np.frombuffer(x, np.int8))
return codebook_df
@staticmethod
def make_codebook_array(codebook_df,column_name):
codebook_array = np.zeros((len(codebook_df[column_name]),codebook_df[column_name][0].shape[0]))
for idx, el in enumerate(codebook_df[column_name]):
row = codebook_df[column_name][idx]
row = row[np.newaxis,:]
codebook_array[idx,:] = row
return codebook_array
@staticmethod
def barcode_nn(counts_df, ref_round_number, barcodes_extraction_resolution):
column_names = list(counts_df.columns.values)
        column_names.append('barcode_reference_dot_id')  # append modifies the list in place; assigning its None return value would drop the column names
barcoded_df = pd.DataFrame(columns=column_names)
reference_array = counts_df.loc[counts_df.round_num == ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
reference_round_df = counts_df.loc[counts_df.round_num == ref_round_number,:].reset_index(drop=True)
# Step one (all dots not in round 1)
coords_compare = counts_df.loc[counts_df.round_num != ref_round_number, ['r_px_registered','c_px_registered']].to_numpy()
compare_df = counts_df.loc[counts_df.round_num != ref_round_number,:].reset_index(drop=True)
if (reference_array.shape[0] >0) and (coords_compare.shape[0] >0):
# initialize network
nn = NearestNeighbors(n_neighbors=1, metric="euclidean")
nn.fit(reference_array)
# Get the nn
dists, indices = nn.kneighbors(coords_compare, return_distance=True)
# select only the nn that are below barcodes_extraction_resolution distance
idx_selected_coords_compare = np.where(dists <= barcodes_extraction_resolution)[0]
compare_selected_df = compare_df.loc[idx_selected_coords_compare,:]
compare_selected_df['barcode_reference_dot_id'] = np.nan
for k,v in groupby(idx_selected_coords_compare):
if len(list(v)) > 3:
print("key: '{}'--> group: {}".format(k, len(list(v))))
# ref_idx = indices[idx_selected_coords_compare].squeeze()
# compare_selected_df.loc[compare_selected_df.index.isin(idx_selected_coords_compare),'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
for idx in idx_selected_coords_compare:
ref_idx = indices[idx]
compare_selected_df.loc[idx,'barcode_reference_dot_id'] = reference_round_df.loc[ref_idx,'dot_id'].values[0]
reference_round_df['barcode_reference_dot_id'] = reference_round_df.dot_id
barcoded_df = barcoded_df.append([compare_selected_df, reference_round_df], ignore_index=True)
compare_df = compare_df.drop(compare_selected_df.index)
compare_df = compare_df.reset_index(drop=True)
return compare_df, barcoded_df
def run_extraction(self):
data_models = Output_models()
registration_errors = Registration_errors()
self.barcoded_spec = data_models.barcode_analysis_df
if not self.counts_df[self.counts_df['dot_id'].isnull()].empty:
print('shitty FOV')
            self.all_combine_df = pd.concat([self.counts_df,self.barcoded_spec],axis=1)
import itertools
import json
import ntpath
import os
import pandas as pd
import re
import spacy
from glob import glob
from pprint import PrettyPrinter
from sentence_transformers import SentenceTransformer
from string import punctuation
from tqdm import tqdm
from src.utils import normalize_punctuations
# Download SpaCy models if needed
spacy_model = 'en_core_web_sm'
try:
nlp = spacy.load(spacy_model)
except OSError:
print("\n\n\n Downloading SpaCy model ...")
spacy.cli.download(spacy_model)
nlp = spacy.load(spacy_model)
# Define useful directories
working_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
root_dir = os.path.dirname(working_dir)
data_dir = os.path.join(root_dir, 'dataset')
# Define useful variables
word_pattern = re.compile(r'\w+')
punct_pattern = re.compile(f"[{punctuation}]")
printer = PrettyPrinter(indent=4)
sentiment_mapper = {"pos": 1, "positive": 1,
"neg": 2, "negative": 2,
"neu": 3, "neutral": 3,
"conflict": 4, }
category_mapper = {"Arrival Experience": 1,
"Room Services": 2,
"Breakfast": 3,
"Dinning": 4,
"Bar & Lounge": 5,
"F&B Services": 6,
"Hotel Services": 7,
"Others": 8, }
attribute_mapper = {"Speed of check-in/out": 1,
"Booking accuracy": 2,
"Room availability": 3,
"Timeliness of service": 4,
"Loyalty Recognition": 5,
"Staff were responsive to my needs": 6,
"Accuracy of delivery of service": 7,
"Staff attitude / Staff anticipation": 8,
"Tools & Equipment": 9,
"Operational Hours": 10,
"Food quality": 11,
"Food variety": 12,
"Food temperature": 13,
"Beverage quality": 14,
"Price / Value": 15,
"Service Culture": 16,
"Problem Identification": 17,
"Service Recovery": 18,
"Hotel Facilities": 19,
"Location & Transportation": 20,
"Parking": 21,
"Disabled-Friendliness": 22,
"Room Cleanliness": 23,
"Room Amenities": 24,
"Room Condition": 25,
"Room Odour": 26,
"Noise Pollution": 27,
"Air-Condition": 28,
"Internet Connectivity": 29,
"Pest": 30,
"Shower / Bath Experience": 31,
"Planning": 32,
"Cleaning Process": 33,
"Others": 34, }
attribute_replacement = {"Queue": "Timeliness of service",
"Knowledge of staff": "Staff were responsive to my needs",
"Food Snack / Menu": "Food variety",
"Food presentation": "Food variety",
"Beverages quality": "Beverage quality",
"Beverage": "Beverage quality",
"Operations Hours": "Operational Hours",
"Primary Issues": "Service Culture",
"Track, Measure & Sustain": "Problem Identification",
"Transportation": "Location & Transportation",
"IHG Way of Clean 5-S Cleaning Process": "Cleaning Process",
"Cleaning tools": "Cleaning Process",
"Audits": "Others",
"PMM": "Others",
"PMM tools": "Others",
"Application of tools": "Others"}
def encode_words_location(text: str) -> dict:
# print(f"\n{text}")
text = normalize_punctuations(text)
# Split sentence into phrases by punctuation
punct_locs = [(p.start(), p.end()) for p in punct_pattern.finditer(text)]
if len(punct_locs)==0:
phrases_dict = {0: [0, len(text), text]}
else:
phrases_dict = dict()
phrase_idx = 0
last_punct_end = 0
for p_i, punct_loc in enumerate(punct_locs):
current_punct_start, current_punct_end = punct_loc
if p_i == 0:
if current_punct_start > 0:
phrases_dict[phrase_idx] = [0, current_punct_start, text[:current_punct_start]]
phrase_idx += 1
elif p_i != 0:
phrases_dict[phrase_idx] = [last_punct_end, current_punct_start, text[last_punct_end:current_punct_start]]
phrase_idx += 1
phrases_dict[phrase_idx] = [current_punct_start, current_punct_end, text[current_punct_start:current_punct_end]]
phrase_idx += 1
if p_i == len(punct_locs)-1:
if current_punct_end < len(text):
phrases_dict[phrase_idx] = [current_punct_end, len(text)-1, text[current_punct_end:]]
last_punct_end = current_punct_end
# printer.pprint(phrases_dict)
# Split phrases into words (offset by sentence, not by current phrase)
words_dict = dict()
word_idx = 0
for phrase_idx in range(len(phrases_dict)):
phrase_start, phrase_end, phrase = phrases_dict[phrase_idx]
if phrase_end-phrase_start == 1: # case of punctuation
words_dict[word_idx] = phrases_dict[phrase_idx]
word_idx += 1
phrase_words_dict = {
w_i+word_idx: [w.start()+phrase_start, w.end()+phrase_start, w.group(0)] \
for w_i, w in enumerate(word_pattern.finditer(phrase))
}
word_idx += len(phrase_words_dict)
words_dict.update(phrase_words_dict)
# printer.pprint(words_dict)
# Convert word dictionary to word dataframe --> easy comparison
words_df = pd.DataFrame(data=words_dict).T
words_df.rename(columns={0: 'offset_start',
1: 'offset_end',
2: 'word'}, inplace=True)
# print(words_df)
# Sentencize
words_df['sentence_id'] = [0] * len(words_df)
sentences = [sent.text for sent in nlp(text).sents]
BoSs = [text.find(sent) for sent in sentences]
for sentence_id, bos in enumerate(BoSs):
if sentence_id == 0:
continue
words_df.loc[words_df.offset_start>=bos, 'sentence_id'] = sentence_id
return words_dict, words_df
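# Added illustrative helper (for documentation only, not part of the original pipeline):
# encode_words_location returns a {word_idx: [offset_start, offset_end, word]} dict plus a
# DataFrame with the same columns and a sentence_id, which is what decode_words_location
# consumes below.
def _encode_words_location_example():
    words_dict, words_df = encode_words_location("Great room, friendly staff.")
    return words_dict, words_df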
def decode_words_location(located_words: pd.DataFrame,
annotations: list or tuple) -> list:
annotations = list(annotations)
n_words = len(located_words)
# located_words['word'] = located_words['word'].apply(lambda x: x.lower())
# Assign all words as BACKGROUND
located_words['aspect'] = [0] * n_words
located_words['opinion'] = [0] * n_words
located_words['sentiment'] = [0] * n_words
located_words['category'] = [0] * n_words
located_words['attribute'] = [0] * n_words
# If no annotation, all words are considered background - class=0
if len(annotations) < 1:
return located_words
for annotation in annotations:
offset_start, offset_end, label = annotation[:3]
# Assign all words in annotation as
# BEGIN code for Aspect & Opinion,
# polarity code for Sentiment,
# class code for Category & Attribute.
annotation_locs = (located_words['offset_start']>=offset_start) & (located_words['offset_end']<=offset_end)
if label == 'opinion':
located_words.loc[annotation_locs, 'opinion'] = 1
else:
category, attribute, polarity = label.split('_-_')
if attribute in attribute_replacement.keys():
attribute = attribute_replacement[attribute]
located_words.loc[annotation_locs, 'aspect'] = 1
located_words.loc[annotation_locs, 'sentiment'] = sentiment_mapper[polarity]
located_words.loc[annotation_locs, 'category'] = category_mapper[category]
located_words.loc[annotation_locs, 'attribute'] = attribute_mapper[attribute]
# Split Aspect & Opinion annotated words into BEGINNING and INSIDE
for r_i in range(n_words-1, 0, -1):
for col in ['opinion', 'aspect']:
if located_words.loc[r_i, col] == 0:
continue
if located_words.loc[r_i-1, col] == 1:
# if previous word is annotated as BEGINNING, flip current word to INSIDE
located_words.loc[r_i, col] = 2
# print(located_words)
return located_words
def window_slide(seq: list or tuple, window_size: int=2):
seq_iter = iter(seq)
seq_sliced = list(itertools.islice(seq_iter, window_size))
if len(seq_sliced) == window_size:
yield seq_sliced
    for seq_el in seq_iter:
        seq_sliced = seq_sliced[1:] + [seq_el]  # advance the window; without rebinding, every window after the second reuses stale elements
        yield seq_sliced
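# Added illustrative example (not part of the original module): window_slide yields
# overlapping windows, e.g. [1, 2, 3, 4] with window_size=2 gives [1, 2], [2, 3], [3, 4].
def _window_slide_example():
    return [list(window) for window in window_slide([1, 2, 3, 4], window_size=2)]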
def process_word_level(annotation: dict,
lower_case: bool=False,
upper_case: bool=False,
capital_case: bool=False) -> pd.DataFrame:
text = annotation['data']
if lower_case:
text = text.lower()
elif upper_case:
text = text.upper()
elif capital_case:
sentences = [sent.text for sent in nlp(text).sents]
sentences = [sent.capitalize() for sent in sentences]
text = '. '.join(sentences)
_, located_words_df = encode_words_location(text)
words_annotated = annotation['label'] if 'label' in annotation.keys() else []
words_data = [a+[text[a[0]:a[1]]] for a in words_annotated]
words_labeled = decode_words_location(located_words_df, words_data)
words_labeled['doc_id'] = doc_id
words_labeled.reset_index(inplace=True)
words_labeled.rename(columns={'index': 'word_id'}, inplace=True)
return words_labeled[words_labeled.word!=' ']
def process_token_level(words_labeled: pd.DataFrame) -> pd.DataFrame:
words_df = words_labeled[['word_id', 'word']]
words = words_df.word.values.tolist()
text = ' '.join(words)
tokens = tokenizer.tokenize(text)
tokens_df = pd.DataFrame(columns=['token', 'word', 'word_id'])
word_offset, word_id, token_id = 0, 0, 0
word = words.pop(0)
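    # Greedily align each (possibly '##'-prefixed) wordpiece token with the characters of the
    # current word, so that every token row carries the word_id of the word it came from and
    # inherits that word's labels in the merge below.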
for token_id, token in enumerate(tokens):
token_ = token[2:] if token.startswith('##') else token
if token_ == word[word_offset:word_offset+len(token_)]:
tokens_df.loc[token_id] = [token, word, word_id]
word_offset += len(token_)
else:
print(f"\n\t{doc_id}\t{token_}\t{word[:len(token_)]}")
if word_offset >= len(word) and len(words) > 0:
word = words.pop(0)
word_id += 1
word_offset = 0
tokens_labeled = tokens_df.merge(words_labeled, on=['word', 'word_id'], how='inner')
return tokens_labeled
if __name__ == "__main__":
# Load tokenizer
print(f"\n\n\nLoading Tokenizer ...")
sbert_version = 'distilUSE'
sbert_dir = os.path.join(root_dir, 'artefacts', sbert_version)
embedder = SentenceTransformer(sbert_dir)
tokenizer = embedder.tokenizer
# Processing
label_cols = ['document', 'num_sentences', 'token', 'target', 'opinion', 'target_polarity', 'category', 'attribute']
    labels_df = pd.DataFrame(columns=label_cols)
import src.controllers as contr
import src.pre_processing as pp
import yfinance as yf
import pandas as pd
import numpy as np
import os
def correlation_testing():
    tickers = pd.DataFrame(["GME", "AMC", "KOSS", "NAKD", "BBBY", "NOK", "VIX"], columns=['Ticker'])
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetime_like(self, tz_naive_fixture):
idx = pd.date_range("20130101", periods=3,
tz=tz_naive_fixture, name="xxx")
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name="xxx"))
res = pd.to_numeric(pd.Series(idx, name="xxx"))
tm.assert_series_equal(res, pd.Series(idx.asi8, name="xxx"))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# TODO: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with pytest.raises(TypeError, match="Invalid object type"):
pd.to_numeric(s)
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_downcast_basic(self, data):
# see gh-13352
invalid_downcast = "unsigned-integer"
msg = "invalid downcasting method provided"
with pytest.raises(ValueError, match=msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
# Basic function tests.
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
# Basic dtype support.
smallest_uint_dtype = np.dtype(np.typecodes["UnsignedInteger"][0])
# Support below np.float32 is rare and far between.
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast="float")
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("signed_downcast", ["integer", "signed"])
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_signed_downcast(self, data, signed_downcast):
# see gh-13352
smallest_int_dtype = np.dtype(np.typecodes["Integer"][0])
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_invalid_data(self):
# If we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter.
data = ["foo", 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors="ignore",
downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
def test_ignore_downcast_neg_to_unsigned(self):
# Cannot cast to an unsigned integer
# because we have a negative number.
data = ["-1", 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast="unsigned")
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])
@pytest.mark.parametrize("data,expected", [
(["1.1", 2, 3],
np.array([1.1, 2, 3], dtype=np.float64)),
([10000.0, 20000, 3000, 40000.36, 50000, 50000.00],
np.array([10000.0, 20000, 3000,
40000.36, 50000, 50000.00], dtype=np.float64))
])
def test_ignore_downcast_cannot_convert_float(
self, data, expected, downcast):
# Cannot cast to an integer (signed or unsigned)
# because we have a float number.
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("downcast,expected_dtype", [
("integer", np.int16),
("signed", np.int16),
("unsigned", np.uint16)
])
def test_downcast_not8bit(self, downcast, expected_dtype):
# the smallest integer dtype need not be np.(u)int8
data = ["256", 257, 258]
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("dtype,downcast,min_max", [
("int8", "integer", [iinfo(np.int8).min,
iinfo(np.int8).max]),
("int16", "integer", [iinfo(np.int16).min,
iinfo(np.int16).max]),
('int32', "integer", [iinfo(np.int32).min,
iinfo(np.int32).max]),
('int64', "integer", [iinfo(np.int64).min,
iinfo(np.int64).max]),
('uint8', "unsigned", [iinfo(np.uint8).min,
iinfo(np.uint8).max]),
('uint16', "unsigned", [iinfo(np.uint16).min,
iinfo(np.uint16).max]),
('uint32', "unsigned", [iinfo(np.uint32).min,
iinfo(np.uint32).max]),
('uint64', "unsigned", [iinfo(np.uint64).min,
iinfo(np.uint64).max]),
('int16', "integer", [iinfo(np.int8).min,
iinfo(np.int8).max + 1]),
('int32', "integer", [iinfo(np.int16).min,
iinfo(np.int16).max + 1]),
('int64', "integer", [iinfo(np.int32).min,
iinfo(np.int32).max + 1]),
('int16', "integer", [iinfo(np.int8).min - 1,
iinfo(np.int16).max]),
('int32', "integer", [iinfo(np.int16).min - 1,
iinfo(np.int32).max]),
('int64', "integer", [iinfo(np.int32).min - 1,
iinfo(np.int64).max]),
('uint16', "unsigned", [iinfo(np.uint8).min,
iinfo(np.uint8).max + 1]),
('uint32', "unsigned", [iinfo(np.uint16).min,
iinfo(np.uint16).max + 1]),
('uint64', "unsigned", [iinfo(np.uint32).min,
iinfo(np.uint32).max + 1])
])
def test_downcast_limits(self, dtype, downcast, min_max):
# see gh-14404: test the limits of each downcast.
series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
assert series.dtype == dtype
def test_coerce_uint64_conflict(self):
# see gh-17007 and gh-17125
#
# Still returns float despite the uint64-nan conflict,
# which would normally force the casting to object.
df = pd.DataFrame({"a": [200, 300, "", "NaN", 30000000000000000000]})
expected = pd.Series([200, 300, np.nan, np.nan,
30000000000000000000], dtype=float, name="a")
        result = to_numeric(df["a"], errors="coerce")
import os
import FactorTest.FactorTestBox as FB
from FactorTest.FactorTestPara import *
import pandas as pd
import numpy as np
from tqdm import tqdm
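# Every updater below follows the same pattern: derive the incremental start date from the
# locally stored latest-update time, pull the new rows from the Wind database (or the WindPy
# API), rename the security/date columns to 'code'/'time', and hand the result to the
# matching FactorTestBox save helper.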
#
#A-share daily EOD quotes (run this update first; it also maintains the tradedate calendar)
def getAShareEODPrices(infoDF):
starttime=FB.getUpdateStartTime(infoDF['最新时间'])
sqlData=FB.getSql('select S_INFO_WINDCODE,TRADE_DT,'+','.join(infoDF['数据库键'])+' from wind.AShareEODPrices where TRADE_DT>='+str(int(starttime)))
sqlData.rename(columns={'S_INFO_WINDCODE':'code','TRADE_DT':'time'},inplace=True)
FB.saveDailyData(sqlData,infoDF)
#A-share daily EOD quotes, table 1 (run this update first; it also maintains the tradedate calendar)
def getAShareEODPrices1(infoDF):
starttime=FB.getUpdateStartTime(infoDF['最新时间'])
sqlData=FB.getSql('select S_INFO_WINDCODE,TRADE_DT,'+','.join(infoDF['数据库键'])+' from wind.AShareEODPrices where TRADE_DT>='+str(int(starttime)))
sqlData.rename(columns={'S_INFO_WINDCODE':'code','TRADE_DT':'time'},inplace=True)
FB.saveDailyData(sqlData,infoDF)
#A-share daily EOD derivative indicators
def AShareEODDerivativeIndicator(infoDF):
for ind in tqdm(FB.partition(infoDF.index,5)):
info_loc=infoDF.loc[ind]
starttime=FB.getUpdateStartTime(info_loc['最新时间'])
sqlData=FB.getSql('select S_INFO_WINDCODE,TRADE_DT,'+','.join(info_loc['数据库键'])+' from wind.AShareEODDerivativeIndicator where TRADE_DT>='+str(int(starttime)))
sqlData.rename(columns={'S_INFO_WINDCODE':'code','TRADE_DT':'time'},inplace=True)
FB.saveDailyData(sqlData,info_loc)
#A-share financial indicator data
def AShareFinancialIndicator(infoDF):
starttime=FB.getUpdateStartTime(infoDF['最新时间'])
sqlData=FB.getSql('select S_INFO_WINDCODE,REPORT_PERIOD,'+','.join(infoDF['数据库键'])+' from wind.AShareFinancialIndicator where REPORT_PERIOD>='+str(int(starttime)))
sqlData.rename(columns={'S_INFO_WINDCODE':'code','REPORT_PERIOD':'time'},inplace=True)
FB.saveFinData(sqlData,infoDF)
#Shenwan (SW) industry classification constituents
def AShareSWIndustriesClass(infoDF):
starttime=FB.getUpdateStartTime(infoDF['最新时间'])
sqlData=FB.getSql('select S_INFO_WINDCODE,SW_IND_CODE,ENTRY_DT,REMOVE_DT from wind.AShareSWIndustriesClass where ENTRY_DT>='+str(int(starttime)))
sqlData.columns=['code',infoDF['数据库键'].iloc[0],'time','eddate']
FB.saveIndData(sqlData,infoDF)
#Shenwan (SW) industry index EOD prices
def ASWSIndexEOD(infoDF):
starttime=FB.getUpdateStartTime(infoDF['最新时间'])
sqlData=FB.getSql('select S_INFO_WINDCODE,TRADE_DT,'+','.join(infoDF['数据库键'])+' from wind.ASWSIndexEOD where TRADE_DT>='+str(int(starttime)))
sqlData.rename(columns={'S_INFO_WINDCODE':'code','TRADE_DT':'time'},inplace=True)
FB.saveDailyData(sqlData,infoDF)
def getHS300Weight(infoDF):
starttime=FB.getUpdateStartTime(infoDF['最新时间'])
sqlData=FB.getSql('select S_CON_WINDCODE,TRADE_DT,I_WEIGHT \
from wind.AIndexHS300CloseWeight where TRADE_DT>'+str(int(starttime)))
sqlData.rename(columns={'S_CON_WINDCODE':'code','TRADE_DT':'time'},inplace=True)
FB.saveDailyData(sqlData,infoDF)
def readWindData(ans,ind='Times',col='Codes'):
return pd.DataFrame(ans.Data,index=getattr(ans,ind),columns=getattr(ans,col))
def getZZ500EWeight(infoDF):
starttime=str(FB.getUpdateStartTime(infoDF['最新时间']))
starttime=starttime[:4]+'-'+starttime[4:6]+'-'+starttime[6:]
from WindPy import w
w.start()
ans=w.wset("indexhistory","startdate="+starttime+";enddate=2100-12-31;windcode=000905.SH")
w.close()
try:
ans=readWindData(ans,'Fields','Codes').T[['tradedate','tradecode','tradestatus']]
except:
ans=pd.DataFrame(index=['tradedate','tradecode','tradestatus']).T
FB.saveIndexComponentData(ans, infoDF)
def getZZ1000EWeight(infoDF):
starttime=str(FB.getUpdateStartTime(infoDF['最新时间']))
starttime=starttime[:4]+'-'+starttime[4:6]+'-'+starttime[6:]
from WindPy import w
w.start()
ans=w.wset("indexhistory","startdate="+starttime+";enddate=2100-12-31;windcode=000852.SH")
w.close()
try:
ans=readWindData(ans,'Fields','Codes').T[['tradedate','tradecode','tradestatus']]
except:
ans=pd.DataFrame(index=['tradedate','tradecode','tradestatus']).T
FB.saveIndexComponentData(ans, infoDF)
def getWindAEWeight(infoDF):
starttime=str(FB.getUpdateStartTime(infoDF['最新时间']))
starttime=starttime[:4]+'-'+starttime[4:6]+'-'+starttime[6:]
from WindPy import w
w.start()
ans=w.wset("indexhistory","startdate="+starttime+";enddate=2100-12-31;windcode=881001.WI")
w.close()
try:
ans=readWindData(ans,'Fields','Codes').T[['tradedate','tradecode','tradestatus']]
except:
        ans=pd.DataFrame(index=['tradedate','tradecode','tradestatus']).T
    FB.saveIndexComponentData(ans, infoDF)
import requests
from web3 import Web3
import pandas
from time import time
# print("Using Unix TimeSamp:", int(time()))
CURRENT_TIME = int(time())
def integrate(times):
"""
Returns the seconds of staked times for an array of alternating deposit
and withdrawal times for a token.
"""
if len(times) == 1:
return CURRENT_TIME - times[0]
elif len(times) % 2 == 0:
s = 0
for i in range(0, len(times), 2):
s += times[i+1] - times[i]
return s
else:
times2 = [times.pop()]
return integrate(times)+integrate(times2)
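# Added illustrative example (not part of the original script): a token deposited at t=100,
# withdrawn at t=150 and re-deposited at t=200 (still staked now) has been staked for
# (150 - 100) + (CURRENT_TIME - 200) seconds. Note that integrate() pops from the list it is
# given, so pass a copy if the original timestamps are still needed.
def _integrate_example():
    return integrate([100, 150, 200])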
# DTSPool contract address
CONTRACT_ADDR = "0x4f65ADeF0860390aB257F4A3e4eea147F892410a"
# Probably shouldn't share this but who's gonna read this anyways lmao
INFURA_ID = "313074df26854ed9899239ba251ebc7c"
ETHERSCAN_TOKEN = "<KEY>"
url_abi = f"https://api.etherscan.io/api?module=contract&action=getabi&address={CONTRACT_ADDR}&apikey={ETHERSCAN_TOKEN}"
w3 = Web3(Web3.HTTPProvider(f"https://mainnet.infura.io/v3/{INFURA_ID}"))
response = requests.get(url_abi)
print("Got contract ABI")
# Application binary interface
ABI = response.json()
checkAddr = Web3.toChecksumAddress(CONTRACT_ADDR)
contract = w3.eth.contract(checkAddr, abi=ABI["result"])
url_txns = f"https://api.etherscan.io/api?module=account&action=txlist&address={CONTRACT_ADDR}&apikey={ETHERSCAN_TOKEN}"
response = requests.get(url_txns)
print("Got all contract transactions")
# print(response.status_code)
# List of all transactions that involve DTSPool
data = response.json()
token_events = {k: [] for k in range(10000)}
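# One list of alternating deposit/withdrawal timestamps per token id
# (the collection size is assumed to be 10000 tokens).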
# {addr: {123: [.., ...], 456: [...]}}
addr_token_events = {}
# len(data["result"])
for i in range(1, len(data["result"])):
if int(data["result"][i]["isError"]):
print("*error tx from:", data["result"][i]["from"])
continue
function_input = contract.decode_function_input(data["result"][i]["input"])
nftContract = function_input[1]["tokenContract"]
if nftContract != "0xc92d06C74A26AeAf4d1A1273FAC171f3B09FAC79":
print("Skipping deposited nft:", contract)
continue
timeStamp = data["result"][i]["timeStamp"]
addr = data["result"][i]["from"]
event = str(function_input[0]) # function
# print(event)
if "MultipleNFT" in event:
# print(d[1]["tokenIDList"])
for token in function_input[1]["tokenIDList"]:
token_events[token].append(timeStamp)
if not addr_token_events.get(addr):
addr_token_events.update({addr: {}})
# This is so not computationally efficient but ask me if I care
addr_token_events[addr].update({token: token_events[token]})
else:
# print(d[1]["tokenID"])
token_events[function_input[1]["tokenID"]].append(timeStamp)
if not addr_token_events.get(addr):
addr_token_events.update({addr: {}})
        single_token = function_input[1]["tokenID"]
        addr_token_events[addr].update({single_token: token_events[single_token]})
integrated_times = {k: 0 for k in addr_token_events}
for addr, v in addr_token_events.items():
# print(addr, v)
for times in v.values():
times = [int(t) for t in times]
# print(times)
# For every token calculate total staked times and add to that address
integrated_times[addr] += integrate(times)
# print(integrated_times)
# fml if I can remember in the future how does this work
# https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value
sorted_integrated_times = {k: v for k, v in sorted(
integrated_times.items(), key=lambda item: item[1], reverse=True) if v != 0}
df = pandas.DataFrame.from_dict(sorted_integrated_times, orient='index')
import matplotlib
# matplotlib.use('pgf')
# pgf_with_pdflatex = {
# "pgf.texsystem": "pdflatex",
# "pgf.preamble": [
# r"\usepackage[utf8x]{inputenc}",
# r"\usepackage[T1]{fontenc}",
# r"\usepackage{cmbright}",
# ]
# }
# matplotlib.rcParams.update(pgf_with_pdflatex)
import pandas
import re
import numpy
from matplotlib import pyplot
matplotlib.style.use('ggplot')
pyplot.interactive(False)
def to_min_secs(x, pos):
x = int(x)
minutes = x // 60
seconds = x % 60
return '{:02d}:{:02d}'.format(minutes, seconds)
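# Added usage sketch (assumption: to_min_secs is intended as a matplotlib tick formatter,
# which its (x, pos) signature suggests).
def _format_axis_as_min_secs(ax):
    from matplotlib.ticker import FuncFormatter
    ax.xaxis.set_major_formatter(FuncFormatter(to_min_secs))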
def build_dataframe_case(case):
# mobility data
mobility_columns = ['module', 'max_speed', 'min_speed', 'start_time', 'stop_time',
'total_co2', 'total_dist', 'total_time']
case_df_mobility = pandas.read_csv(case + '_stats_veinsmobility.csv')
case_df_mobility.columns = mobility_columns
mobility_search_re = 'ProvidenciaExampleScenario.(.+?).veinsmobility'
case_df_mobility['module'] = case_df_mobility['module'].map(lambda x: re.search(mobility_search_re, x).group(1))
case_df_mobility.set_index(['module'], inplace=True)
# appl data (sent warnings, arrived at dest)
appl_columns = ['module', 'arrived', 'rcvd_warnings', 'sent_warnings']
case_df_appl = pandas.read_csv(case + '_stats_appl.csv')
case_df_appl.columns = appl_columns
appl_search_re = 'ProvidenciaExampleScenario.(.+?).appl'
case_df_appl['module'] = case_df_appl['module'].map(lambda x: re.search(appl_search_re, x).group(1))
case_df_appl['arrived'] = case_df_appl['arrived'].map({1: True, 0: False})
case_df_appl.set_index(['module'], inplace=True)
case_df_speed = pandas.DataFrame()
case_df_speed['mean_speed'] = case_df_mobility['total_dist'] / case_df_mobility['total_time']
# join all tables
case_df = pandas.merge(case_df_mobility, case_df_appl, left_index=True, right_index=True, how='outer')
case_df = pandas.merge(case_df, case_df_speed, left_index=True, right_index=True, how='outer')
return case_df
def buid_csv():
for case in ['per00', 'per10']:
df = build_dataframe_case(case)
df.to_csv(case + '_total_stats.csv')
def arrived():
per00 = pandas.read_csv('per00_total_stats.csv')
per10 = pandas.read_csv('per10_total_stats.csv')
per00_arr = per00['arrived'].sum()
per10_arr = per10['arrived'].sum()
print(per00_arr, per10_arr)
def per00_vs_per10_distancetime():
per00 = pandas.read_csv('per00_total_stats.csv')
    per10 = pandas.read_csv('per10_total_stats.csv')